from .graph_module import GraphModule
from .graph import Graph
from .node import Node
from ._symbolic_trace import symbolic_trace
from ._compatibility import compatibility
import copy
from typing import Callable, Dict, List, NamedTuple, Optional, Set
import torch
__all__ = ['Match', 'replace_pattern']
@compatibility(is_backward_compatible=True)
class Match(NamedTuple):
# Node from which the match was found
anchor: Node
# Maps nodes in the pattern subgraph to nodes in the larger graph
nodes_map: Dict[Node, Node]
class _SubgraphMatcher:
def __init__(self, pattern: Graph) -> None:
self.pattern = pattern
if len(pattern.nodes) == 0:
raise ValueError("_SubgraphMatcher cannot be initialized with an "
"empty pattern")
# `self.pattern_anchor` is the output Node in `pattern`
self.pattern_anchor = next(iter(reversed(pattern.nodes)))
# Ensure that there is only a single output value in the pattern
# since we don't support multiple outputs
assert len(self.pattern_anchor.all_input_nodes) == 1, \
"Pattern matching on multiple outputs is not supported"
# Maps nodes in the pattern subgraph to nodes in the larger graph
self.nodes_map: Dict[Node, Node] = {}
def matches_subgraph_from_anchor(self, anchor: Node) -> bool:
"""
Checks if the whole pattern can be matched starting from
``anchor`` in the larger graph.
Pattern matching is done by recursively comparing the pattern
node's use-def relationships against the graph node's.
"""
self.nodes_map = {}
return self._match_nodes(self.pattern_anchor, anchor)
# Compare the pattern node `pn` against the graph node `gn`
def _match_nodes(self, pn: Node, gn: Node) -> bool:
# Check if we've already matched these nodes in the current
# traversal
if pn in self.nodes_map:
return self.nodes_map[pn] == gn
def attributes_are_equal(pn: Node, gn: Node) -> bool:
# Use placeholder and output nodes as wildcards. The
# only exception is that an output node can't match
# a placeholder
if (pn.op == "placeholder"
or (pn.op == "output" and gn.op != "placeholder")):
return True
return pn.op == gn.op and pn.target == gn.target
# Terminate early if the node attributes are not equal
if not attributes_are_equal(pn, gn):
return False
# Optimistically mark `pn` as a match for `gn`
self.nodes_map[pn] = gn
# Traverse the use-def relationships to ensure that `pn` is a true
# match for `gn`
if pn.op == "placeholder":
return True
if (pn.op != "output"
and len(pn.all_input_nodes) != len(gn.all_input_nodes)):
return False
if pn.op == "output":
match_found = any(self._match_nodes(pn.all_input_nodes[0], gn_)
for gn_ in gn.all_input_nodes)
else:
match_found = (len(pn.all_input_nodes) == len(gn.all_input_nodes)
and all(self._match_nodes(pn_, gn_) for pn_, gn_
in zip(pn.all_input_nodes, gn.all_input_nodes)))
if not match_found:
self.nodes_map.pop(pn)
return False
return True
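# Note: the helper below is not part of the original module. It is a minimal,
# hypothetical sketch (never called at import time) of how `_SubgraphMatcher`
# is driven, mirroring its use inside `replace_pattern` further down in this
# file. The function name and the toy pattern/target functions are
# illustrative only.
def _subgraph_matcher_sketch():
    def pattern(x):
        return torch.relu(x) + 1

    def target_fn(y):
        return torch.relu(y) + 1

    pattern_graph = symbolic_trace(pattern).graph
    target_graph = symbolic_trace(target_fn).graph
    matcher = _SubgraphMatcher(pattern_graph)
    # Try every node of the larger graph as a candidate anchor. The anchor
    # that succeeds is the node matched against the pattern's output node.
    anchors = [n for n in target_graph.nodes
               if matcher.matches_subgraph_from_anchor(n)]
    assert len(anchors) == 1
    # After a successful match, `matcher.nodes_map` maps each pattern node to
    # its counterpart in the larger graph.
    return anchors, matcher.nodes_map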
def _replace_submodules(gm: GraphModule, replacement: torch.nn.Module) -> None:
gm.delete_all_unused_submodules()
if isinstance(replacement, GraphModule):
replacement.graph.lint()
def try_get_submodule(mod: torch.nn.Module, target: str) -> Optional[torch.nn.Module]:
try:
mod_match = mod.get_submodule(target)
return mod_match
except AttributeError:
return None
for node in gm.graph.nodes:
if node.op == "call_module" or node.op == "get_attr":
gm_submod = try_get_submodule(gm, node.target)
replacement_submod = try_get_submodule(replacement, node.target)
# CASE 1: This target already exists as a submodule in our
# result GraphModule. Whether or not it exists in
# `replacement`, the existing submodule takes precedence.
if gm_submod is not None:
continue
# CASE 2: The target exists as a submodule in `replacement`
# only, so we need to copy it over.
elif replacement_submod is not None:
new_submod = copy.deepcopy(getattr(replacement, node.target))
gm.add_submodule(node.target, new_submod)
# CASE 3: The target doesn't exist as a submodule in `gm`
# or `replacement`
else:
raise RuntimeError("Attempted to create a \"", node.op,
"\" node during subgraph rewriting "
f"with target {node.target}, but "
"the referenced submodule does not "
"exist in either the original "
"GraphModule `gm` or the replacement"
" GraphModule `replacement`")
gm.graph.lint()
@compatibility(is_backward_compatible=True)
def replace_pattern(gm: GraphModule, pattern: Callable, replacement: Callable) -> List[Match]:
"""
Matches all possible non-overlapping sets of operators and their
data dependencies (``pattern``) in the Graph of a GraphModule
(``gm``), then replaces each of these matched subgraphs with another
subgraph (``replacement``).
Args:
``gm``: The GraphModule that wraps the Graph to operate on
``pattern``: The subgraph to match in ``gm`` for replacement
``replacement``: The subgraph to replace ``pattern`` with
Returns:
List[Match]: A list of ``Match`` objects representing the places
in the original graph that ``pattern`` was matched to. The list
is empty if there are no matches. ``Match`` is defined as:
.. code-block:: python
class Match(NamedTuple):
# Node from which the match was found
anchor: Node
# Maps nodes in the pattern subgraph to nodes in the larger graph
nodes_map: Dict[Node, Node]
Examples:
.. code-block:: python
import torch
from torch.fx import symbolic_trace, subgraph_rewriter
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, w1, w2):
m1 = torch.cat([w1, w2]).sum()
m2 = torch.cat([w1, w2]).sum()
return x + torch.max(m1) + torch.max(m2)
def pattern(w1, w2):
return torch.cat([w1, w2]).sum()
def replacement(w1, w2):
return torch.stack([w1, w2])
traced_module = symbolic_trace(M())
subgraph_rewriter.replace_pattern(traced_module, pattern, replacement)
The above code will first match ``pattern`` in the ``forward``
method of ``traced_module``. Pattern-matching is done based on
use-def relationships, not node names. For example, if you had
``p = torch.cat([a, b])`` in ``pattern``, you could match
``m = torch.cat([a, b])`` in the original ``forward`` function,
despite the variable names being different (``p`` vs ``m``).
The ``return`` statement in ``pattern`` is matched based on its
value only; it may or may not match to the ``return`` statement in
the larger graph. In other words, the pattern doesn't have to extend
to the end of the larger graph.
When the pattern is matched, it will be removed from the larger
function and replaced by ``replacement``. If there are multiple
matches for ``pattern`` in the larger function, each non-overlapping
match will be replaced. In the case of a match overlap, the first
found match in the set of overlapping matches will be replaced.
("First" here being defined as the first in a topological ordering
of the Nodes' use-def relationships. In most cases, the first Node
is the parameter that appears directly after ``self``, while the
last Node is whatever the function returns.)
One important thing to note is that the parameters of the
``pattern`` Callable must be used in the Callable itself,
and the parameters of the ``replacement`` Callable must match
the pattern. The first rule is why, in the above code block, the
``forward`` function has parameters ``x, w1, w2``, but the
``pattern`` function only has parameters ``w1, w2``. ``pattern``
doesn't use ``x``, so it shouldn't specify ``x`` as a parameter.
As an example of the second rule, consider replacing
.. code-block:: python
def pattern(x, y):
return torch.neg(x) + torch.relu(y)
with
.. code-block:: python
def replacement(x, y):
return torch.relu(x)
In this case, ``replacement`` needs the same number of parameters
as ``pattern`` (both ``x`` and ``y``), even though the parameter
``y`` isn't used in ``replacement``.
After calling ``subgraph_rewriter.replace_pattern``, the generated
Python code looks like this:
.. code-block:: python
def forward(self, x, w1, w2):
stack_1 = torch.stack([w1, w2])
sum_1 = stack_1.sum()
stack_2 = torch.stack([w1, w2])
sum_2 = stack_2.sum()
max_1 = torch.max(sum_1)
add_1 = x + max_1
max_2 = torch.max(sum_2)
add_2 = add_1 + max_2
return add_2
"""
# Get the graphs for `gm`, `pattern`, `replacement`
original_graph = gm.graph
pattern_graph = symbolic_trace(pattern).graph
replacement_graph = symbolic_trace(replacement).graph
# Find all possible pattern matches in original_graph. Note that
# pattern matches may overlap with each other.
matcher = _SubgraphMatcher(pattern_graph)
matches: List[Match] = []
# Consider each node as an "anchor" (deepest matching graph node)
for anchor in original_graph.nodes:
if matcher.matches_subgraph_from_anchor(anchor):
def pattern_is_contained(nodes_map: Dict[Node, Node]) -> bool:
# `lookup` represents all the nodes in `original_graph`
# that are part of `pattern`
lookup: Dict[Node, Node] = {v: k for k, v in nodes_map.items()}
for n in lookup.keys():
# Nodes that can "leak"...
# Placeholders (by definition)
if n.op == "placeholder":
continue
# Pattern output (acts as a container)
if lookup[n].op == "output":
continue
# Result contained by pattern output (what we'll
# hook in to the new Graph, thus what we'll
# potentially use in other areas of the Graph as
# an input Node)
if (len(lookup[n].users) == 1
and list(lookup[n].users.keys())[0].op == "output"):
continue
for user in n.users:
# If this node has users that were not in
# `lookup`, then it must leak out of the
# pattern subgraph
if user not in lookup:
return False
return True
# It's not a match if the pattern leaks out into the rest
# of the graph
if pattern_is_contained(matcher.nodes_map):
# Shallow copy nodes_map
matches.append(Match(anchor=anchor,
                     nodes_map=copy.copy(matcher.nodes_map)))
# The set of all nodes in `original_graph` that we've seen thus far
# as part of a pattern match
replaced_nodes: Set[Node] = set()
# As we progressively replace nodes, we'll need to keep track of how the match results should change
match_changed_node: Dict[Node, Node] = dict()
# Return True if one of the nodes in the current match has already
# been used as part of another match
def overlaps_with_prev_match(match: Match) -> bool:
for pn, gn in match.nodes_map.items():
if pn.op in ["placeholder", "output"]:
continue
if gn in replaced_nodes and gn.op != "placeholder":
return True
return False
for match in matches:
# Skip overlapping matches
if overlaps_with_prev_match(match):
continue
# Map replacement graph nodes to their copy in `original_graph`
val_map: Dict[Node, Node] = {}
pattern_placeholders = [n for n in pattern_graph.nodes
if n.op == "placeholder"]
assert len(pattern_placeholders) > 0
replacement_placeholders = [n for n in replacement_graph.nodes
if n.op == "placeholder"]
assert len(pattern_placeholders) == len(replacement_placeholders)
placeholder_map = {r: p for r, p
in zip(replacement_placeholders, pattern_placeholders)}
# node from `original_graph` that matched with the output node
# in `pattern`
subgraph_output: Node = match.anchor
def mark_node_as_replaced(n: Node) -> None:
if n not in match.nodes_map.values():
return
for n_ in n.all_input_nodes:
mark_node_as_replaced(n_)
replaced_nodes.add(n)
for input_node in subgraph_output.all_input_nodes:
mark_node_as_replaced(input_node)
# Initialize `val_map` with mappings from placeholder nodes in
# `replacement` to their corresponding node in `original_graph`
for replacement_node in replacement_placeholders:
# Get the `original_graph` placeholder node
# corresponding to the current `replacement_node`
pattern_node = placeholder_map[replacement_node]
original_graph_node = match_changed_node.get(match.nodes_map[pattern_node], match.nodes_map[pattern_node])
# Populate `val_map`
val_map[replacement_node] = original_graph_node
# Copy the replacement graph over
with original_graph.inserting_before(subgraph_output):
copied_output = original_graph.graph_copy(replacement_graph,
val_map)
# Hook the output Node of the replacement subgraph in to the
# original Graph at the correct location
# CASE 1: We need to hook the replacement subgraph in somewhere
# in the middle of the graph. We replace the Node in the
# original graph that corresponds to the end of the pattern
# subgraph
if subgraph_output.op != "output":
pattern_outputs = [n for n in pattern_graph.nodes
if n.op == "output"]
assert len(pattern_outputs) > 0
replacement_outputs = [n for n in replacement_graph.nodes
if n.op == "output"]
assert len(replacement_outputs) == len(pattern_outputs)
outputs_map = {p: r for r, p
in zip(replacement_outputs, pattern_outputs)}
for pn, gn in match.nodes_map.items():
if gn.op == "placeholder":
continue
# Search for the node corresponding to the output of the pattern
if pn.op != "output":
continue
assert subgraph_output == gn
# Update all anchor inputs to the new nodes
rn = outputs_map[pn]
for pn_input, rn_input in zip(pn.all_input_nodes, rn.all_input_nodes):
gn_input = match.nodes_map[pn_input]
rn_input_in_original_graph = val_map[rn_input]
gn_input.replace_all_uses_with(rn_input_in_original_graph)
# We store the updated node point in case other nodes want to use it
match_changed_node[gn_input] = rn_input_in_original_graph
assert subgraph_output.op != "output"
# CASE 2: The pattern subgraph match extends to the end of the
# original graph, so we need to change the current graph's
# output Node to reflect the insertion of the replacement graph.
# We'll keep the current output Node, but update its args and
# `_input_nodes` as necessary
else:
subgraph_output.args = ((copied_output,))
if isinstance(copied_output, Node):
subgraph_output._input_nodes = {copied_output: None}
assert isinstance(copied_output, Node)
# Erase the `pattern` nodes
for node in reversed(original_graph.nodes):
if len(node.users) == 0 and node.op != "output":
original_graph.erase_node(node)
# Update the passed-in GraphModule to reflect the new state of
# `original_graph`
gm.recompile()
# If `replacement` was an nn.Module, we'll need to make sure that
# all the submodules have been copied over correctly
if isinstance(replacement, torch.nn.Module):
_replace_submodules(gm, replacement)
return matches
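# Note: the helper below is not part of the original module. It is a minimal,
# hypothetical sketch (never called at import time) of inspecting the ``Match``
# objects returned by ``replace_pattern``; the toy module and the
# pattern/replacement functions mirror the docstring example above and are
# illustrative only.
def _replace_pattern_result_sketch():
    class M(torch.nn.Module):
        def forward(self, x, w1, w2):
            return x + torch.cat([w1, w2]).sum()

    def pattern(w1, w2):
        return torch.cat([w1, w2]).sum()

    def replacement(w1, w2):
        return torch.stack([w1, w2]).sum()

    traced = symbolic_trace(M())
    matches = replace_pattern(traced, pattern, replacement)
    for match in matches:
        # ``anchor`` is the node in the original graph from which the match
        # was found; ``nodes_map`` maps pattern nodes to original-graph nodes.
        print(match.anchor.name,
              {pn.name: gn.name for pn, gn in match.nodes_map.items()})
    return matches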
# End of file: torch/fx/subgraph_rewriter.py (repo: pytorch-master)
from .graph_module import GraphModule
from .graph import Graph
from .node import Argument, Node, Target, map_arg, map_aggregate
from .proxy import Proxy
from ._symbolic_trace import Tracer
from ._compatibility import compatibility
import torch.fx.traceback as fx_traceback
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
import inspect
from contextlib import contextmanager
__all__ = ['Interpreter', 'Transformer']
@compatibility(is_backward_compatible=True)
class Interpreter:
"""
An Interpreter executes an FX graph Node-by-Node. This pattern
can be useful for many things, including writing code
transformations as well as analysis passes.
Methods in the Interpreter class can be overridden to customize
the behavior of execution. The map of overrideable methods
in terms of call hierarchy::
run()
+-- run_node
+-- placeholder()
+-- get_attr()
+-- call_function()
+-- call_method()
+-- call_module()
+-- output()
Example:
Suppose we want to swap all instances of ``torch.neg`` with
``torch.sigmoid`` and vice versa (including their ``Tensor``
method equivalents). We could subclass Interpreter like so::
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target,
args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target,
args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
torch.testing.assert_allclose(result, torch.neg(input).sigmoid())
Args:
module (GraphModule): The module to be executed
garbage_collect_values (bool): Whether to delete values after their last
use within the Module's execution. This ensures optimal memory usage during
execution. This can be disabled to, for example, examine all of the intermediate
values in the execution by looking at the ``Interpreter.env`` attribute.
"""
@compatibility(is_backward_compatible=True)
def __init__(self, module : GraphModule, garbage_collect_values : bool = True):
assert isinstance(module, GraphModule)
self.module = module
self.submodules = dict(self.module.named_modules())
self.env : Dict[Node, Any] = {}
self.garbage_collect_values = garbage_collect_values
if self.garbage_collect_values:
# Run through reverse nodes and record the first instance of a use
# of a given node. This represents the *last* use of the node in the
# execution order of the program, which we will use to free unused
# values
node_to_last_use : Dict[Node, Node] = {}
self.user_to_last_uses : Dict[Node, List[Node]] = {}
def register_last_uses(n : Node, user : Node):
if n not in node_to_last_use:
node_to_last_use[n] = user
self.user_to_last_uses.setdefault(user, []).append(n)
for node in reversed(self.module.graph.nodes):
map_arg(node.args, lambda n: register_last_uses(n, node))
map_arg(node.kwargs, lambda n: register_last_uses(n, node))
@compatibility(is_backward_compatible=True)
def run(self, *args, initial_env : Optional[Dict[Node, Any]] = None, enable_io_processing : bool = True) -> Any:
"""
Run `module` via interpretation and return the result.
Args:
*args: The arguments to the Module to run, in positional order
initial_env (Optional[Dict[Node, Any]]): An optional starting environment for execution.
This is a dict mapping `Node` to any value. This can be used, for example, to
pre-populate results for certain `Nodes` so as to do only partial evaluation within
the interpreter.
enable_io_processing (bool): If True, the inputs and outputs are first processed with the graph's
process_inputs and process_outputs functions before being used.
Returns:
Any: The value returned from executing the Module
"""
self.env = initial_env if initial_env else {}
# Positional function args are consumed left-to-right by
# `placeholder` nodes. Use an iterator to keep track of
# position and extract those values.
if enable_io_processing:
args = self.module.graph.process_inputs(*args)
self.args_iter : Iterator[Any] = iter(args)
for node in self.module.graph.nodes:
if node in self.env:
# Short circuit if we have this value. This could
# be used, for example, for partial evaluation
# where the caller has pre-populated `env` with
# values for a subset of the program.
continue
self.env[node] = self.run_node(node)
if self.garbage_collect_values:
for to_delete in self.user_to_last_uses.get(node, []):
del self.env[to_delete]
if node.op == 'output':
output_val = self.env[node]
return self.module.graph.process_outputs(output_val) if enable_io_processing else output_val
@contextmanager
def _set_current_node(self, node):
with fx_traceback.append_stack_trace(node.stack_trace):
yield
@compatibility(is_backward_compatible=True)
def run_node(self, n : Node) -> Any:
"""
Run a specific node ``n`` and return the result.
Calls into placeholder, get_attr, call_function,
call_method, call_module, or output depending
on ``node.op``
Args:
n (Node): The Node to execute
Returns:
Any: The result of executing ``n``
"""
with fx_traceback.append_stack_trace(n.stack_trace):
args, kwargs = self.fetch_args_kwargs_from_env(n)
assert isinstance(args, tuple)
assert isinstance(kwargs, dict)
return getattr(self, n.op)(n.target, args, kwargs)
# Main Node running APIs
@compatibility(is_backward_compatible=True)
def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``placeholder`` node. Note that this is stateful:
``Interpreter`` maintains an internal iterator over
arguments passed to ``run`` and this method returns
next() on that iterator.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Returns:
Any: The argument value that was retrieved.
"""
assert isinstance(target, str)
if target.startswith('*'):
# For a starred parameter e.g. `*args`, retrieve all
# remaining values from the args list.
return list(self.args_iter)
else:
try:
return next(self.args_iter)
except StopIteration as si:
if len(args) > 0:
return args[0]
else:
raise RuntimeError(f'Expected positional argument for parameter {target}, but one was not passed in!') from si
@compatibility(is_backward_compatible=True)
def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``get_attr`` node. Will retrieve an attribute
value from the ``Module`` hierarchy of ``self.module``.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The value of the attribute that was retrieved
"""
assert isinstance(target, str)
return self.fetch_attr(target)
@compatibility(is_backward_compatible=True)
def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``call_function`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The value returned by the function invocation
"""
assert not isinstance(target, str)
# Execute the function and return the result
return target(*args, **kwargs)
@compatibility(is_backward_compatible=True)
def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``call_method`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The value returned by the method invocation
"""
# args[0] is the `self` object for this method call
self_obj, *args_tail = args
# Execute the method and return the result
assert isinstance(target, str)
return getattr(self_obj, target)(*args_tail, **kwargs)
@compatibility(is_backward_compatible=True)
def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute a ``call_module`` node and return the result.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The value returned by the module invocation
"""
# Retrieve executed args and kwargs values from the environment
# Execute the method and return the result
assert isinstance(target, str)
submod = self.fetch_attr(target)
return submod(*args, **kwargs)
@compatibility(is_backward_compatible=True)
def output(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
"""
Execute an ``output`` node. This really just retrieves
the value referenced by the ``output`` node and returns it.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
Return:
Any: The return value referenced by the output node
"""
return args[0]
# Helper methods
@compatibility(is_backward_compatible=True)
def fetch_attr(self, target : str):
"""
Fetch an attribute from the ``Module`` hierarchy of ``self.module``.
Args:
target (str): The fully-qualified name of the attribute to fetch
Return:
Any: The value of the attribute.
"""
target_atoms = target.split('.')
attr_itr = self.module
for i, atom in enumerate(target_atoms):
if not hasattr(attr_itr, atom):
raise RuntimeError(f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}")
attr_itr = getattr(attr_itr, atom)
return attr_itr
@compatibility(is_backward_compatible=True)
def fetch_args_kwargs_from_env(self, n : Node) -> Tuple[Tuple, Dict]:
"""
Fetch the concrete values of ``args`` and ``kwargs`` of node ``n``
from the current execution environment.
Args:
n (Node): The node for which ``args`` and ``kwargs`` should be fetched.
Return:
Tuple[Tuple, Dict]: ``args`` and ``kwargs`` with concrete values for ``n``.
"""
args = self.map_nodes_to_values(n.args, n)
assert isinstance(args, tuple)
kwargs = self.map_nodes_to_values(n.kwargs, n)
assert isinstance(kwargs, dict)
return args, kwargs
@compatibility(is_backward_compatible=True)
def map_nodes_to_values(self, args : Argument, n : Node) -> Argument:
"""
Recursively descend through ``args`` and look up the concrete value
for each ``Node`` in the current execution environment.
Args:
args (Argument): Data structure within which to look up concrete values
n (Node): Node to which ``args`` belongs. This is only used for error reporting.
"""
def load_arg(n_arg : Node) -> Any:
if n_arg not in self.env:
raise RuntimeError(f'Node {n} referenced nonexistent value {n_arg}! Run Graph.lint() '
f'to diagnose such issues')
return self.env[n_arg]
return map_arg(args, load_arg)
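# Note: the helper below is not part of the original module. It is a minimal,
# hypothetical sketch (never called at import time) of the ``initial_env``
# partial-evaluation behavior described in ``Interpreter.run``; the traced
# function and names are illustrative only.
def _interpreter_partial_eval_sketch():
    import operator

    import torch
    from torch.fx import symbolic_trace

    def fn(x):
        a = x + 1
        return a * 2

    gm = symbolic_trace(fn)
    # Locate the node computing ``a`` (the single ``operator.add`` call).
    add_node = next(n for n in gm.graph.nodes
                    if n.op == "call_function" and n.target is operator.add)
    # Pre-populate the environment so the interpreter skips recomputing ``a``.
    out = Interpreter(gm).run(torch.tensor(0.0),
                              initial_env={add_node: torch.tensor(10.0)})
    # ``a`` was forced to 10.0, so the result is 20.0 regardless of the input.
    assert torch.equal(out, torch.tensor(20.0))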
@compatibility(is_backward_compatible=True)
class Transformer(Interpreter):
"""
``Transformer`` is a special type of interpreter that produces a
new ``Module``. It exposes a ``transform()`` method that returns
the transformed ``Module``. ``Transformer`` does not require
arguments to run, as ``Interpreter`` does. ``Transformer`` works
entirely symbolically.
Example:
Suppose we want to swap all instances of ``torch.neg`` with
``torch.sigmoid`` and vice versa (including their ``Tensor``
method equivalents). We could subclass ``Transformer`` like so::
class NegSigmSwapXformer(Transformer):
def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
transformed : torch.nn.Module = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
torch.testing.assert_allclose(transformed(input), torch.neg(input).sigmoid())
Args:
module (GraphModule): The ``Module`` to be transformed.
"""
@compatibility(is_backward_compatible=True)
def __init__(self, module):
super().__init__(module)
self.new_graph = Graph()
self.new_graph.set_codegen(module.graph._codegen)
class TransformerTracer(Tracer):
def __init__(self, graph: Graph):
super().__init__()
self.graph = graph
def is_leaf_module(self, _, __) -> bool:
return True
self.tracer = TransformerTracer(self.new_graph)
self.tracer.root = module
@compatibility(is_backward_compatible=True)
def placeholder(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy:
"""
Execute a ``placeholder`` node. In ``Transformer``, this is
overridden to insert a new ``placeholder`` into the output
graph.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
"""
assert isinstance(target, str)
default_value = next(iter(args)) if args else inspect.Signature.empty
return Proxy(self.new_graph.placeholder(target, default_value=default_value), self.tracer)
@compatibility(is_backward_compatible=True)
def get_attr(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Proxy:
"""
Execute a ``get_attr`` node. In ``Transformer``, this is
overridden to insert a new ``get_attr`` node into the output
graph.
Args:
target (Target): The call target for this node. See
`Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for
details on semantics
args (Tuple): Tuple of positional args for this invocation
kwargs (Dict): Dict of keyword arguments for this invocation
"""
assert isinstance(target, str)
return Proxy(self.new_graph.get_attr(target), self.tracer)
@compatibility(is_backward_compatible=True)
def call_module(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
# Override so that the leaf module policy from `self.tracer` is respected.
assert isinstance(target, str)
submod = self.fetch_attr(target)
return self.tracer.call_module(submod, submod.forward, args, kwargs)
@compatibility(is_backward_compatible=True)
def call_function(self, target : 'Target', args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
# Override so that functions that were wrapped are still wrapped.
return self.tracer.create_proxy('call_function', target, args, kwargs)
@compatibility(is_backward_compatible=True)
def transform(self) -> GraphModule:
"""
Transform ``self.module`` and return the transformed
``GraphModule``.
"""
with fx_traceback.override_stack_trace():
result = super().run(enable_io_processing=False)
if result is not None:
def strip_proxy(a : Union[Argument, Proxy]) -> Any:
return a.node if isinstance(a, Proxy) else a
self.new_graph.output(map_aggregate(result, strip_proxy))
return GraphModule(self.module, self.new_graph)
# End of file: torch/fx/interpreter.py (repo: pytorch-master)
import builtins
import copy
import functools
import inspect
import math
import os
import warnings
from itertools import chain
from types import CodeType, FunctionType, ModuleType
from typing import (
Any,
Callable,
Dict,
List,
NamedTuple,
Optional,
Set,
Tuple,
Type,
Union,
)
import torch
import torch.utils._pytree as pytree
from torch._C import ScriptObject # type: ignore[attr-defined]
from ._compatibility import compatibility
from .graph import _PyTreeCodeGen, _PyTreeInfo, Graph
from .graph_module import GraphModule
from .node import Argument, base_types, map_aggregate
from .proxy import ParameterProxy, Proxy, TracerBase
HAS_VARSTUFF = inspect.CO_VARARGS | inspect.CO_VARKEYWORDS
# These need to run in global scope to handle nested calls correctly
_orig_module_call: Callable = torch.nn.Module.__call__
_orig_module_getattr: Callable = torch.nn.Module.__getattr__
_proxyable_classes: Dict[Type, None] = {}
_is_fx_tracing_flag = False
def is_fx_tracing():
return _is_fx_tracing_flag
@compatibility(is_backward_compatible=True)
class ProxyableClassMeta(type):
"""
ProxyableClassMeta allows you to make construction of a given Python class
symbolically traceable. For example::
import torch
import torch.fx
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = torch.fx.symbolic_trace(use_tensor_pair_ctor)
print(traced.code)
'''
def forward(self, x : __main___TensorPair, y : torch.Tensor):
tensor_pair = __main___TensorPair(y, y); y = None
add = x.add(tensor_pair); tensor_pair = None
mul = add.mul(x); add = x = None
return mul
'''
From this example, we can see that construction of a class (``TensorPair``)
defined with ``ProxyableClassMeta`` as metaclass can be recorded in symbolic
tracing.
"""
def __init__(cls, name, bases, attrs):
_proxyable_classes.setdefault(cls)
super().__init__(name, bases, attrs)
def __call__(cls, *args, **kwargs):
instance = cls.__new__(cls) # type: ignore[call-overload]
found_proxies = []
def check_proxy(a):
if isinstance(a, Proxy):
found_proxies.append(a)
map_aggregate(args, check_proxy)
map_aggregate(kwargs, check_proxy)
if len(found_proxies) != 0:
tracer = found_proxies[0].tracer
return tracer.create_proxy("call_function", cls, args, kwargs)
else:
cls.__init__(instance, *args, **kwargs) # type: ignore[misc]
return instance
def _patch_function(fn: FunctionType, nargs: int) -> FunctionType:
co = fn.__code__
co_flags = co.co_flags & ~HAS_VARSTUFF
co_args: tuple
if hasattr(co, "co_posonlyargcount"):
co_args = (
nargs,
0,
0,
co.co_nlocals,
co.co_stacksize,
co_flags,
co.co_code,
co.co_consts,
co.co_names,
co.co_varnames,
co.co_filename,
co.co_name,
co.co_firstlineno,
co.co_lnotab,
co.co_freevars,
co.co_cellvars,
)
else:
co_args = (
nargs,
0,
co.co_nlocals,
co.co_stacksize,
co_flags,
co.co_code,
co.co_consts,
co.co_names,
co.co_varnames,
co.co_filename,
co.co_name,
co.co_firstlineno,
co.co_lnotab,
co.co_freevars,
co.co_cellvars,
)
new_code = CodeType(*co_args) # type: ignore[arg-type]
return FunctionType(
new_code, fn.__globals__, fn.__name__, fn.__defaults__, fn.__closure__
)
# we need to insert placeholder nodes for *args and **kwargs
# we can't call this function normally, otherwise it would try to unpack them
# instead, let's make python think that args and kwargs are normal variables
@compatibility(is_backward_compatible=False)
class PHBase(object):
"""
Object representing an input placeholder to `concrete_args`
"""
def __repr__(self):
return "PH"
PH = PHBase()
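# Note: the helper below is not part of the original module. It is a minimal,
# hypothetical sketch (never called at import time) of how ``PH`` marks the
# individual placeholders inside a structured ``concrete_args`` value, so that
# a function taking a dict input can still be traced; the function names are
# illustrative only.
def _ph_concrete_args_sketch():
    def f(x):
        out = 0
        for v in x.values():
            out = out + v
        return out

    # Each ``PH`` entry becomes its own ``placeholder`` node; the surrounding
    # dict structure is flattened away via pytrees.
    gm = symbolic_trace(f, concrete_args={"x": {"a": PH, "b": PH, "c": PH}})
    assert gm({"a": 1, "b": 2, "c": 4}) == 7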
@compatibility(is_backward_compatible=True)
class Tracer(TracerBase):
# Reference: https://github.com/pytorch/pytorch/issues/54354
# The first line of this docstring overrides the one Sphinx generates for the
# documentation. We need it so that Sphinx doesn't leak `math`'s path from the
# build environment (e.g. `<module 'math' from '/leaked/path'>`).
"""Tracer(autowrap_modules=(math,), autowrap_functions=())
``Tracer`` is the class that implements the symbolic tracing functionality
of ``torch.fx.symbolic_trace``. A call to ``symbolic_trace(m)`` is equivalent
to ``Tracer().trace(m)``.
Tracer can be subclassed to override various behaviors of the tracing
process. The different behaviors that can be overridden are described
in the docstrings of the methods on this class.
"""
# Not checking BC on this API because the default value for `autowrap_modules`
# includes the local filepath to the `math` module, which would jitter
# across machines.
@compatibility(is_backward_compatible=True)
def __init__(
self,
autowrap_modules: Tuple[ModuleType] = (math,),
autowrap_functions: Tuple[Callable, ...] = (),
param_shapes_constant: bool = False,
) -> None:
# This method's signature is overridden by the first line of this class'
# docstring. If this method's signature is modified, the signature that
# overrides it also should be modified accordingly.
"""
Construct a Tracer object.
Args:
autowrap_modules (Tuple[ModuleType]): defaults to `(math, )`,
Python modules whose functions should be wrapped automatically
without needing to use fx.wrap(). Backward-compatibility for
this parameter is guaranteed.
autowrap_functions (Tuple[Callable, ...]): defaults to `()`,
Python functions that should be wrapped automatically without
needing to use fx.wrap(). Backward compatibility for this
parameter is guaranteed.
param_shapes_constant (bool): When this flag is set, calls to shape,
size, and a few other shape-like attributes of a module's parameter
will be evaluated directly, rather than returning a new Proxy value
for an attribute access. Backward compatibility for this parameter
is guaranteed.
"""
super().__init__()
# Functions we will eagerly wrap when we see them while tracing
# this captures both `math.sqrt()` and `from math import sqrt` automatically
self._autowrap_function_ids: Set[int] = {
id(value)
for name, value in chain(*[m.__dict__.items() for m in autowrap_modules])
if not name.startswith("_") and callable(value)
}
self._autowrap_function_ids.update(set([id(f) for f in autowrap_functions]))
# Python modules to apply autowrap to at the start, in addition to
# modules we see while tracing
self._autowrap_search: List[ModuleType] = list(autowrap_modules)
self.param_shapes_constant = param_shapes_constant
self.submodule_paths: Optional[Dict[torch.nn.Module, str]] = None
@compatibility(is_backward_compatible=True)
def create_arg(self, a: Any) -> "Argument":
"""
A method to specify the behavior of tracing when preparing values to
be used as arguments to nodes in the ``Graph``.
By default, the behavior includes:
#. Iterate through collection types (e.g. tuple, list, dict) and recursively
call ``create_arg`` on the elements.
#. Given a Proxy object, return a reference to the underlying IR ``Node``
#. Given a non-Proxy Tensor object, emit IR for various cases:
* For a Parameter, emit a ``get_attr`` node referring to that Parameter
* For a non-Parameter Tensor, store the Tensor in a special
attribute on the root ``Module`` and emit a ``get_attr`` node
referring to it.
This method can be overridden to support more types.
Args:
a (Any): The value to be emitted as an ``Argument`` in the ``Graph``.
Returns:
The value ``a`` converted into the appropriate ``Argument``
"""
# The base tracer is used to construct Graphs when there is no associated
# module hierarchy, so it can never create parameter references.
# The default tracer adds the ability to refer to parameters when
# tracing modules.
if isinstance(a, torch.nn.Parameter):
for n, p in self.root.named_parameters():
if a is p:
return self.create_node("get_attr", n, (), {})
raise NameError("parameter is not a member of this module")
elif isinstance(a, torch.Tensor):
for n_, p_ in self.root.named_buffers():
if a is p_:
return self.create_node("get_attr", n_, (), {})
elif isinstance(a, torch.nn.Module):
for n_, p_ in self.root.named_modules():
if a is p_:
return self.create_node("get_attr", n_, (), {})
# For NamedTuple instances that appear literally as args, we emit
# a node to construct the NamedTuple and use that Node as the argument.
if isinstance(a, tuple) and hasattr(a, "_fields"):
args = tuple(self.create_arg(elem) for elem in a)
return self.create_node("call_function", a.__class__, args, {})
# Tensors do not have a reliable string repr() from which they can be
# constructed (and we probably don't want to rely on that, either), so
# for any constant Tensor values we encounter, first search for if they
# are an attribute of some module in the module hierarchy. If so, emit
# a get_attr to retrieve that tensor. Otherwise, we'll store away the
# tensor value into a special attribute on the Module s.t. we can
# retrieve it with a get_attr.
if isinstance(a, (torch.Tensor, ScriptObject)):
qualname: Optional[str] = self.tensor_attrs.get(a)
# Tensor was not found in the Module hierarchy, stow it away in a
# special attribute and set the qualname to refer to that
if not qualname:
i = 0
while True:
qualname = f"_tensor_constant{i}"
if not hasattr(self.root, qualname):
break
i += 1
self.tensor_attrs[a] = qualname
setattr(self.root, qualname, a)
return self.create_node("get_attr", qualname, (), {})
if type(a) in _proxyable_classes:
# This is an instance of a proxyable class for which we did not
# witness its construction. Intern this as a constant attribute
# TODO: binary search
i = 0
while True:
qualname = f"_{a.__class__.__name__}_constant_{i}"
if not hasattr(self.root, qualname):
break
i += 1
setattr(self.root, qualname, a)
return self.create_node("get_attr", qualname, (), {})
return super().create_arg(a)
@compatibility(is_backward_compatible=True)
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
"""
A method to specify whether a given ``nn.Module`` is a "leaf" module.
Leaf modules are the atomic units that appear in
the IR, referenced by ``call_module`` calls. By default,
Modules in the PyTorch standard library namespace (torch.nn)
are leaf modules. All other modules are traced through and
their constituent ops are recorded, unless specified otherwise
by overriding this method.
Args:
m (Module): The module being queried about
module_qualified_name (str): The qualified path of this module relative to the root. For example,
if you have a module hierarchy where submodule ``foo`` contains
submodule ``bar``, which contains submodule ``baz``, that module will
appear with the qualified name ``foo.bar.baz`` here.
"""
return m.__module__.startswith("torch.nn") and not isinstance(
m, torch.nn.Sequential
)
@compatibility(is_backward_compatible=True)
def path_of_module(self, mod: torch.nn.Module) -> str:
"""
Helper method to find the qualified name of ``mod`` in the Module hierarchy
of ``root``. For example, if ``root`` has a submodule named ``foo``, which has
a submodule named ``bar``, passing ``bar`` into this function will return
the string "foo.bar".
Args:
mod (Module): The ``Module`` to retrieve the qualified name for.
"""
# Prefer the O(1) algorithm
if self.submodule_paths:
path = self.submodule_paths.get(mod)
if path is None:
raise NameError("module is not installed as a submodule")
assert isinstance(path, str)
return path
# O(N^2) fallback in the case that we didn't store the submodule
# paths.
else:
for n, p in self.root.named_modules():
if mod is p:
return n
raise NameError("module is not installed as a submodule")
@compatibility(is_backward_compatible=True)
def call_module(
self,
m: torch.nn.Module,
forward: Callable[..., Any],
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
) -> Any:
"""
Method that specifies the behavior of this ``Tracer`` when it encounters
a call to an ``nn.Module`` instance.
By default, the behavior is to check if the called module is a leaf module
via ``is_leaf_module``. If it is, emit a ``call_module`` node referring to
``m`` in the ``Graph``. Otherwise, call the ``Module`` normally, tracing through
the operations in its ``forward`` function.
This method can be overridden to--for example--create nested traced
GraphModules, or any other behavior you would want while tracing across
``Module`` boundaries.
Args:
m (Module): The module for which a call is being emitted
forward (Callable): The forward() method of the ``Module`` to be invoked
args (Tuple): args of the module callsite
kwargs (Dict): kwargs of the module callsite
Return:
The return value from the Module call. In the case that a ``call_module``
node was emitted, this is a ``Proxy`` value. Otherwise, it is whatever
value was returned from the ``Module`` invocation.
"""
module_qualified_name = self.path_of_module(m)
if not self.is_leaf_module(m, module_qualified_name):
return forward(*args, **kwargs)
return self.create_proxy("call_module", module_qualified_name, args, kwargs)
# This method will be refactored
@compatibility(is_backward_compatible=False)
def create_args_for_root(self, root_fn, is_module, concrete_args=None):
"""
Create ``placeholder`` nodes corresponding to the signature of the ``root``
Module. This method introspects root's signature and emits those
nodes accordingly, also supporting ``*args`` and ``**kwargs``.
"""
# In some cases, a function or method has been decorated with a wrapper
# defined via ``functools.wraps``. In this case, the outer code object
# will likely not contain the actual parameters we care about, so unwrap
# the function to get to the innermost callable.
fn_for_analysis = inspect.unwrap(root_fn)
co = fn_for_analysis.__code__
total_args = co.co_argcount + co.co_kwonlyargcount
orig_args = list(co.co_varnames)
names_iter = iter(co.co_varnames)
args: List[Any] = []
skip_arg_idx = 0
if is_module:
if total_args == 0:
raise RuntimeError(
"``self`` argument cannot be part of *args expansion!"
)
skip_arg_idx = 1
next(names_iter) # skip self
args.append(self.root)
sig = inspect.signature(fn_for_analysis)
def proxy_placeholder(name: str):
if concrete_args is not None and name in concrete_args:
cnt = 0
def replace_ph(x):
nonlocal cnt
cnt += 1
param = sig.parameters[name]
default = (
()
if param.default is inspect.Parameter.empty
else (param.default,)
)
out = self.create_proxy(
"placeholder", f"{name}_{str(cnt)}", default, {}
)
if x == PH:
return out
# Union[int, bool] == bool in Python <= 3.6
if (
type(x) == bool
or type(x) in base_types
and type(x) != torch.Tensor
):
torch._assert(
out == x,
f"{name} has been specialized to have value {x} but got another value",
)
elif type(x) == type(None):
args = (
out,
f"{name} has been specialized to have value None but got another value",
)
self.create_proxy("call_function", _assert_is_none, args, {})
else:
warnings.warn(
f"Was not able to add assertion to guarantee correct input {name} to "
f"specialized function. It is up to the user to make sure that your inputs match the "
f"inputs you specialized the function with."
)
return x
return pytree.tree_map(replace_ph, concrete_args[name])
if name[0] == "*":
default = ()
else:
param = sig.parameters[name]
default = () if param.default is inspect.Parameter.empty else (param.default,) # type: ignore[assignment]
return self.create_proxy(
"placeholder",
name,
default,
{},
type_expr=fn_for_analysis.__annotations__.get(name, None),
)
arg_names = [next(names_iter) for idx in range(skip_arg_idx, total_args)]
if isinstance(concrete_args, tuple):
if len(arg_names) != len(concrete_args):
raise RuntimeError(
f"Tracing expected {len(arg_names)} arguments but got {len(concrete_args)} concrete arguments"
)
concrete_args = {name: val for name, val in zip(arg_names, concrete_args)}
args.extend(proxy_placeholder(names) for names in arg_names)
if co.co_kwonlyargcount > 0 or co.co_flags & HAS_VARSTUFF:
# TODO: type annotations for *args and **kwargs
if co.co_flags & inspect.CO_VARARGS:
args.append(proxy_placeholder("*" + next(names_iter)))
if co.co_flags & inspect.CO_VARKEYWORDS:
args.append(proxy_placeholder("**" + next(names_iter)))
root_fn = _patch_function(root_fn, len(args))
flat_args, in_spec = pytree.tree_flatten(tuple(args))
if any(not isinstance(i, pytree.LeafSpec) for i in in_spec.children_specs):
# In the case that we have pytree-flattened inputs in
# `concrete_args`, generate a flattening wrapper around the
# original root function and return that.
self.graph._codegen = _PyTreeCodeGen(
_PyTreeInfo(orig_args[:total_args], in_spec, None)
)
def flatten_fn(*args):
tree_args = pytree.tree_unflatten(list(args), in_spec)
tree_out = root_fn(*tree_args)
out_args, out_spec = pytree.tree_flatten(tree_out)
assert isinstance(self.graph._codegen, _PyTreeCodeGen)
self.graph._codegen.pytree_info = (
self.graph._codegen.pytree_info._replace(out_spec=out_spec)
)
return out_args
return flatten_fn, flat_args
return root_fn, args
def _module_getattr(self, attr, attr_val, parameter_proxy_cache):
def maybe_get_proxy_for_attr(
attr_val, collection_to_search, parameter_proxy_cache
):
for n, p in collection_to_search:
if attr_val is p:
if n not in parameter_proxy_cache:
kwargs = {}
if (
"proxy_factory_fn"
in inspect.signature(self.create_proxy).parameters
):
kwargs["proxy_factory_fn"] = (
None
if not self.param_shapes_constant
else lambda node: ParameterProxy(
self, node, n, attr_val
)
)
val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) # type: ignore[arg-type]
parameter_proxy_cache[n] = val_proxy
return parameter_proxy_cache[n]
return None
if isinstance(attr_val, torch.nn.Parameter):
maybe_parameter_proxy = maybe_get_proxy_for_attr(
attr_val, self.root.named_parameters(), parameter_proxy_cache
)
if maybe_parameter_proxy is not None:
return maybe_parameter_proxy
if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor):
maybe_buffer_proxy = maybe_get_proxy_for_attr(
attr_val, self.root.named_buffers(), parameter_proxy_cache
)
if maybe_buffer_proxy is not None:
return maybe_buffer_proxy
return attr_val
@compatibility(is_backward_compatible=True)
def trace(
self,
root: Union[torch.nn.Module, Callable[..., Any]],
concrete_args: Optional[Dict[str, Any]] = None,
) -> Graph:
"""
Trace ``root`` and return the corresponding FX ``Graph`` representation. ``root``
can either be an ``nn.Module`` instance or a Python callable.
Note that after this call, ``self.root`` may be different from the ``root`` passed
in here. For example, when a free function is passed to ``trace()``, we will
create an ``nn.Module`` instance to use as the root and add embedded constants
to.
Args:
root (Union[Module, Callable]): Either a ``Module`` or a function to be
traced through. Backwards-compatibility for this parameter is
guaranteed.
concrete_args (Optional[Dict[str, any]]): Concrete arguments that should
not be treated as Proxies. This parameter is experimental and
its backwards-compatibility is *NOT* guaranteed.
Returns:
A ``Graph`` representing the semantics of the passed-in ``root``.
"""
global _is_fx_tracing_flag
old_is_fx_tracing_flag = _is_fx_tracing_flag
_is_fx_tracing_flag = True
try:
if isinstance(root, torch.nn.Module):
self.root = root
assert hasattr(
type(root), self.traced_func_name
), f"traced_func_name={self.traced_func_name} doesn't exist in {type(root).__name__}"
fn = getattr(type(root), self.traced_func_name)
self.submodule_paths = {mod: name for name, mod in root.named_modules()}
else:
self.root = torch.nn.Module()
fn = root
tracer_cls: Optional[Type["Tracer"]] = getattr(self, "__class__", None)
self.graph = Graph(tracer_cls=tracer_cls)
# When we encounter a Tensor value that's not a parameter, we look if it
# is some other attribute on the model. Construct a dict mapping Tensor
# values to the qualified name here for efficiency. This is used downstream
# in create_arg
self.tensor_attrs: Dict[Union[torch.Tensor, ScriptObject], str] = {}
def collect_tensor_attrs(m: torch.nn.Module, prefix_atoms: List[str]):
for k, v in m.__dict__.items():
if isinstance(v, (torch.Tensor, ScriptObject)):
self.tensor_attrs[v] = ".".join(prefix_atoms + [k])
for k, v in m.named_children():
collect_tensor_attrs(v, prefix_atoms + [k])
collect_tensor_attrs(self.root, [])
assert isinstance(fn, FunctionType)
fn_globals = fn.__globals__ # run before it gets patched
fn, args = self.create_args_for_root(
fn, isinstance(root, torch.nn.Module), concrete_args
)
parameter_proxy_cache: Dict[
str, Proxy
] = {} # Reduce number of get_attr calls
# Method dispatch on parameters is not recorded unless it's directly used.
# Thus, we need to insert a proxy when __getattr__ requests a parameter.
@functools.wraps(_orig_module_getattr)
def module_getattr_wrapper(mod, attr):
attr_val = _orig_module_getattr(mod, attr)
return self._module_getattr(attr, attr_val, parameter_proxy_cache)
@functools.wraps(_orig_module_call)
def module_call_wrapper(mod, *args, **kwargs):
def forward(*args, **kwargs):
return _orig_module_call(mod, *args, **kwargs)
_autowrap_check(
patcher,
getattr(getattr(mod, "forward", mod), "__globals__", {}),
self._autowrap_function_ids,
)
return self.call_module(mod, forward, args, kwargs)
with _Patcher() as patcher:
# allow duplicate patches to support the case of nested calls
patcher.patch_method(
torch.nn.Module,
"__getattr__",
module_getattr_wrapper,
deduplicate=False,
)
patcher.patch_method(
torch.nn.Module, "__call__", module_call_wrapper, deduplicate=False
)
_patch_wrapped_functions(patcher)
_autowrap_check(patcher, fn_globals, self._autowrap_function_ids)
for module in self._autowrap_search:
_autowrap_check(
patcher, module.__dict__, self._autowrap_function_ids
)
self.create_node(
"output",
"output",
(self.create_arg(fn(*args)),),
{},
type_expr=fn.__annotations__.get("return", None),
)
self.submodule_paths = None
finally:
_is_fx_tracing_flag = old_is_fx_tracing_flag
return self.graph
def __deepcopy__(self, memo):
# _autowrap_search contains modules, which cannot be deepcopied.
new_tracer = Tracer.__new__(Tracer)
for k, v in self.__dict__.items():
if k in {'_autowrap_search'}:
new_obj = copy.copy(v)
else:
new_obj = copy.deepcopy(v, memo)
new_tracer.__dict__[k] = new_obj
return new_tracer
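# Note: the helper below is not part of the original module. It is a minimal,
# hypothetical sketch (never called at import time) of subclassing ``Tracer``
# to treat a user-defined submodule as a leaf, as described in
# ``Tracer.is_leaf_module``; all class and function names are illustrative
# only.
def _custom_leaf_tracer_sketch():
    class Opaque(torch.nn.Module):
        def forward(self, x):
            return x.relu() + 1

    class Wrapper(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.sub = Opaque()

        def forward(self, x):
            return self.sub(x) * 2

    class LeafTracer(Tracer):
        def is_leaf_module(self, m, module_qualified_name):
            # Keep ``Opaque`` as a single ``call_module`` node instead of
            # tracing through its ``forward``.
            return isinstance(m, Opaque) or super().is_leaf_module(
                m, module_qualified_name)

    root = Wrapper()
    graph = LeafTracer().trace(root)
    gm = GraphModule(root, graph)
    # The opaque submodule shows up as a single ``call_module`` node.
    assert any(n.op == "call_module" and n.target == "sub"
               for n in gm.graph.nodes)
    return gm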
# List of pairs of (global dict, function name) functions
# to patch for the purposes of the wrap() API.
_wrapped_fns_to_patch: List[Tuple[dict, str]] = []
# List of methods on classes to wrap (class type, function name)
# this currently only works for Tensor.* methods that aren't traced properly
_wrapped_methods_to_patch: List[Tuple[type, str]] = []
if os.environ.get("FX_PATCH_GETITEM") == "1":
# This change is needed to trace models like PositionalEmbedding from BERT:
# https://github.com/pytorch/benchmark/blob/master/torchbenchmark/models/BERT_pytorch/bert_pytorch/model/embedding/position.py
# but causes issues in quantization documented here:
# https://github.com/pytorch/pytorch/issues/50710
# once that is fixed we can make this the default behavior.
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
def _find_proxy(*objects_to_search):
"""
Recursively search a data structure for a Proxy() and return it;
return None if not found.
"""
proxy = None
def find_proxy(x):
nonlocal proxy
if isinstance(x, Proxy):
proxy = x
map_aggregate(objects_to_search, find_proxy)
return proxy
def _create_wrapped_func(orig_fn):
@functools.wraps(orig_fn)
def wrapped(*args, **kwargs):
"""
Given a closed-over ``orig_fn`` to invoke, search the args and kwargs for
a Proxy object. If there is one, emit a ``call_function`` node to preserve the
call to this leaf function directly. Otherwise, just return the results of
this function call, as this function is not being traced.
"""
proxy = _find_proxy(args, kwargs)
if proxy is not None:
return_proxy = proxy.tracer.create_proxy(
"call_function", orig_fn, args, kwargs
)
return_proxy.node.meta["is_wrapped"] = True
return return_proxy
return orig_fn(*args, **kwargs)
return wrapped
def _create_wrapped_method(cls, name):
orig_fn = getattr(cls, name)
@functools.wraps(orig_fn)
def wrapped(*args, **kwargs):
"""
Search the args and kwargs for a Proxy object. If there is one,
emit a ``call_method`` node to preserve the call to this method
directly. Otherwise, just return the results of this function
call, as this function is not being traced.
"""
proxy = _find_proxy(args, kwargs)
if proxy is not None:
return proxy.tracer.create_proxy("call_method", name, args, kwargs)
return orig_fn(*args, **kwargs)
return wrapped
class _PatchedFn(NamedTuple):
frame_dict: Any
fn_name: str
orig_fn: Any
def revert(self):
raise NotImplementedError()
class _PatchedFnSetItem(_PatchedFn):
def revert(self):
self.frame_dict[self.fn_name] = self.orig_fn
class _PatchedFnDel(_PatchedFn):
def revert(self):
del self.frame_dict[self.fn_name]
class _PatchedFnSetAttr(_PatchedFn):
def revert(self):
setattr(self.frame_dict, self.fn_name, self.orig_fn)
class _Patcher(object):
def __init__(self):
super(_Patcher, self).__init__()
self.patches_made: List[_PatchedFn] = []
self.visited: Set[int] = set()
def patch(
self,
frame_dict: Dict[str, Any],
name: str,
new_fn: Callable,
deduplicate: bool = True,
):
"""
Replace frame_dict[name] with new_fn until we exit the context manager.
"""
new_fn.__fx_already_patched = deduplicate # type: ignore[attr-defined]
if name not in frame_dict and hasattr(builtins, name):
self.patches_made.append(_PatchedFnDel(frame_dict, name, None))
elif getattr(frame_dict[name], "__fx_already_patched", False):
return # already patched, no need to do it again
else:
self.patches_made.append(
_PatchedFnSetItem(frame_dict, name, frame_dict[name])
)
frame_dict[name] = new_fn
def patch_method(
self, cls: type, name: str, new_fn: Callable, deduplicate: bool = True
):
"""
Replace ``cls.name`` with ``new_fn`` until we exit the context manager.
"""
new_fn.__fx_already_patched = deduplicate # type: ignore[attr-defined]
orig_fn = getattr(cls, name)
if getattr(orig_fn, "__fx_already_patched", False):
return # already patched, no need to do it again
self.patches_made.append(_PatchedFnSetAttr(cls, name, orig_fn))
setattr(cls, name, new_fn)
def visit_once(self, thing: Any):
"""Return True on the first call to with thing, otherwise false"""
idx = id(thing)
if idx in self.visited:
return False
self.visited.add(idx)
return True
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Undo all the changes made via self.patch() and self.patch_method()
"""
while self.patches_made:
# unpatch in reverse order to handle duplicates correctly
self.patches_made.pop().revert()
self.visited.clear()
def _patch_wrapped_functions(patcher: _Patcher):
"""
Go through ``_wrapped_fns_to_patch`` and, for each frame object, wrap
the listed global functions in the `_create_wrapped_func` wrapper.
"""
for frame_dict, name in _wrapped_fns_to_patch:
if name not in frame_dict and hasattr(builtins, name):
orig_fn = getattr(builtins, name)
else:
orig_fn = frame_dict[name]
patcher.patch(frame_dict, name, _create_wrapped_func(orig_fn))
for cls, name in _wrapped_methods_to_patch:
patcher.patch_method(cls, name, _create_wrapped_method(cls, name))
def _autowrap_check(
patcher: _Patcher, frame_dict: Dict[str, Any], function_ids: Set[int]
):
"""
    Some functions, like ``math.sqrt``, are common enough that we want to automatically wrap them as we see them.
This method searches a scope for them and patches them if found.
"""
if patcher.visit_once(frame_dict):
for name, value in frame_dict.items():
if (
not name.startswith("_")
and callable(value)
and id(value) in function_ids
):
patcher.patch(frame_dict, name, _create_wrapped_func(value))
@compatibility(is_backward_compatible=True)
def wrap(fn_or_name: Union[str, Callable]):
"""
This function can be called at module-level scope to register fn_or_name as a "leaf function".
A "leaf function" will be preserved as a CallFunction node in the FX trace instead of being
traced through::
# foo/bar/baz.py
def my_custom_function(x, y):
return x * x + y * y
torch.fx.wrap('my_custom_function')
def fn_to_be_traced(x, y):
# When symbolic tracing, the below call to my_custom_function will be inserted into
# the graph rather than tracing it.
return my_custom_function(x, y)
This function can also equivalently be used as a decorator::
# foo/bar/baz.py
@torch.fx.wrap
def my_custom_function(x, y):
return x * x + y * y
    A wrapped function can be thought of as a "leaf function", analogous to the concept of
"leaf modules", that is, they are functions that are left as calls in the FX trace
rather than traced through.
Args:
fn_or_name (Union[str, Callable]): The function or name of the global function to insert into the
graph when it's called
"""
if not callable(fn_or_name) and not isinstance(fn_or_name, str):
raise RuntimeError(
"Unsupported type for global function! Must be either a callable or "
"string name"
)
if hasattr(fn_or_name, "__code__"):
assert not isinstance(fn_or_name, str) # to make mypy happy
fn_name = fn_or_name.__code__.co_name
else:
assert isinstance(
fn_or_name, str
), "fn_or_name must be a global function or string name"
fn_name = fn_or_name
currentframe = inspect.currentframe()
assert currentframe is not None
f = currentframe.f_back
assert f is not None
if f.f_code.co_name != "<module>":
raise NotImplementedError("wrap must be called at the top level of a module")
# consider implementing Callable version of this via _autowrap_function_ids / _autowrap_search
    # semantics would be slightly different, but would add support for `from x import wrapped_function`
_wrapped_fns_to_patch.append((f.f_globals, fn_name))
return fn_or_name
@compatibility(is_backward_compatible=True)
def symbolic_trace(
root: Union[torch.nn.Module, Callable[..., Any]],
concrete_args: Optional[Dict[str, Any]] = None,
) -> GraphModule:
"""
Symbolic tracing API
Given an ``nn.Module`` or function instance ``root``, this function will return a ``GraphModule``
constructed by recording operations seen while tracing through ``root``.
``concrete_args`` allows you to partially specialize your function, whether it's to remove control flow or data structures.
For example::
def f(a, b):
if b == True:
return a
else:
return a*2
    FX typically cannot trace through this due to the presence of control
flow. However, we can use `concrete_args` to specialize on the value of
`b` to trace through this.
f = fx.symbolic_trace(f, concrete_args={'b': False})
assert f(3, False) == 6
Note that although you can still pass in different values of `b`, they will be ignored.
We can also use `concrete_args` to eliminate data-structure handling from
our function. This will use pytrees to flatten your input. To avoid
overspecializing, pass in `fx.PH` for values that shouldn't be
specialized. For example::
def f(x):
out = 0
for v in x.values():
out += v
return out
f = fx.symbolic_trace(f, concrete_args={'x': {'a': fx.PH, 'b': fx.PH, 'c': fx.PH}})
assert f({'a': 1, 'b': 2, 'c': 4}) == 7
Args:
root (Union[torch.nn.Module, Callable]): Module or function to be traced and converted
into a Graph representation.
concrete_args (Optional[Dict[str, any]]): Inputs to be partially specialized
Returns:
GraphModule: a Module created from the recorded operations from ``root``.
"""
tracer = Tracer()
graph = tracer.trace(root, concrete_args)
name = (
root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__
)
return GraphModule(tracer.root, graph, name)
@wrap
def _assert_is_none(value, msg):
assert value is None, msg
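# A small, self-contained usage sketch of the APIs above (to be run from user
# code at module level; ``my_leaf`` and ``fn`` are illustrative names):
#
#     import torch
#     import torch.fx
#
#     def my_leaf(x, y):
#         return x * x + y * y
#
#     torch.fx.wrap("my_leaf")  # must be called at the top level of a module
#
#     def fn(x, y):
#         return my_leaf(x, y) + 1
#
#     gm = torch.fx.symbolic_trace(fn)
#     # ``my_leaf`` shows up as a single call_function node instead of being
#     # traced through.
#     assert any(n.op == "call_function" and n.target is my_leaf
#                for n in gm.graph.nodes)
#     assert gm(2, 3) == fn(2, 3)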
| pytorch-master | torch/fx/_symbolic_trace.py |
# Nodes represent a definition of a value in our graph of operators.
from typing import TYPE_CHECKING, Union, Callable, Any, Tuple, List, Optional, Dict, Set
from ._compatibility import compatibility
from .immutable_collections import immutable_dict, immutable_list
import torch
import builtins
import types
import warnings
from torch.fx.operator_schemas import normalize_function, normalize_module, ArgsKwargsPair
if TYPE_CHECKING:
from .graph import Graph
__all__ = ['Node', 'map_arg', 'map_aggregate']
BaseArgumentTypes = Union[str, int, float, bool, complex, torch.dtype,
torch.Tensor, torch.device, torch.memory_format, torch.layout]
base_types = BaseArgumentTypes.__args__ # type: ignore[attr-defined]
Target = Union[Callable[..., Any], str]
Argument = Optional[Union[
Tuple[Any, ...], # actually Argument, but mypy can't represent recursive types
List[Any], # actually Argument
Dict[str, Any], # actually Argument
slice, # Slice[Argument, Argument, Argument], but slice is not a templated type in typing
'Node',
BaseArgumentTypes
]]
_side_effectful_functions: Set[Callable] = {
torch._assert,
torch.ops.profiler._record_function_enter,
torch.ops.profiler._record_function_enter_new,
torch.ops.profiler._record_function_exit}
# this is fixed on master, WAR for 1.5
def _find_module_of_method(orig_method: Callable[..., Any]) -> str:
name = orig_method.__name__
module = orig_method.__module__
if module is not None:
return module
for guess in [torch, torch.nn.functional]:
if getattr(guess, name, None) is orig_method:
return guess.__name__
raise RuntimeError(f'cannot find module for {orig_method}')
# Borrowed from CPython typing module
# https://github.com/python/cpython/blob/f90dc36c15d7fee0efaf6d39e97be0bdf2683e93/Lib/typing.py#L156
def _type_repr(obj):
"""Return the repr() of an object, special-casing types (internal helper).
If obj is a type, we return a shorter version than the default
type.__repr__, based on the module and qualified name, which is
typically enough to uniquely identify a type. For everything
else, we fall back on repr(obj).
"""
if isinstance(obj, type):
if obj.__module__ == 'builtins':
return obj.__qualname__
return f'{obj.__module__}.{obj.__qualname__}'
if obj is ...:
return('...')
if isinstance(obj, types.FunctionType):
return obj.__name__
return repr(obj)
def _get_qualified_name(func: Callable[..., Any]) -> str:
# things like getattr just appear in builtins
if getattr(builtins, func.__name__, None) is func:
return func.__name__
name = func.__name__
module = _find_module_of_method(func)
module = module.replace('torch._ops', 'torch.ops') # WAR for bug in how torch.ops assigns module
return f'{module}.{name}'
def _format_arg(arg, max_list_len=float('inf')) -> str:
if hasattr(arg, '_custom_fx_repr_fn'):
return arg._custom_fx_repr_fn()
elif isinstance(arg, list):
items = ', '.join(_format_arg(a) for idx, a in enumerate(arg) if idx < max_list_len)
maybe_len = '' if len(arg) < max_list_len + 1 else f', ...[total_len={len(arg)}]'
return f'[{items}{maybe_len}]'
elif isinstance(arg, tuple):
items = ', '.join(_format_arg(a) for idx, a in enumerate(arg) if idx < max_list_len)
maybe_len = '' if len(arg) < max_list_len + 1 else f', ...[total_len={len(arg)}]'
maybe_comma = ',' if len(arg) == 1 else ''
return f'({items}{maybe_comma}{maybe_len})'
elif isinstance(arg, dict):
items_str = ', '.join(f'{k}: {_format_arg(v)}' for k, v in arg.items())
return f'{{{items_str}}}'
if isinstance(arg, Node):
return '%' + str(arg)
else:
return str(arg)
@compatibility(is_backward_compatible=True)
class Node:
"""
``Node`` is the data structure that represents individual operations within
a ``Graph``. For the most part, Nodes represent callsites to various entities,
such as operators, methods, and Modules (some exceptions include nodes that
specify function inputs and outputs). Each ``Node`` has a function specified
by its ``op`` property. The ``Node`` semantics for each value of ``op`` are as follows:
- ``placeholder`` represents a function input. The ``name`` attribute specifies the name this value will take on.
``target`` is similarly the name of the argument. ``args`` holds either: 1) nothing, or 2) a single argument
denoting the default parameter of the function input. ``kwargs`` is don't-care. Placeholders correspond to
the function parameters (e.g. ``x``) in the graph printout.
- ``get_attr`` retrieves a parameter from the module hierarchy. ``name`` is similarly the name the result of the
fetch is assigned to. ``target`` is the fully-qualified name of the parameter's position in the module hierarchy.
``args`` and ``kwargs`` are don't-care
- ``call_function`` applies a free function to some values. ``name`` is similarly the name of the value to assign
to. ``target`` is the function to be applied. ``args`` and ``kwargs`` represent the arguments to the function,
following the Python calling convention
- ``call_module`` applies a module in the module hierarchy's ``forward()`` method to given arguments. ``name`` is
as previous. ``target`` is the fully-qualified name of the module in the module hierarchy to call.
``args`` and ``kwargs`` represent the arguments to invoke the module on, *including the self argument*.
    - ``call_method`` calls a method on a value. ``name`` is as previous. ``target`` is the string name of the method
      to apply to the ``self`` argument. ``args`` and ``kwargs`` represent the arguments to invoke the method on,
*including the self argument*
- ``output`` contains the output of the traced function in its ``args[0]`` attribute. This corresponds to the "return" statement
in the Graph printout.
"""
@compatibility(is_backward_compatible=True)
def __init__(self, graph: 'Graph', name: str, op: str, target: 'Target',
args: Tuple['Argument', ...], kwargs: Dict[str, 'Argument'],
return_type : Optional[Any] = None) -> None:
"""
Instantiate an instance of ``Node``. Note: most often, you want to use the
Graph APIs, i.e. ``Graph.call_module``, ``Graph.call_method``, etc. rather
than instantiating a ``Node`` directly.
Args:
graph (Graph): The ``Graph`` to which this ``Node`` should belong.
name (str): The name to which the output of this ``Node`` should be assigned
op (str): The opcode for this ``Node``. Can be one of 'placeholder',
'call_method', 'call_module', 'call_function', 'get_attr',
'output'
target ('Target'): The target this op should call. See the broader
``Node`` docstring for more details.
args (Tuple['Argument']): The args to be passed to ``target``
kwargs (Dict[str, 'Argument']): The kwargs to be passed to ``target``
return_type (Optional[Any]): The python type expression representing the
type of the output of this node. This field can be used for
annotation of values in the generated code or for other types
of analyses.
"""
self.graph = graph
self.name = name # unique name of value being created
assert op in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output', 'root']
self.op = op # the kind of operation = placeholder|call_method|call_module|call_function|get_attr
if op == 'call_function':
if not callable(target):
raise ValueError(f'Node [graph = {graph}, name = \'{name}\'] target {target} has type {torch.typename(target)} '
'but a Callable is expected')
else:
if not isinstance(target, str):
raise ValueError(f'Node [graph = {graph}, name = \'{name}\'] target {target} has type {torch.typename(target)} '
'but a str is expected')
self.target = target # for method/module/function, the name of the method/module/function/attr
# being invoked, e.g add, layer1, or torch.add
# All `Node`-valued inputs. Key is the Node, value is don't-care.
# The public API for this is `all_input_nodes`, this private attribute
# should not be accessed directly.
self._input_nodes : Dict[Node, None] = {}
self.__update_args_kwargs(map_arg(args, lambda x: x), map_arg(kwargs, lambda x: x)) # type: ignore[arg-type]
# All of the nodes that use the value produced by this Node
        # Note one user may correspond to several uses, e.g. the node for ``x + x``
# would appear once here, but represents two uses.
#
        # Is a dict to act as an "ordered set". Keys are significant, values are don't-care
self.users : Dict['Node', None] = {}
# Type expression representing the output value of this node.
# This should contain the same class of Type objects that would appear
# as type annotations for function inputs/outputs.
#
# For placeholder nodes, this value will be used to type-annotate the
# generated function parameters.
# For the return node, this value will be used to type-annotate the
# generated function return type. (Note this is a special case. ``return``
# does not produce a value, it's more of a notation. Thus, this value
        # describes the type of args[0] in the ``return`` node.)
self.type : Optional[Any] = return_type
self._prev = self
self._next = self
self._erased = False
# If set, use this fn to print this node
self._repr_fn : Optional[Callable[[Node], str]] = None
        # Dictionary to store metadata that passes need to do their
        # transformations. This metadata is preserved across node copies.
self.meta : Dict[str, Any] = {}
@property
def next(self) -> 'Node':
"""
Returns the next ``Node`` in the linked list of Nodes.
Returns:
The next ``Node`` in the linked list of Nodes.
"""
return self._next
@property
def prev(self) -> 'Node':
"""
Returns the previous ``Node`` in the linked list of Nodes.
Returns:
The previous ``Node`` in the linked list of Nodes.
"""
return self._prev
@compatibility(is_backward_compatible=True)
def prepend(self, x: 'Node') -> None:
"""
Insert x before this node in the list of nodes in the graph. Example::
Before: p -> self
bx -> x -> ax
After: p -> x -> self
bx -> ax
Args:
x (Node): The node to put before this node. Must be a member of the same graph.
"""
assert self.graph == x.graph, "Attempting to move a Node into a different Graph"
if self == x:
warnings.warn("Trying to prepend a node to itself. This behavior has no effect on the graph.")
return
x._remove_from_list()
p = self._prev
p._next, x._prev = x, p
x._next, self._prev = self, x
@compatibility(is_backward_compatible=True)
def append(self, x: 'Node') -> None:
"""
Insert ``x`` after this node in the list of nodes in the graph.
Equivalent to ``self.next.prepend(x)``
Args:
x (Node): The node to put after this node. Must be a member of the same graph.
"""
self._next.prepend(x)
def _remove_from_list(self):
p, n = self._prev, self._next
p._next, n._prev = n, p
@property
def args(self) -> Tuple[Argument, ...]:
"""
The tuple of arguments to this ``Node``. The interpretation of arguments
depends on the node's opcode. See the :class:`Node` docstring for more
information.
Assignment to this property is allowed. All accounting of uses and users
is updated automatically on assignment.
"""
return self._args
@args.setter
def args(self, a : Tuple[Argument, ...]):
"""
Set the tuple of arguments to this Node. The interpretation of arguments
depends on the node's opcode. See the ``fx.Graph`` docstring for more
information.
"""
# DO NOT CALL `__update_args_kwargs` directly. The correct way to
# set `args` is via direct assignment, i.e. `node.args = new_args`
self.__update_args_kwargs(map_arg(a, lambda x: x), self._kwargs) # type: ignore[arg-type]
@property
def kwargs(self) -> Dict[str, Argument]:
"""
The dict of keyword arguments to this ``Node``. The interpretation of arguments
depends on the node's opcode. See the :class:`Node` docstring for more
information.
Assignment to this property is allowed. All accounting of uses and users
is updated automatically on assignment.
"""
return self._kwargs
@kwargs.setter
def kwargs(self, k : Dict[str, Argument]):
"""
Set the dict of kwargs to this Node. The interpretation of arguments
depends on the node's opcode. See the ``fx.Graph`` docstring for more
information.
"""
# DO NOT CALL `__update_args_kwargs` directly. The correct way to
        # set `kwargs` is via direct assignment, i.e. `node.kwargs = new_kwargs`
self.__update_args_kwargs(self._args, map_arg(k, lambda x: x)) # type: ignore[arg-type]
@property
def all_input_nodes(self) -> List['Node']:
"""
Return all Nodes that are inputs to this Node. This is equivalent to
iterating over ``args`` and ``kwargs`` and only collecting the values that
are Nodes.
Returns:
List of ``Nodes`` that appear in the ``args`` and ``kwargs`` of this
``Node``, in that order.
"""
return list(self._input_nodes.keys())
@compatibility(is_backward_compatible=True)
def update_arg(self, idx : int, arg : Argument) -> None:
"""
Update an existing positional argument to contain the new value
``arg``. After calling, ``self.args[idx] == arg``.
Args:
idx (int): The index into ``self.args`` of the element to update
arg (Argument): The new argument value to write into ``args``
"""
args = list(self.args)
args[idx] = arg
self.args = tuple(args)
@compatibility(is_backward_compatible=True)
def update_kwarg(self, key : str, arg : Argument) -> None:
"""
Update an existing keyword argument to contain the new value
``arg``. After calling, ``self.kwargs[key] == arg``.
Args:
key (str): The key in ``self.kwargs`` of the element to update
arg (Argument): The new argument value to write into ``kwargs``
"""
kwargs = dict(self.kwargs)
kwargs[key] = arg
self.kwargs = kwargs
@property
def stack_trace(self) -> Optional[str]:
"""
Return the Python stack trace that was recorded during tracing, if any.
This property is usually populated by `Tracer.create_proxy`. To record
stack traces during tracing for debug purposes, set
`record_stack_traces = True` on the `Tracer` instance.
"""
return self.meta.get("stack_trace", None)
@stack_trace.setter
def stack_trace(self, trace : Optional[str]):
self.meta["stack_trace"] = trace
def __update_args_kwargs(self, new_args : Tuple['Argument', ...], new_kwargs : Dict[str, 'Argument']):
"""
This API is internal. Do *not* call it directly.
"""
self._args = new_args
self._kwargs = new_kwargs
for old_use in self._input_nodes.keys():
old_use.users.pop(self)
self._input_nodes = {}
map_arg(self._args, lambda n: self._input_nodes.setdefault(n))
map_arg(self._kwargs, lambda n: self._input_nodes.setdefault(n))
for new_use in self._input_nodes.keys():
new_use.users.setdefault(self)
def __repr__(self) -> str:
if self._repr_fn:
return self._repr_fn(self)
return self.name
def _pretty_print_target(self, target):
"""
Make target printouts more user-friendly.
1) builtins will be printed as `builtins.xyz`
2) operators will be printed as `operator.xyz`
        3) other callables will be printed with their qualified name, e.g. torch.add
"""
if isinstance(target, str):
return target
if hasattr(target, '__module__'):
if not hasattr(target, '__name__'):
# Just to be defensive, if we don't have `__name__`, get the
# qualname. Not sure if this happens for any members of `operator`
# or `builtins`. This fallback path is not as good, since e.g.
# things in `operator` have `_operator` as their __module__.
return _get_qualified_name(target)
if target.__module__ == 'builtins':
return f'builtins.{target.__name__}'
elif target.__module__ == '_operator':
return f'operator.{target.__name__}'
return _get_qualified_name(target)
@compatibility(is_backward_compatible=True)
def format_node(self,
placeholder_names: Optional[List[str]] = None,
maybe_return_typename: Optional[List[str]] = None) -> Optional[str]:
"""
Return a descriptive string representation of ``self``.
This method can be used with no arguments as a debugging
utility.
This function is also used internally in the ``__str__`` method
of ``Graph``. Together, the strings in ``placeholder_names``
and ``maybe_return_typename`` make up the signature of the
autogenerated ``forward`` function in this Graph's surrounding
GraphModule. ``placeholder_names`` and ``maybe_return_typename``
should not be used otherwise.
Args:
placeholder_names: A list that will store formatted strings
representing the placeholders in the generated
``forward`` function. Internal use only.
maybe_return_typename: A single-element list that will store
a formatted string representing the output of the
generated ``forward`` function. Internal use only.
Returns:
str: If 1) we're using ``format_node`` as an internal helper
in the ``__str__`` method of ``Graph``, and 2) ``self``
is a placeholder Node, return ``None``. Otherwise,
return a descriptive string representation of the
current Node.
"""
if self.op == 'placeholder':
assert isinstance(self.target, str)
arg_str = self.target
            arg_str += f': {_type_repr(self.type)}' if self.type else ''
if placeholder_names:
placeholder_names.append(arg_str)
return None
maybe_typename = f'{_type_repr(self.type)} ' if self.type else ''
default_val = '(default=' + str(self.args[0]) + ')' if self.args else ''
return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = {self.op}[target={self.target}]{default_val}'
elif self.op == 'get_attr':
maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else ''
return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = ' \
f'{self.op}[target={self._pretty_print_target(self.target)}]'
elif self.op == 'output':
if self.type and maybe_return_typename:
maybe_return_typename[0] = f' -> {_type_repr(self.type)}'
return f'return {self.args[0]}'
else:
maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else ''
return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = ' \
f'{self.op}[target={self._pretty_print_target(self.target)}](' \
f'args = {_format_arg(self.args)}, kwargs = {_format_arg(self.kwargs)})'
@compatibility(is_backward_compatible=True)
def replace_all_uses_with(self,
replace_with : 'Node',
delete_user_cb: Callable[['Node'], bool] = lambda user: True
) -> List['Node']:
"""
Replace all uses of ``self`` in the Graph with the Node ``replace_with``.
Args:
replace_with (Node): The node to replace all uses of ``self`` with.
delete_user_cb (Callable): Callback that is called to determine
whether a given user of the self node should be removed.
Returns:
The list of Nodes on which this change was made.
"""
to_process = list(self.users)
skipped = []
for use_node in to_process:
if not delete_user_cb(use_node):
skipped.append(use_node)
continue
def maybe_replace_node(n : Node) -> Node:
if n == self:
return replace_with
else:
return n
new_args = map_arg(use_node.args, maybe_replace_node)
new_kwargs = map_arg(use_node.kwargs, maybe_replace_node)
assert isinstance(new_args, tuple)
assert isinstance(new_kwargs, dict)
use_node.__update_args_kwargs(new_args, new_kwargs)
assert len(self.users) - len(skipped) == 0
return [n for n in to_process if n not in skipped]
@compatibility(is_backward_compatible=False)
def is_impure(self):
"""
Returns whether this op is impure, i.e. if its op is a placeholder or
output, or if a call_function or call_module which is impure.
Returns:
bool: If the op is impure or not.
"""
if self.op in {"placeholder", "output"}:
return True
# Check if an impure function.
if self.op == "call_function":
return self.target in _side_effectful_functions
# Check if an impure module.
if self.op == "call_module":
assert (
self.graph.owning_module is not None
), "self.graph.owning_module not set for purity check"
target_mod = self.graph.owning_module.get_submodule(self.target)
assert (
target_mod is not None
), f"Did not find expected submodule target {self.target}"
return getattr(target_mod, "_is_impure", False)
return False
@compatibility(is_backward_compatible=False)
def normalized_arguments(
self, root : torch.nn.Module, arg_types : Optional[Tuple[Any]] = None,
kwarg_types : Optional[Dict[str, Any]] = None,
normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
"""
Returns normalized arguments to Python targets. This means that
`args/kwargs` will be matched up to the module/functional's
signature and return exclusively kwargs in positional order
if `normalize_to_only_use_kwargs` is true.
Also populates default values. Does not support positional-only
parameters or varargs parameters.
Supports module calls.
May require `arg_types` and `kwarg_types` in order to disambiguate overloads.
Args:
root (torch.nn.Module): Module upon which to resolve module targets.
arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args
kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs
normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
Returns:
Returns NamedTuple ArgsKwargsPair, or `None` if not successful.
"""
if self.op == 'call_function':
assert callable(self.target)
return normalize_function(self.target, self.args, self.kwargs, arg_types, kwarg_types) # type: ignore[arg-type]
elif self.op == 'call_module':
assert isinstance(self.target, str)
return normalize_module(root, self.target, self.args, self.kwargs) # type: ignore[arg-type]
return None
@compatibility(is_backward_compatible=True)
def replace_input_with(self, old_input: 'Node', new_input: 'Node'):
"""
Loop through input nodes of ``self``, and replace all instances of
``old_input`` with ``new_input``.
Args:
old_input (Node): The old input node to be replaced.
new_input (Node): The new input node to replace ``old_input``.
"""
def maybe_replace_node(n : Node) -> Node:
return new_input if n == old_input else n
new_args = map_arg(self.args, maybe_replace_node)
new_kwargs = map_arg(self.kwargs, maybe_replace_node)
assert isinstance(new_args, tuple)
assert isinstance(new_kwargs, dict)
self.__update_args_kwargs(new_args, new_kwargs)
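# A minimal sketch of inspecting ``Node`` relationships on a traced module
# (``M`` is an illustrative module defined in user code):
#
#     import torch
#     import torch.fx
#
#     class M(torch.nn.Module):
#         def forward(self, x):
#             return x + x
#
#     gm = torch.fx.symbolic_trace(M())
#     ph = next(n for n in gm.graph.nodes if n.op == "placeholder")
#     add = next(n for n in gm.graph.nodes if n.op == "call_function")
#     # ``x`` is used twice by the add, but appears once in ``users`` because
#     # ``users`` is keyed by Node, not by individual use.
#     assert list(ph.users) == [add]
#     assert add.all_input_nodes == [ph]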
@compatibility(is_backward_compatible=True)
def map_arg(a: Argument, fn: Callable[[Node], Argument]) -> Argument:
"""
    Apply fn to each Node appearing in arg. arg may be a list, tuple, slice, or dict with string keys.
"""
assert callable(fn), "torch.fx.map_arg(a, fn): fn must be a callable"
return map_aggregate(a, lambda x: fn(x) if isinstance(x, Node) else x)
@compatibility(is_backward_compatible=True)
def map_aggregate(a: Argument, fn: Callable[[Argument], Argument]) -> Argument:
"""
    Apply fn to each value appearing in arg. arg may be a list, tuple, slice, or dict with string keys.
"""
if isinstance(a, tuple):
t = tuple(map_aggregate(elem, fn) for elem in a)
# Support NamedTuple (if it has `_fields`) by repacking into original type.
return t if not hasattr(a, '_fields') else type(a)(*t)
elif isinstance(a, list):
return immutable_list(map_aggregate(elem, fn) for elem in a)
elif isinstance(a, dict):
return immutable_dict((k, map_aggregate(v, fn)) for k, v in a.items())
elif isinstance(a, slice):
return slice(map_aggregate(a.start, fn), map_aggregate(a.stop, fn), map_aggregate(a.step, fn))
else:
return fn(a)
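# A quick sketch of ``map_aggregate`` semantics on plain Python containers (no
# Graph required); lists and dicts are repacked into their immutable FX
# counterparts:
#
#     doubled = map_aggregate((1, [2, 3], {"k": 4}), lambda a: a * 2)
#     assert doubled == (2, [4, 6], {"k": 8})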
| pytorch-master | torch/fx/node.py |
from typing import Any, Dict
import textwrap
_BACK_COMPAT_OBJECTS : Dict[Any, None] = {}
_MARKED_WITH_COMATIBLITY : Dict[Any, None] = {}
def compatibility(is_backward_compatible : bool):
if is_backward_compatible:
def mark_back_compat(fn):
docstring = textwrap.dedent(getattr(fn, '__doc__', None) or '')
docstring += """
.. note::
Backwards-compatibility for this API is guaranteed.
"""
fn.__doc__ = docstring
_BACK_COMPAT_OBJECTS.setdefault(fn)
_MARKED_WITH_COMATIBLITY.setdefault(fn)
return fn
return mark_back_compat
else:
def mark_not_back_compat(fn):
docstring = textwrap.dedent(getattr(fn, '__doc__', None) or '')
docstring += """
.. warning::
This API is experimental and is *NOT* backward-compatible.
"""
fn.__doc__ = docstring
_MARKED_WITH_COMATIBLITY.setdefault(fn)
return fn
return mark_not_back_compat
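# A minimal sketch of applying the decorator (``public_api`` is an
# illustrative name); the note is appended to the docstring and the function
# is registered in the tracking dicts:
#
#     @compatibility(is_backward_compatible=True)
#     def public_api(x):
#         """Adds one."""
#         return x + 1
#
#     assert public_api in _BACK_COMPAT_OBJECTS
#     assert "Backwards-compatibility" in public_api.__doc__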
| pytorch-master | torch/fx/_compatibility.py |
import torch
import torch.fx
import warnings
import functools
import builtins
from typing import Any, Callable, Dict, Optional, Union
def embedding_override(self, input):
return torch.empty(*input.shape, self.weight.shape[-1], device='meta')
def nn_layernorm_override(self, input):
return input
def torch_relu_override(x):
return x
def torch_nn_relu_override(self, x):
return x
def functional_relu_override(x, inplace=False):
assert not inplace, 'dont support inplace functional.relu for metatensor analysis'
return x
def torch_where_override(condition, x, y):
# torch.where returns the broadcasted tensor of condition, x, and y,
# so hack it by using addition
return condition.to(device='meta') + x.to(device='meta') + y.to(device='meta')
def torch_abs_override(input, *, out=None):
assert out is None, 'Dont support in-place abs for MetaTensor analysis'
return input
manual_meta_overrides : Dict[Callable, Callable] = {
torch.nn.Embedding: embedding_override,
torch.nn.LayerNorm: nn_layernorm_override,
torch.relu: torch_relu_override,
torch.nn.functional.relu: functional_relu_override,
torch.nn.ReLU: torch_nn_relu_override,
torch.where: torch_where_override,
torch.abs: torch_abs_override,
}
def gen_constructor_wrapper(target):
@functools.wraps(target)
def wrapper(*args, **kwargs):
proxy = None
def check_has_proxy(v):
if isinstance(v, torch.fx.Proxy):
nonlocal proxy
proxy = v
torch.fx.node.map_aggregate(args, check_has_proxy)
torch.fx.node.map_aggregate(kwargs, check_has_proxy)
if proxy is not None:
return proxy.tracer.create_proxy('call_function', target, args, kwargs)
else:
return target(*args, **kwargs)
return wrapper, target
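# A quick sketch of the wrapper's eager code path: when no Proxy is present in
# the arguments, the original constructor simply runs:
#
#     wrapped_ones, orig_ones = gen_constructor_wrapper(torch.ones)
#     t = wrapped_ones(2, 3)            # no Proxy -> real torch.ones runs
#     assert orig_ones is torch.ones and t.shape == (2, 3)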
class MetaProxy(torch.fx.Proxy):
def install_tensor_meta(self, tensor_meta):
self._tensor_meta = tensor_meta
def size(self, dim=None):
if hasattr(self, '_tensor_meta') and self._tensor_meta is not None:
return self._tensor_meta.size(*[dim] if dim else [])
return self.tracer.create_proxy('call_method', 'size', (self, dim) if dim else (self,), {})
def dim(self):
if hasattr(self, '_tensor_meta') and self._tensor_meta is not None:
return self._tensor_meta.dim()
return self.tracer.create_proxy('call_method', 'dim', (self,), {})
@property
def shape(self):
if hasattr(self, '_tensor_meta') and self._tensor_meta is not None:
return self._tensor_meta.shape
return self.tracer.create_proxy('call_function', builtins.getattr, (self, 'shape'), {})
@property
def dtype(self):
if hasattr(self, '_tensor_meta') and self._tensor_meta is not None:
return self._tensor_meta.dtype
return self.tracer.create_proxy('call_function', builtins.getattr, (self, 'dtype'), {})
@property
def device(self):
# Hack so we can track when devices are used. During meta-tensor propagation,
# replace these values with a constant 'meta'
return MetaDeviceAttribute(self, 'device')
def __getattr__(self, k):
if k == '_tensor_meta':
return self.__getattribute__(k)
# note: not added to the graph yet, if this is a method call
# we peephole optimize to the method invocation
return MetaAttribute(self, k)
class MetaAttribute(MetaProxy):
def __init__(self, root, attr: str):
self.root = root
self.attr = attr
self.tracer = root.tracer
self._node = None
@property
def node(self):
# the node for attributes is added lazily, since most will just be method calls
# which do not rely on the getitem call
if self._node is None:
self._node = self.tracer.create_proxy('call_function', getattr, (self.root, self.attr), {}).node
return self._node
def __call__(self, *args, **kwargs):
return self.tracer.create_proxy('call_method', self.attr, (self.root,) + args, kwargs)
class MetaDeviceAttribute(MetaAttribute):
pass
def proxys_to_metas(v):
if isinstance(v, MetaDeviceAttribute):
return 'meta'
if isinstance(v, torch.fx.Proxy):
assert isinstance(v, MetaProxy), f'Expected MetaProxy but got {type(v)}'
assert hasattr(v, '_tensor_meta'), 'MetaProxy does not have an associated meta'
return v._tensor_meta
return v
class MetaTracer(torch.fx.Tracer):
allow_insert_stateless_mods : bool = True
_TORCH_METHODS_TO_PATCH = ['arange', 'zeros', 'ones', 'full_like', 'eye']
def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, proxy_factory_fn=None):
rv = super().create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn)
if kind == 'placeholder' and target in self.meta_args:
rv.install_tensor_meta(self.meta_args[target])
return rv
if target in self.orig_fns:
# NOTE: tensor constructors in PyTorch define the `device` argument as
# *kwargs-only*. That is why this works. If you add methods to
# _TORCH_METHODS_TO_PATCH that do not define `device` as kwarg-only,
# this will break and you will likely see issues where we cannot infer
# the size of the output.
if 'device' in kwargs:
kwargs['device'] = 'meta'
try:
args_metas = torch.fx.node.map_aggregate(args, proxys_to_metas)
kwargs_metas = torch.fx.node.map_aggregate(kwargs, proxys_to_metas)
if kind == 'call_function':
meta_target = manual_meta_overrides.get(target, target)
meta_out = meta_target(*args_metas, **kwargs_metas)
elif kind == 'call_method':
meta_out = getattr(args_metas[0], target)(*args_metas[1:], **kwargs_metas)
elif kind == 'call_module':
assert hasattr(self, 'orig_forward')
self._disable_module_getattr = True
try:
mod = self.root.get_submodule(target)
mod_type = type(mod)
if mod_type in manual_meta_overrides:
meta_out = manual_meta_overrides[mod_type](mod, *args_metas, **kwargs_metas)
else:
meta_out = self.orig_forward(*args_metas, **kwargs_metas)
finally:
self._disable_module_getattr = False
elif kind == 'get_attr':
self._disable_module_getattr = True
try:
attr_itr = self.root
atoms = target.split('.')
for atom in atoms:
attr_itr = getattr(attr_itr, atom)
assert isinstance(attr_itr, torch.Tensor)
meta_out = attr_itr.to(device='meta')
finally:
self._disable_module_getattr = False
else:
return rv
# TODO
assert isinstance(rv, torch.fx.Proxy), 'Dont support composite output yet'
rv.install_tensor_meta(meta_out)
except Exception as e:
warnings.warn(f'Could not compute metadata for {kind} target {target}: {e}')
return rv
def _module_getattr(self, attr, attr_val, parameter_proxy_cache):
if getattr(self, '_disable_module_getattr', False):
return attr_val
else:
return super()._module_getattr(attr, attr_val, parameter_proxy_cache)
def call_module(self, m, forward, args, kwargs):
self.orig_forward = forward
return super().call_module(m, forward, args, kwargs)
def _insert_module_as_submodule(self, mod: torch.nn.Module) -> str:
"""
Helper method which tries to insert a module that was not declared as submodule.
"""
idx = 0
mod_name = mod.__class__.__name__.lower()
path = f"{mod_name}_{idx}"
while hasattr(self.root, path):
path = f"{mod_name}_{idx}"
idx += 1
self.root.add_module(path, mod)
return path
def path_of_module(self, mod: torch.nn.Module) -> str:
try:
return super().path_of_module(mod)
except NameError as e:
if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and len(list(mod.buffers())) == 0:
path = self._insert_module_as_submodule(mod)
self.prev_module = path
return path
raise
def proxy(self, node):
return MetaProxy(node, self)
def trace(self, root, meta_args : Dict[str, torch.Tensor], concrete_args=None):
assert isinstance(meta_args, dict)
self.meta_args = meta_args
self.patched_torch_methods = {
target: gen_constructor_wrapper(getattr(torch, target)) for target in self._TORCH_METHODS_TO_PATCH
}
self.orig_fns = set()
for name, (wrapper, orig) in self.patched_torch_methods.items():
setattr(torch, name, wrapper)
self.orig_fns.add(orig)
try:
graph = super().trace(root, concrete_args)
graph._tracer_extras = {'meta_args': meta_args}
return graph
finally:
for name, (_, orig) in self.patched_torch_methods.items():
setattr(torch, name, orig)
def symbolic_trace(root : Union[torch.nn.Module, Callable[..., Any]],
                   meta_args : Optional[Dict[str, torch.Tensor]] = None,
concrete_args: Optional[Dict[str, Any]] = None) -> torch.fx.GraphModule:
tracer = MetaTracer()
graph = tracer.trace(root, meta_args, concrete_args)
name = root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__
gm = torch.fx.GraphModule(tracer.root, graph, name)
return gm
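# A minimal usage sketch for the meta tracer defined above (illustrative
# module; the ``meta_args`` entry is a tensor on the 'meta' device so shapes
# can be propagated without real data):
#
#     import torch
#
#     class M(torch.nn.Module):
#         def forward(self, x):
#             return torch.relu(x) + 1
#
#     gm = symbolic_trace(M(), meta_args={"x": torch.empty(8, 4, device="meta")})
#     print(gm.graph)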
| pytorch-master | torch/fx/experimental/meta_tracer.py |
from enum import Enum
from typing import NamedTuple, Dict, List, Set
from torch.fx.node import Node, map_arg
class Partition:
"""Partition class contains all the information about an individual partition.
It also provides necessary methods for manipulation the partition.
"""
def __init__(self, partition_id: int) -> None:
self.nodes: Set[Node] = set()
self.partition_id = partition_id
self.parents: Set["Partition"] = set()
self.children: Set["Partition"] = set()
self.bfs_level: int = -1
self.used_mem_bytes: int = 0
self.logical_device_ids: List[int] = []
def __str__(self):
return str(self.partition_id)
def recalculate_mem_size(self):
self.used_mem_bytes = 0
for node in self.nodes:
self.used_mem_bytes += get_extra_size_of(node, self.nodes)
def add_node(self, node):
input_nodes: Dict[Node, None] = {}
map_arg(node.args, lambda n: input_nodes.setdefault(n))
map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))
# Add current node's input nodes if they are placeholder or constants
for n in input_nodes:
if n.op in {"placeholder", "get_attr"}:
self.nodes.add(n)
self.nodes.add(node)
self.recalculate_mem_size()
def remove_node(self, node):
# Remove a node only if the node is in the partition
if node in self.nodes:
self.nodes.remove(node)
# Collect the node's input nodes
input_nodes: Dict[Node, None] = {}
map_arg(node.args, lambda n: input_nodes.setdefault(n))
map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))
# Check if an input node is a placeholder or get_attr,
# and this input node is not used by some other nodes in this partition,
            # then remove this input node
for input_node in input_nodes:
if all(
[n not in self.nodes for n in input_node.users]
) and input_node.op in {"placeholder", "get_attr"}:
self.nodes.remove(input_node)
self.recalculate_mem_size()
class Device(NamedTuple):
name: str
available_mem_bytes: int
logical_id: int
class NodeLatency(NamedTuple):
# Latency due to the memory bandwidth
mem_latency_sec: float
# Latency due to the computation
computer_latency_sec: float
class PartitionLatency(NamedTuple):
# Sum of all nodes' memory latency on the critical path
mem_latency_sec: float
# Sum of all nodes' compute latency on the critical path
computer_latency_sec: float
# Latency of the critical path
overall_latency_sec: float
class PartitionMode(Enum):
size_based = 0
sparse_nn = 1
cost_aware = 2
kl_based = 3
aot_based = 4
class PartitionerConfig(NamedTuple):
devices: List[Device]
mode: PartitionMode = PartitionMode.size_based
transfer_rate_bytes_per_sec: float = 0.0
node_to_latency_mapping: Dict[Node, NodeLatency] = {}
node_to_partition_mapping: Dict[Node, int] = {}
partition_to_logical_device_mapping: Dict[int, List[int]] = {}
# Saturate host by replicating partitions to the remaining idle devices.
saturate_host: bool = False
def get_extra_size_of(node: Node, nodes: Set[Node]) -> int:
"""Given a node and a set of nodes,
    this function returns the extra size needed
    if this node is included in this set.
"""
# Find all its input nodes
input_nodes: Dict[Node, None] = {}
map_arg(node.args, lambda n: input_nodes.setdefault(n))
map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))
# Calculate total size of related nodes
total_size_of_input_nodes = 0
for n in input_nodes:
# Make sure this node hasn't been in this set yet
if n not in nodes:
size_bytes = getattr(n, "size_bytes", None)
if size_bytes:
total_size_of_input_nodes += size_bytes.output_size
else:
raise RuntimeError("node has no size_bytes attr")
# Don't forget the op node itself
size_bytes = getattr(node, "size_bytes", None)
if size_bytes:
total_size_of_input_nodes += size_bytes.total_size
else:
raise RuntimeError("node has no size_bytes attr")
return total_size_of_input_nodes
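# ``size_bytes`` is not populated by FX itself; it is expected to be attached
# to each Node beforehand by a size-propagation pass. A hand-rolled sketch of
# satisfying that contract with a hypothetical namedtuple exposing the two
# fields read above:
#
#     from collections import namedtuple
#     SizeBytes = namedtuple("SizeBytes", ["output_size", "total_size"])
#     for n in gm.graph.nodes:
#         n.size_bytes = SizeBytes(output_size=4, total_size=8)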
def get_latency_of_one_partition(
partition: Partition, node_to_latency_mapping: Dict[Node, NodeLatency]
) -> PartitionLatency:
"""Given a partiton and its nodes' latency, return a PartitionLatency for this partition"""
def get_top_nodes(partition: Partition) -> List[Node]:
"""Given a partition, return a list of nodes on the top bfs level"""
top_nodes: List[Node] = []
for node in partition.nodes:
# Skip placeholder and get_attr nodes
if node.op in {"placeholder", "get_attr"}:
continue
input_nodes: Dict[Node, None] = {}
map_arg(node.args, lambda n: input_nodes.setdefault(n))
map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))
# If a node has no input nodes in this partition,
# or its input nodes in this partition are placeholders and get_attrs
# this node is on the top bfs level in this partition
if not any(
[
n in partition.nodes and n.op not in {"placeholder", "get_attr"}
for n in input_nodes
]
):
top_nodes.append(node)
return top_nodes
def dfs_helper(node: Node, partition_latency) -> PartitionLatency:
"""Given a top node of a partition, this function returns
the latency of the critical path in the partition
"""
node_latency = node_to_latency_mapping[node]
# Calculate the current overall latency of the partition
overall_latency_sec = partition_latency.overall_latency_sec + max(
node_latency.computer_latency_sec, node_latency.mem_latency_sec
)
# Update the mem latency of this path
mem_latency_sec = (
partition_latency.mem_latency_sec + node_latency.mem_latency_sec
)
# Update the compute latency of this path
computer_latency_sec = (
partition_latency.computer_latency_sec + node_latency.computer_latency_sec
)
# Get all users of this node that are in this partition
users = set(node.users).intersection(partition.nodes)
if users:
max_latency = PartitionLatency(
mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0
)
for n in users:
# Get new partition latency recursively
new_partition_latency = dfs_helper(
n,
PartitionLatency(
mem_latency_sec, computer_latency_sec, overall_latency_sec
),
)
if (
new_partition_latency.overall_latency_sec
> max_latency.overall_latency_sec
):
max_latency = new_partition_latency
return max_latency
# If there is no user, the node is at bottom of the partition
return PartitionLatency(
mem_latency_sec, computer_latency_sec, overall_latency_sec
)
# Main part starts
# Get all top level nodes of this partition
top_nodes = get_top_nodes(partition)
critical_path_latency = PartitionLatency(
mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0
)
    # Go through all top nodes and find the largest latency (critical path latency)
for node in top_nodes:
partition_latency = dfs_helper(
node,
PartitionLatency(
mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0
),
)
if (
partition_latency.overall_latency_sec
> critical_path_latency.overall_latency_sec
):
critical_path_latency = partition_latency
return critical_path_latency
def get_partition_to_latency_mapping(
partitions: List[Partition], node_to_latency_mapping: Dict[Node, NodeLatency]
) -> Dict[Partition, PartitionLatency]:
"""Given all the partitions and node_to_latency_mapping dictionary,
return a mapping dictionary of each partition to its overall latency
"""
partition_to_latency_mapping: Dict[Partition, PartitionLatency] = {}
# Go through each partition and get its latency
for partition in partitions:
partition_latency = get_latency_of_one_partition(
partition, node_to_latency_mapping
)
partition_to_latency_mapping[partition] = partition_latency
return partition_to_latency_mapping
def get_comm_latency_between(
parent_partition: Partition,
child_partition: Partition,
transfer_rate_bytes_per_sec: float,
):
"""Given two partitions (parent and child),
calculate the communication latency between the two.
"""
# If two partitions are on the same device, the comm latency is 0.
if (
parent_partition.logical_device_ids != []
and child_partition.logical_device_ids != []
and parent_partition.logical_device_ids == child_partition.logical_device_ids
):
return 0.0
# Keep tracking the communication size between parent and child
comm_size = 0
# Keep tracking all the counted node
visited_nodes = set()
# Go through all nodes in the child partition
# If a node has input nodes from the parent partition,
# the output size of those input nodes will be counted
# and added to comm_size
for node in child_partition.nodes:
input_nodes: Dict[Node, None] = {}
map_arg(node.args, lambda n: input_nodes.setdefault(n))
map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))
for n in input_nodes:
if n in parent_partition.nodes and n not in visited_nodes:
size_bytes = getattr(n, "size_bytes", None)
if size_bytes is not None:
comm_size += size_bytes.output_size
visited_nodes.add(n)
return comm_size / transfer_rate_bytes_per_sec
def get_latency_of_partitioned_graph(
partitions: List[Partition],
partition_to_latency_mapping: Dict[Partition, PartitionLatency],
transfer_rate_bytes_per_sec: float,
):
"""Given all paritions in a graph, find the critical path among all partitions
and return its latency as the latency of the whole graph
"""
def dfs_helper(partition: Partition, latency_so_far_sec: float) -> float:
"""This function helps to recursively get the latency of a path of partitions"""
# Update latency by adding current partition's latency
latency_so_far_sec += partition_to_latency_mapping[
partition
].overall_latency_sec
children = partition.children
if partition.children:
max_latency_sec = 0.0
for child in partition.children:
                # Calculate the communication latency between this partition and the child
comm_latency_sec = get_comm_latency_between(
partition, child, transfer_rate_bytes_per_sec
)
new_latency_sec = dfs_helper(
child, latency_so_far_sec + comm_latency_sec
)
if new_latency_sec > max_latency_sec:
max_latency_sec = new_latency_sec
return max_latency_sec
return latency_so_far_sec
def get_top_partitions(partitions: List[Partition]) -> List[Partition]:
"""This function is to return all the partitions without parents
as the starting points of all the paths
"""
top_partitions = []
for partition in partitions:
# If a partition has no parents, then it is a top partition
if len(partition.parents) == 0:
top_partitions.append(partition)
return top_partitions
top_partitions = get_top_partitions(partitions)
critical_path_latency_sec = 0.0
for partition in top_partitions:
latency_sec = dfs_helper(partition, 0.0)
if latency_sec > critical_path_latency_sec:
critical_path_latency_sec = latency_sec
return critical_path_latency_sec
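# A small sketch of building a ``PartitionerConfig`` for two hypothetical
# logical devices (the names and memory sizes are illustrative):
#
#     devices = [
#         Device(name="dev_0", available_mem_bytes=32 * 2 ** 20, logical_id=0),
#         Device(name="dev_1", available_mem_bytes=32 * 2 ** 20, logical_id=1),
#     ]
#     config = PartitionerConfig(devices, mode=PartitionMode.sparse_nn)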
| pytorch-master | torch/fx/experimental/partitioner_utils.py |
import ast
import inspect
import textwrap
import copy
import functools
from types import FunctionType
from typing import cast, Union, Callable, Dict, Optional, Any
from torch.fx._symbolic_trace import Tracer
from torch.fx.graph import Graph
from torch._sources import normalize_source_lines
import torch
class AST_Rewriter(ast.NodeTransformer):
"""
Take a FunctionType object representing a `forward` method, then
perform an AST rewrite to swap out nodes that are not symbolically
traceable with a callsite to the FX alternative.
To support swapping out an AST node, define a new `visit` method on
that node. For more details, see:
https://docs.python.org/3/library/ast.html#ast.NodeTransformer
"""
def rewrite(self, fn: FunctionType):
# Normalize the source lines
sourcelines, _ = inspect.getsourcelines(fn)
sourcelines = normalize_source_lines(sourcelines)
source = ''.join(sourcelines)
normalized_str = textwrap.dedent(source)
# Rewrite the original AST
source_ast = ast.parse(normalized_str)
dest_ast = ast.fix_missing_locations(self.visit(source_ast))
        # Pull out the compiled function from the newly-created Module
code = compile(dest_ast, "", "exec")
globals_dict = copy.copy(fn.__globals__)
keys_before = set(globals_dict.keys())
exec(code, globals_dict)
new_keys = list(set(globals_dict.keys()) - keys_before)
assert len(new_keys) == 1
fn_compiled = globals_dict[new_keys[0]]
# return the compiled function with the original globals
def change_func_globals(f, globals):
"""Based on https://stackoverflow.com/a/13503277/2988730 (@unutbu)"""
# __globals__ is a private member of the function class
            # so we have to copy the function, f, and all of its members, except f.__globals__
g = FunctionType(
f.__code__,
globals,
name=f.__name__,
argdefs=f.__defaults__,
closure=f.__closure__,
)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = copy.copy(f.__kwdefaults__)
return g
# Return the correct FunctionType object
return change_func_globals(fn_compiled, globals=fn.__globals__)
def visit_Assert(self, node):
"""
Swap out the Assert node (Python's `assert`) with a callsite to the
symbolically-traceable torch._assert function
"""
# Create the Call node
n = ast.parse('torch._assert()', mode='eval')
assert isinstance(n, ast.Expression)
call_node = n.body
assert isinstance(call_node, ast.Call)
msg = node.msg if node.msg else ast.Constant(value="", kind=None)
call_node.args = [node.test, msg]
# Ensure that the new node conforms to the Python AST grammar
expr_wrapper = ast.Expr(value=call_node)
# Return the new Call node to signify that we want to use it as
# a replacement for the original _assert node
return ast.copy_location(expr_wrapper, node)
def visit_AnnAssign(self, node):
"""
Swap out Python's AnnAssign with an Assign node where the annotation function is called.
Example:
Original:
y: Tensor_Type(1,2,3, Dyn) = f2(x)
Output:
y = annotate(f2(x),Tensor_Type((1,2,3,Dyn)))
"""
return ast.Assign(targets=[node.target], value=ast.Call(
func=ast.Name(id='annotate', ctx=ast.Load()),
args=[node.value, node.annotation], keywords=[]))
class RewritingTracer(Tracer):
def trace(self, root: Union[torch.nn.Module, Callable], concrete_args: Optional[Dict[str, Any]] = None) -> Graph:
return super().trace(_rewrite(root), concrete_args)
def _rewrite(fn: Union[torch.nn.Module, Callable]) -> Union[torch.nn.Module, Callable]:
if isinstance(fn, torch.nn.Module):
# Rewrite this module's `forward` as well as the `forward`s of
# all of this module's recursive descendents. Return the new,
# rewritten module hierarchy.
def rewrite_module(m : torch.nn.Module):
class RewrittenModule(torch.nn.Module):
def __init__(self, orig):
super().__init__()
for k, v in orig.__dict__.items():
if isinstance(v, torch.nn.Module):
self.__dict__[k] = copy.copy(rewrite_module(v))
else:
self.__dict__[k] = copy.copy(v)
RewrittenModule.forward = AST_Rewriter().rewrite(cast(FunctionType, m.forward))
return RewrittenModule(m)
return rewrite_module(fn)
else:
# Rewrite this single free function
return AST_Rewriter().rewrite(cast(FunctionType, fn))
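# A minimal sketch of the rewriter in action (``M`` is an illustrative user
# module): the Python ``assert`` in ``forward`` is rewritten into a traceable
# call to torch._assert before tracing:
#
#     import torch
#
#     class M(torch.nn.Module):
#         def forward(self, x):
#             assert x.shape[0] > 0, "empty batch"
#             return x + 1
#
#     graph = RewritingTracer().trace(M())
#     assert any(n.op == "call_function" and n.target is torch._assert
#                for n in graph.nodes)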
| pytorch-master | torch/fx/experimental/rewriter.py |
import re
from typing import Callable, Dict, Optional, Set, Union
import torch.fx
from torch.fx.node import map_arg
from torch.fx.passes.split_module import split_module
class FoldedGraphModule(torch.fx.GraphModule):
"""
FoldedGraphModule is a GraphModule which also contains another
`const_subgraph_module` representing a subgraph which has all const attr
inputs and which can be run once before running the main standard
`graph`. The `const_output_names` are the ordered list names of attrs which
represent what each respective output from the const_subgraph should be set
on which attrs.
"""
def __init__(
self,
root: torch.nn.Module,
graph: torch.fx.Graph,
const_subgraph: Optional[torch.fx.Graph] = None,
        fx_const_folded_attrs_name: Optional[str] = None,
device_for_folded_attrs: str = "cuda",
):
        # In init, we set the graph's owning module to root, which will make the
        # graph's owning module None because the graph already has an owning
        # module. We need the owning module to run DCE. To work around this, we
        # set the number of the graph's owners to 0.
graph._owners = 0
super().__init__(root, graph)
self.const_subgraph_module = (
None
if const_subgraph is None
else torch.fx.GraphModule(root, const_subgraph)
)
self.has_folding_been_run = False
self.fx_const_folded_attrs_name = fx_const_folded_attrs_name
self.device_for_folded_attrs = device_for_folded_attrs
def __call__(self, *args, **kwargs):
if not self.has_folding_been_run:
self.run_folding()
return super().__call__(*args)
def run_folding(self):
# If there's no const subgraph module or attr output names to use, return
# early as there is no const folding to perform.
if (
self.const_subgraph_module is None
or self.fx_const_folded_attrs_name is None
):
return
assert not self.has_folding_been_run
self.has_folding_been_run = True
# Actually run const folding subgraph. Note that single attr const fold
# subgraphs output a single Tensor while multiple outputs are returned as
# Tuple[Tensor,].
folded_attrs = self.const_subgraph_module()
def _create_param(i):
return torch.nn.Parameter(
i
if not isinstance(i, int)
else torch.Tensor([i]).to(device=self.device_for_folded_attrs),
requires_grad=i.requires_grad if isinstance(i, torch.Tensor) else False,
)
params = (
torch.nn.ParameterList([_create_param(i) for i in folded_attrs])
if isinstance(folded_attrs, tuple)
else _create_param(folded_attrs)
)
setattr(self, self.fx_const_folded_attrs_name, params)
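# A usage sketch for the const-folding entry point defined below,
# ``split_const_subgraphs`` (``M`` is an illustrative module; ``self.w + 1``
# depends only on constant attrs, so it gets folded out of the main graph):
#
#     import torch
#
#     class M(torch.nn.Module):
#         def __init__(self):
#             super().__init__()
#             self.w = torch.nn.Parameter(torch.randn(4, 4))
#
#         def forward(self, x):
#             return x + (self.w + 1)
#
#     folded = split_const_subgraphs(M())
#     out = folded(torch.randn(4, 4))   # folding runs once, on the first call
#     assert folded.has_folding_been_run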
def _inline_module(gm: torch.fx.GraphModule, inline_mod_name: str):
"""
Given `gm` and some graph module which is called with target name `inline_mod_name`,
this helper will inline all of the nodes from that called graph module into `gm`.
"""
# Fetch the inner graph module that we want to inline inside `gm`.
inline_mod = dict(gm.named_modules())[inline_mod_name]
assert isinstance(inline_mod, torch.fx.GraphModule)
call_mod_node_to_replace = None
for node in gm.graph.nodes:
if node.op == "call_module" and node.target == inline_mod_name:
call_mod_node_to_replace = node
break
assert call_mod_node_to_replace is not None
# Now actually do the swap. Note that we have to keep track of new nodes that are
# copied into `gm` -- we do this via replacement_mapping.
call_mod_args = call_mod_node_to_replace.args
replacement_mapping: Dict[torch.fx.Node, torch.fx.Node] = {}
ph_count = 0
def replacement_fn(node):
new_node = replacement_mapping[node]
new_node.meta = node.meta.copy()
return new_node
for inline_node in inline_mod.graph.nodes:
if inline_node.op == "placeholder":
replacement_mapping[inline_node] = call_mod_args[ph_count]
ph_count += 1
continue
if inline_node.op == "output":
outputs = inline_node.args[0]
output_replacements = map_arg(outputs, replacement_fn)
call_mod_node_to_replace.replace_all_uses_with(output_replacements)
continue
with gm.graph.inserting_before(call_mod_node_to_replace):
new_node = gm.graph.node_copy(inline_node, replacement_fn)
replacement_mapping[inline_node] = new_node
gm.graph.eliminate_dead_code()
def get_unique_attr_name_in_module(mod_traced: torch.fx.GraphModule, name: str) -> str:
"""
    Make sure the name is unique (in a module) and can represent an attr.
"""
    # Replace all characters that are illegal in a Python identifier with underscores.
name = re.sub("[^0-9a-zA-Z_]+", "_", name)
if name[0].isdigit():
name = f"_{name}"
# Now make sure it is in fact unique to the module by incrementing suffix value.
while hasattr(mod_traced, name):
match = re.match(r"(.*)_(\d+)$", name)
if match is None:
name = name + "_1"
else:
base, num = match.group(1, 2)
name = f"{base}_{int(num) + 1}"
return name
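# A tiny sketch of the name-uniquing helper above (the attribute names are
# illustrative):
#
#     import torch
#     import torch.fx
#
#     m = torch.fx.symbolic_trace(torch.nn.Identity())
#     assert get_unique_attr_name_in_module(m, "my-attr!") == "my_attr_"
#     m.my_attr_ = torch.nn.Parameter(torch.zeros(1))
#     assert get_unique_attr_name_in_module(m, "my-attr!") == "my_attr__1"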
def split_const_subgraphs(
module: Union[torch.nn.Module, torch.fx.GraphModule],
skip_folding_node_fn: Optional[Callable[[torch.fx.Node], bool]] = None,
device_for_folded_attrs: str = "cpu",
) -> FoldedGraphModule:
"""
Looks through `module` for any nodes that have all constant attribute inputs
and separates them out into their own constant subgraph, and returns a
FoldedGraphModule which runs that constant subgraph on the first run to set
attributes on the module prior to running the non-constant portion of the
graph.
"""
if not isinstance(module, torch.fx.GraphModule):
mod_traced = torch.fx.symbolic_trace(module)
else:
mod_traced = module
# Build up a list of const_nodes, defined as nodes that are themselves
# get_attrs, or have all get_attr or other constant node inputs.
const_nodes: Set[torch.fx.Node] = set()
found_const_folding = False
for node in mod_traced.graph.nodes:
# Skip over placeholders/outputs because they can't be const folded and
# we don't want to add tags to them.
if node.op in {"placeholder", "output"}:
continue
# If the node itself is constant, or all of its inputs are constant,
# then tag it as constant.
if node.op != "get_attr" and not set(node.all_input_nodes).issubset(
const_nodes
):
continue
# If provided skip folding function says to skip, then skip.
if skip_folding_node_fn and skip_folding_node_fn(node):
continue
# Must be a constant foldable node at this point.
const_nodes.add(node)
if node.op != "get_attr":
found_const_folding = True
# If we did not find any const folding then return early without a const fold subgraph.
if not found_const_folding:
return FoldedGraphModule(mod_traced, mod_traced.graph)
# Partition the module into two: submod_0 for constant folding subgraph, and
# submod_1 for the rest.
def mod_partition(node: torch.fx.Node):
return 0 if node in const_nodes else 1
split = split_module(mod_traced, module, mod_partition)
const_gm, non_const_gm = split.submod_0, split.submod_1
const_mod_name, non_const_mod_name = "submod_0", "submod_1"
# The module that a call_module node refers to gets copied to submodules during split.
# The path to the module also gets inlined, i.e. mod.a.b -> mod_a_b. Here we need to
# attach inlined modules to `split` as it's the owning module now.
for node in non_const_gm.graph.nodes:
if node.op == "call_module":
setattr(split, node.target, getattr(non_const_gm, node.target))
for node in const_gm.graph.nodes:
if node.op == "call_module":
setattr(split, node.target, getattr(const_gm, node.target))
# split_module currently does not use get_attrs for attrs. Instead it passes
# them in as args from the parent module, which used get_attrs. Here we set
# them as get_attrs inside const_gm, allowing for running folding without
# somehow a priori knowing the attrs that should be passed as args. We can
# unconditionally do this for all placeholders because we know all
# placeholders to const_gm must be constants accessible via get_attr.
call_const_gm_args = None
for node in split.graph.nodes:
if node.op == "call_module":
if node.target == const_mod_name:
call_const_gm_args = node.args
break
assert call_const_gm_args is not None
# Here we do the actual replacement of placeholders to get_attrs. Note that here we
# set the const_gm.graph into a new root_const_gm with split as the root module,
# because we are fetching attributes directly from the root module, instead of
# fetching them from const_gm. Example: The const_gm must have some format like:
# graph():
# %inp : [#users=1] = placeholder[target=const_inp]
# %add : [#users=1] = call_function[target=operator.add](args = (%inp, %inp), kwargs = {})
# return add
# We replace that with the following, which does not have any placeholders:
# graph():
# %inp_1 : [#users=1] = get_attr[target=const_inp]
# %add : [#users=1] = call_function[target=operator.add](args = (%inp_1, %inp_1), kwargs = {})
# return add
root_const_gm = torch.fx.GraphModule(split, const_gm.graph)
for node in root_const_gm.graph.nodes:
if node.op == "output":
multiple_outputs = isinstance(node.args[0], tuple)
continue
if node.op != "placeholder":
continue
in_node = next(n for n in call_const_gm_args if n.name == node.target)
assert in_node.op == "get_attr"
with root_const_gm.graph.inserting_before(node):
new_node = root_const_gm.graph.get_attr(in_node.target)
new_node.meta = node.meta.copy()
node.replace_all_uses_with(new_node)
root_const_gm.graph.erase_node(node)
assert "multiple_outputs" in locals()
# Now find the call to const_gm inside split, and replace it with a getattr to the
# folded tensor(s) that result from constant folding. Note that we don't need to
# worry about whether this is one or more tensors because the original graph
# correctly uses getitem to extract individual tensors if there are multiple folded.
fx_const_folded_attrs_name = get_unique_attr_name_in_module(
split, "_FX_CONST_FOLDED_ATTRS"
)
setattr(
split,
fx_const_folded_attrs_name,
torch.nn.ParameterList() if multiple_outputs else torch.nn.Parameter(),
)
for node in split.graph.nodes:
if node.op == "call_module" and node.target == const_mod_name:
with node.graph.inserting_before(node):
folded_attrs = node.graph.get_attr(fx_const_folded_attrs_name)
folded_attrs.meta = node.meta.copy()
node.replace_all_uses_with(folded_attrs)
break
split.graph.eliminate_dead_code()
# Finally, inline the non-constant submod into the split submod. This is so that the
# original caller who may have passed in a graph module will get back out a graph
# module whose graph is traced to the same granularity.
_inline_module(split, non_const_mod_name)
return FoldedGraphModule(
split,
split.graph,
root_const_gm.graph,
fx_const_folded_attrs_name,
device_for_folded_attrs,
)
| pytorch-master | torch/fx/experimental/const_fold.py |
import operator
from typing import Dict, List, Set, NamedTuple, Tuple
import torch
from torch.fx.passes.graph_manipulation import get_size_of_all_nodes
from torch.fx.experimental.partitioner_utils import (
Partition,
Device,
PartitionerConfig,
get_partition_to_latency_mapping,
get_latency_of_partitioned_graph,
NodeLatency,
get_extra_size_of,
PartitionMode,
)
from torch.fx.graph_module import GraphModule
from torch.fx.node import Node, map_arg
from torch.fx.passes.split_module import split_module
class DAGNode:
"""DAGNode class maintains useful information for a partition (submodule),
and its input submodules and output submodules.
"""
def __init__(
self,
submodule_node: Node,
input_nodes: List[Node],
output_nodes: List[Node],
logical_device_ids: List[int],
size_bytes: int,
) -> None:
self.submodule_node: Node = submodule_node
self.input_nodes: List[Node] = input_nodes
self.output_nodes: List[Node] = output_nodes
self.logical_device_ids: List[int] = logical_device_ids
self.size_bytes = size_bytes
def __str__(self) -> str:
return str(self.submodule_node)
class DAG:
"""DAG class contains all the DAG nodes"""
def __init__(self) -> None:
self.nodes: List[DAGNode] = []
def create_node(
self,
submodule_node: Node,
input_nodes: List[Node],
output_nodes: List[Node],
logical_devices: List[int],
size_bytes: int,
) -> None:
node = DAGNode(
submodule_node, input_nodes, output_nodes, logical_devices, size_bytes
)
self.nodes.append(node)
class PartitionResult(NamedTuple):
"""NameTuple used for returning DAG and a new fx module"""
dag: DAG
module_with_submodules: GraphModule
"""Followings are some helper functions for partition manipulation"""
def reset_partition_device(partitions):
for partition in partitions:
partition.logical_device_ids = []
def combine_two_partitions(
partition_0: Partition, partition_1: Partition, partitions: List[Partition]
) -> None:
"""Given a list of partitions and its two partitions,
combine these two partitions into a new one appending to the partitions
and remove the previous two partitions from the list of partitions
"""
partition = Partition(len(partitions))
partition.nodes = partition_0.nodes.union(partition_1.nodes)
partition.recalculate_mem_size()
partitions.append(partition)
partitions.remove(partition_0)
partitions.remove(partition_1)
reorganize_partitions(partitions)
return
def set_parents_and_children(partitions: List[Partition]) -> None:
"""Given a list of partitions, mark parents and children for each partition"""
# Go through all nodes in a partition.
    # If a node's user is in another partition,
    # then the other partition is a child of this partition,
    # and this partition is a parent of the other partition.
for partition in partitions:
partition.children = set()
partition.parents = set()
for partition in partitions:
for node in partition.nodes:
# For each node in the current partition, find its users
users = node.users
for n in users:
                # Find which partition the user node belongs to.
                # Note that if the current node also belongs to that partition,
                # that partition is not a child of the current partition
for p in partitions:
if p != partition and n in p.nodes and node not in p.nodes:
partition.children.add(p)
p.parents.add(partition)
return
def reorganize_partitions(partitions: List[Partition]) -> None:
"""Given a list of partitions, reorganzie partiton id,
its parents and its children for each partition
"""
# Rearrange partition ids
for i, partition in enumerate(partitions):
partition.partition_id = i
set_parents_and_children(partitions)
return
def get_bfs_level_partition(partitions: List[Partition]) -> None:
"""Given a list of partitions,
mark the bfs level for each partition
"""
current_level: Set[Partition] = set()
visited: Set[Partition] = set()
for partition in partitions:
# If a partition has no parent, it should be in root level
if len(partition.parents) == 0:
current_level.add(partition)
next_level: Set[Partition] = set()
level = 0
# bfs
while current_level:
partition = current_level.pop()
partition.bfs_level = level
visited.add(partition)
children = partition.children
for child in children:
if child not in next_level:
next_level.add(child)
if not current_level:
current_level = next_level.copy()
next_level = set()
level += 1
return
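# Illustrative sketch (added commentary, not part of the original module). For a
# simple chain of partitions p0 -> p1 -> p2, where p0 has no parents, the function
# above would assign:
#
#     p0.bfs_level == 0
#     p1.bfs_level == 1
#     p2.bfs_level == 2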
def get_node_to_partition_mapping(partitions: List[Partition]) -> Dict[Node, int]:
"""Given a list of partitions,return node to partition mapping"""
node_to_partition: Dict[Node, int] = {}
for partition in partitions:
for node in partition.nodes:
node_to_partition[node] = partition.partition_id
return node_to_partition
def get_logical_id_to_device(devices: List[Device]) -> Dict[int, Device]:
"""Get a mapping from device logical ID to Device object."""
logical_id_to_device: Dict[int, Device] = {}
for d in devices:
logical_id_to_device[d.logical_id] = d
return logical_id_to_device
def get_device_partition_stats(
partitions: List[Partition], devices: List[Device]
) -> Tuple[Dict[Device, List[Partition]], Dict[Device, int], List[Partition]]:
"""Given a list of partitions and a list of devices, returns:
1. A mapping from device to partitions on it;
2. A mapping from device to its remaining memory size;
3. A list of partitions that do not have a device.
"""
# logical id to device
logical_id_to_device = get_logical_id_to_device(devices)
# Track partitions on device
device_to_partitions: Dict[Device, List[Partition]] = {}
# Track device's left mem size
device_to_left_mem_bytes: Dict[Device, int] = {}
for d in devices:
device_to_partitions[d] = []
device_to_left_mem_bytes[d] = d.available_mem_bytes
# Deal with the partitions that already have a device
# and also collect all partitions without a device (no_device_partitions)
no_device_partitions = []
for partition in partitions:
if partition.logical_device_ids != []:
for logical_id in partition.logical_device_ids:
device = logical_id_to_device[logical_id]
device_to_partitions[device].append(partition)
device_to_left_mem_bytes[device] -= partition.used_mem_bytes
else:
no_device_partitions.append(partition)
return (
device_to_partitions,
device_to_left_mem_bytes,
no_device_partitions,
)
def get_device_to_partitions_mapping(
partitions: List[Partition], devices: List[Device]
):
"""Given a list of partitions and a list of devices,
map each partition into a device.
"""
def calculate_extra_mem_bytes_needed_for(
partition: Partition, partitions: List[Partition]
):
all_nodes: Set[Node] = set()
for p in partitions:
all_nodes = all_nodes.union(p.nodes)
if len(all_nodes) == 0:
return partition.used_mem_bytes
all_nodes = all_nodes.union(partition.nodes)
extra_size_needed = 0
for node in partition.nodes:
extra_size_needed += get_extra_size_of(node, all_nodes)
return extra_size_needed
def find_device_for(partition: Partition):
"""Given a partition, find a logical device for the partition
The algorithm is to put the partition on the device
that has just enough mem left for that partition.
device_to_left_mem_bytes is a dictionary between device and its left mem size
sorted by its left mem size
"""
for d in device_to_left_mem_bytes:
extra_size_needed = calculate_extra_mem_bytes_needed_for(
partition, device_to_partitions[d]
)
if extra_size_needed < device_to_left_mem_bytes[d]:
device_to_partitions[d].append(partition)
partition.logical_device_ids.append(d.logical_id)
device_to_left_mem_bytes[d] -= extra_size_needed
return True
return False
(
device_to_partitions,
device_to_left_mem_bytes,
no_device_partitions,
) = get_device_partition_stats(partitions, devices)
# Find devices for all the partitions without a device
found_device = True
for partition in no_device_partitions:
device_to_left_mem_bytes = {
d: left_mem_bytes
for d, left_mem_bytes in sorted(
device_to_left_mem_bytes.items(), key=lambda item: item[1]
)
}
found_device = find_device_for(partition)
if not found_device:
break
return found_device
def check_dependency(partition):
"""Given a partition,check if there is a circular dependency on
this partition using bfs
"""
visited: Set[Partition] = set([partition])
queue: List[Partition] = [partition]
while queue:
p = queue.pop(0)
for child in p.children:
if child == partition:
return True
else:
if child not in visited:
visited.add(child)
queue.append(child)
return False
class Partitioner:
"""A fx module may not fit into one device.
Partitioner class helps partition one fx module into submodules (partitions),
so that the submodules can be executed crossing different accelerators.
The main function of this class is self.partition_graph.
It partitions the fx module based on the scheme specified in partition_config
A DAG structure is returned
along with a new fx module with submodule nodes.
"""
def __init__(self) -> None:
self.partitions: List[Partition] = []
self.node_to_partition: Dict[Node, int] = {}
self.devices: List[Device] = []
def partition_graph(
self,
fx_module: GraphModule,
torch_module: torch.nn.Module,
partitioner_config: PartitionerConfig,
) -> PartitionResult:
"""Given the fx module, torch module and partitioner_config,
find the partitions, do the partitions,
and then return a DAG and a new fx module with submodule nodes (partitions)
"""
self.graph_module = fx_module
self.torch_module = torch_module
self.devices = partitioner_config.devices
if len(self.devices) == 0:
raise RuntimeError("No devices")
# Tag the size in bytes to all nodes in the graph_module.
get_size_of_all_nodes(self.graph_module)
# Check if there are op nodes in the fx module
nodes = self.graph_module.graph.nodes
if all(node.op in {"placeholder", "get_attr", "output"} for node in nodes):
raise RuntimeError("No Partition since no operations in the module")
# Calculate total size of the fx module
total_size_of_graph = 0
for node in nodes:
if node.op == "output":
break
total_size_of_graph += node.size_bytes.total_size
# Find the device with the max mem size
device_with_max_mem = max(self.devices, key=lambda d: d.available_mem_bytes)
# AOT based partition
if partitioner_config.mode == PartitionMode.aot_based:
self.aot_based_partition(
partitioner_config.node_to_partition_mapping,
partitioner_config.partition_to_logical_device_mapping,
)
# Single partition if the whole module can be fit into one device
elif total_size_of_graph <= device_with_max_mem.available_mem_bytes:
self.find_single_partition(
total_size_of_graph, logical_device_id=device_with_max_mem.logical_id
)
elif total_size_of_graph > sum([d.available_mem_bytes for d in self.devices]):
raise RuntimeError("Devices have no enough memory for the module")
else:
# Sparse nn based partition
if partitioner_config.mode == PartitionMode.sparse_nn:
available_mem_bytes = self.devices[0].available_mem_bytes
if not all(
device.available_mem_bytes == available_mem_bytes
for device in self.devices
):
raise RuntimeError("All devices must have same memory size!")
# sparse_nn_partition only support same memory size
# TODO: add different size support for sparse_nn_partition
self.sparse_nn_partition(available_mem_bytes)
# Cost aware partition
elif partitioner_config.mode == PartitionMode.cost_aware:
self.cost_aware_partition(
partitioner_config.transfer_rate_bytes_per_sec,
partitioner_config.node_to_latency_mapping,
)
# KL based partition
elif partitioner_config.mode == PartitionMode.kl_based:
self.kl_based_partition(
partitioner_config.transfer_rate_bytes_per_sec,
partitioner_config.node_to_latency_mapping,
)
else:
self.size_based_partition()
# Saturate host if possible.
if partitioner_config.saturate_host:
self.saturate_host()
# Partition the graph module based on the partition assignment.
module_with_submodules = self.do_partition()
# The DAG contains DAGNodes with info of each partition's input nodes, output nodes
# and how partitions are connected.
dag = self.dump_dag(module_with_submodules)
ret = PartitionResult(dag, module_with_submodules)
return ret
def find_single_partition(
self, total_size_of_graph, logical_device_id: int = 0
) -> None:
"""Fit the whole fx module into one device"""
partition_0 = self.create_partition()
for node in self.graph_module.graph.nodes:
if node.op == "output":
# Skip the output node, but there can
# be nodes after the output in certain cases.
continue
partition_0.nodes.add(node)
partition_0.used_mem_bytes = total_size_of_graph
partition_0.logical_device_ids = [logical_device_id]
# Get the node to partition mapping
self.node_to_partition = get_node_to_partition_mapping(self.partitions)
return
def size_based_partition(self) -> None:
"""This method is to partition the fx module based on memory size.
It uses greedy approach. The result may not be the best.
The basic idea is:
Step 1:
Find a device which has enough memory to fit the current node, create a empty partition
with the size of that device.
Then keep adding the following nodes into the partition until the partition is full.
Step 2:
Repeat Step 1 until no device left
Step 3:
If some nodes are left, create a partition for each left node (single node partition).
and then try to map those partitions into logical devices with enough mem left.
"""
def find_device_based_on_size(node) -> Device:
"""Given a node, this function is to find a logical device
that could fit the node.
"""
mem_size_needed = get_extra_size_of(node, set())
device = Device("", -1, -1)
for d in self.devices:
if (
d not in occupied_devices
and d.available_mem_bytes >= mem_size_needed
):
device = d
break
if device.available_mem_bytes < 0:
raise RuntimeError(str(node) + "is too large to fit any device")
occupied_devices.append(device)
return device
# Track partition and its left mem size
partition_to_left_mem_bytes: Dict[Partition, int] = {}
# Track all the devices that have been used
occupied_devices: List[Device] = []
partition = self.create_partition()
for node in self.graph_module.graph.nodes:
if node.op in {"call_module", "call_method", "call_function"}:
# Check if there are devices left
if len(self.partitions) <= len(self.devices):
total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
# Check if the current partition is the very first partition
if partition.used_mem_bytes == 0:
# Find a device to fit the first node, return available mem size
device = find_device_based_on_size(node)
occupied_devices.append(device)
# Update partition and its left mem size
partition_to_left_mem_bytes[
partition
] = device.available_mem_bytes
# Update available mem for the current partition
partition.logical_device_ids.append(device.logical_id)
else:
# The current partition is not the first partition
# Check if the current node can fit into current partition
if (
partition_to_left_mem_bytes[partition]
< total_size_of_input_nodes
):
# Check if no device is left
if len(self.partitions) == len(self.devices):
# No device is left
# Put the previous partitions into a list (non_single_node_partitions)
non_single_node_partitions = self.partitions[:]
# Create the first single node partition for the current node
self.create_single_node_partition(node)
continue
# Some devices are still left
# Create a new partition with a mem size that is enough for the current node
device = find_device_based_on_size(node)
partition = self.create_partition()
total_size_of_input_nodes = get_extra_size_of(
node, partition.nodes
)
partition_to_left_mem_bytes[
partition
] = device.available_mem_bytes
partition.logical_device_ids.append(device.logical_id)
partition.add_node(node)
partition_to_left_mem_bytes[partition] -= total_size_of_input_nodes
# Create single node partitions if no device is left
else:
self.create_single_node_partition(node)
reorganize_partitions(self.partitions)
# Get the node to partition mapping
self.node_to_partition = get_node_to_partition_mapping(self.partitions)
# Mapping all partitions into device
found_partition_to_device_mapping = get_device_to_partitions_mapping(
self.partitions, self.devices
)
if not found_partition_to_device_mapping:
raise RuntimeError("Cannot Get a Valid Partition to Logical Device Mapping")
return
def saturate_host(self) -> None:
"""Saturate host by assigning replicates to unused devices with enough memory.
It uses a greedy approach to find a next available set of devices to place all split
partitions: For each used device, it searches for an idle device with minimal memory
size that can hold all the partition located on that device; If the search is successful
for all used devices, it then assigns the new devices' logical ID to the corresponding
partition.
"""
(
device_to_partitions,
device_to_left_mem_bytes,
no_device_partitions,
) = get_device_partition_stats(self.partitions, self.devices)
assert (
len(no_device_partitions) == 0
), f"Expect no_device_partitions has 0 device, but get {len(no_device_partitions)}"
# Devices that hold partitions
used_devices = [d for d in self.devices if len(device_to_partitions[d]) > 0]
# Track replicates of the assigned devices
replicated_device_to_used_device: Dict[Device, Device] = {}
while len(used_devices) * 2 + len(replicated_device_to_used_device) <= len(
self.devices
):
# Success flag for this round
success = True
# Devices that have not been assigned
idle_devices = [
d
for d in self.devices
if d not in used_devices and d not in replicated_device_to_used_device
]
# Temporary mapping from replicated device to original device
temp_replicate_mapping = {}
            # Find a new device to replicate all partitions on a used device
for used_device in used_devices:
# Idle devices that have enough memory
available_devices = [
d
for d in idle_devices
if d.available_mem_bytes
>= used_device.available_mem_bytes
- device_to_left_mem_bytes[used_device]
]
if len(available_devices) == 0:
success = False
break
new_device = min(available_devices, key=lambda d: d.available_mem_bytes)
idle_devices.remove(new_device)
temp_replicate_mapping[new_device] = used_device
if not success:
break
replicated_device_to_used_device.update(temp_replicate_mapping)
# Update logical device IDs assigned to the partitions
for (
replicate_device,
original_device,
) in replicated_device_to_used_device.items():
logical_id = replicate_device.logical_id
for partition in device_to_partitions[original_device]:
partition.logical_device_ids.append(logical_id)
for p in self.partitions:
print(p.logical_device_ids)
def do_partition(self) -> GraphModule:
"""Return a new fx module with submodule nodes (partitions)."""
module_with_submodules = split_module(
self.graph_module,
self.torch_module,
lambda node: self.node_to_partition[node],
)
return module_with_submodules
def dump_dag(self, module_with_submodules: GraphModule) -> DAG:
"""Return the dag structure and the new fx module with submodules."""
dag = DAG()
for node in module_with_submodules.graph.nodes:
if node.op == "output":
break
if node.op in {"placeholder", "get_attr"}:
continue
if node.target == operator.__getitem__:
continue
input_nodes: Dict[Node, None] = {}
map_arg(node.args, lambda n: input_nodes.setdefault(n))
map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))
# When a node has two or more output nodes,
# it outputs its result to 'getitem' nodes.
# Those 'getitem' nodes are the output node for this node.
# Otherwise, the output node is this node itself.
if len(node.users) > 1:
output_nodes = list(node.users)
else:
output_nodes = [node]
partition_id = int(node.name.rsplit("_", 1)[-1])
device_ids = self.partitions[partition_id].logical_device_ids
size_bytes = self.partitions[partition_id].used_mem_bytes
dag.create_node(
node, list(input_nodes), output_nodes, device_ids, size_bytes
)
return dag
def create_partition(self) -> Partition:
"""Create a partition and append it to self.partitions."""
partition_id = len(self.partitions)
partition = Partition(partition_id)
self.partitions.append(partition)
return partition
def create_single_node_partition(self, node):
"""Create a partition for a single node"""
partition = self.create_partition()
partition.add_node(node)
return
def sparse_nn_partition(self, available_mem_bytes: int) -> None:
"""This method partition a sparse nn module.
It is size based partition but different from size_based_partition,
it only works when all the devices have same memory size (available_mem_bytes).
In the future, devices with different mem sizes will be supported like size_based_partition.
It first traverse all the nodes and do the partitions based on the same memory size.
If the current partition has no enough memory left for a new op node
(call_module, call_method, call_function), a new partition is created.
When crossing the boundary between non-embedding nodes and embedding nodes,
a new partition is created regardlessly.
For example, if the current node is a non-embedding node but the next node is an
embedding node, a new partition is created for the next node.
After the partition, the partitions are combined as much as possible.
The rule is that a non-embedding partition only
combines with another non-embedding one.
So as the embedding partitions.
"""
def combine_partitions_based_on_size(
partitions: List[Partition], available_mem_bytes: int
) -> None:
"""Combining small partitions together to keep as less partitions as possible.
Here is an example of the algorithm to do this:
Assume some partitions, we first sort them based on partiiton used memory size.
[(partition_4, 1), (partition_3, 1), (partition_2, 2), (partition_1, 7), (partition_0, 9)]
The available memory is 10.
step 1: self.find_partition_to_combine_based_on_size()
First, mark bfs level for each partition
Second, look the smallest partition, partition_4: 10 - 1 = 9
It means any partition has a used memory equal or less than 9 could combine this partition
We go from the largest and selection partition_0.
Check the bfs level for two partitions, if the level difference is less than 2,
it can be combined.
step 2: repeat step 1 until no partitions can be combined
"""
find_combination = True
while find_combination:
# Sort partitions based on memory size
sorted_partitions = sorted(partitions, key=lambda p: p.used_mem_bytes)
# Mark bfs level
get_bfs_level_partition(self.partitions)
find_combination, partitions = find_partition_to_combine_based_on_size(
sorted_partitions, available_mem_bytes, partitions
)
return
def calculate_mem_bytes_needed(p1, p2):
"""Given two partitions, calculate how many mem bytes
are needed if two partitions are combined
"""
nodes = p1.nodes.union(p2.nodes)
mem_bytes_needed = 0
for node in nodes:
mem_bytes_needed += get_extra_size_of(node, nodes)
return mem_bytes_needed
def find_partition_to_combine_based_on_size(
sorted_partitions: List[Partition],
available_mem_bytes: int,
partitions: List[Partition],
) -> Tuple[bool, List[Partition]]:
"""step 1 in combine_partition_based_on_size()"""
find_combination = False
smallest_partition = sorted_partitions.pop(0)
for p in sorted_partitions[::-1]:
if abs(smallest_partition.bfs_level - p.bfs_level) <= 1:
# Calculate how many bytes needed if combined
mem_bytes_needed = calculate_mem_bytes_needed(p, smallest_partition)
if mem_bytes_needed <= available_mem_bytes:
combine_two_partitions(p, smallest_partition, self.partitions)
partitions.remove(smallest_partition)
partitions.remove(p)
partitions.append(self.partitions[-1])
find_combination = True
break
return find_combination, partitions
def reset_partition_in_sparse_nn(partition, new_partition=True):
"""If crossing the boudary between non-embedding nodes and
embedding nodes, create a new partition
"""
if in_embedding_region:
embedding_partitions.append(partition)
else:
non_embedding_partitions.append(partition)
if new_partition:
partition = self.create_partition()
partition.left_mem_bytes = available_mem_bytes
return partition
return None
def is_embedding_node(node: Node) -> bool:
"""Check if a node is an embedding node"""
if node.op == "call_module":
submodule = self.graph_module
for atom in str(node.target).split("."):
if not hasattr(submodule, atom):
raise RuntimeError(
f"Module {submodule} has no attribute {atom}"
)
submodule = getattr(submodule, atom)
if "Embedding" in str(submodule):
return True
return False
        # Track embedding partitions and non-embedding partitions separately
embedding_partitions: List[Partition] = []
non_embedding_partitions: List[Partition] = []
# A Flag to check the boundary
in_embedding_region: bool = False
partition = self.create_partition()
for node in self.graph_module.graph.nodes:
if node.op in {"call_module", "call_method", "call_function"}:
# Check if crossing the boundary between embedding nodes and non embedding nodes
if is_embedding_node(node) != in_embedding_region:
# Crossing the boundary
# Check if the current partition is an empty partition
if partition.used_mem_bytes != 0:
# The current partition isn't an empty partition. Create a new one.
partition = reset_partition_in_sparse_nn(partition)
in_embedding_region = not in_embedding_region
total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
if (
total_size_of_input_nodes + partition.used_mem_bytes
> available_mem_bytes
):
partition = reset_partition_in_sparse_nn(partition)
total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
if total_size_of_input_nodes > available_mem_bytes:
raise RuntimeError(
node.target + "is too large to fit into a device"
)
partition.add_node(node)
reset_partition_in_sparse_nn(partition, new_partition=False)
# Set parents and children for partitions
set_parents_and_children(self.partitions)
# Combining non-embedding partitions
combine_partitions_based_on_size(non_embedding_partitions, available_mem_bytes)
# Combining embedding partitions
combine_partitions_based_on_size(embedding_partitions, available_mem_bytes)
total_size_of_non_embedding_partitions = 0
for partition in non_embedding_partitions:
total_size_of_non_embedding_partitions += partition.used_mem_bytes
# Check if devices are enough for all partitions
if len(embedding_partitions) > len(self.devices):
msg = (
"Need "
+ str(len(embedding_partitions))
+ " devices, but only "
+ str(len(self.devices))
+ " provided"
)
raise RuntimeError(msg)
occupied_devices = []
for i, partition in enumerate(embedding_partitions):
# Check if all non-embedding partitions can fit into embedding partition devices
if (
total_size_of_non_embedding_partitions + partition.used_mem_bytes
> available_mem_bytes
):
raise RuntimeError(
"partition_"
+ str(partition.partition_id)
+ "(embedding partition) and non embedding partitions can not fit into one device"
)
else:
# Add logical device to the partition
partition.logical_device_ids = [self.devices[i].logical_id]
occupied_devices.append(self.devices[i].logical_id)
# Add logical devices to the non_embedding_partitions
for partition in non_embedding_partitions:
partition.logical_device_ids = occupied_devices
# Get the node to partition mapping
self.node_to_partition = get_node_to_partition_mapping(self.partitions)
return
def cost_aware_partition(
self,
transfer_rate_bytes_per_sec: float,
node_to_latency_mapping: Dict[Node, NodeLatency],
) -> None:
"""This method is to partition the fx module based on the cost.
The cost is the total latency of running the whole fx module.
In partitioner_utils.py, the cost model is built.
The cost aware partition algorithm is:
#1. At every begining, each node is a partition.
Then we map all the partitions to the devices
and calculate the cost
#2. Then try to pre-combine any two of the partitions if the two
partitions can be combined.
(the bfs level is less than 2 or two partitions are connected and
can find partition to device mapping)
See if any partition pair could reduce the current cost.
Choose the pair that shows the minimum cost and then combine them
#3. Repeat #2 until the cost cannot be reduced.
"""
def try_combining_partitions(p0_index, p1_index, partitions) -> float:
"""Given two partitions and a list of partitions, combine these two partitions
and see what is the cost of the modified partition list
"""
p0 = partitions[p0_index]
p1 = partitions[p1_index]
"""If two partitions' bfs level are less than 2 or two partitions are connected to each other,
then they can be combined
"""
if (
(abs(p0.bfs_level - p1.bfs_level) <= 1)
or (p0 in p1.parents)
or p0 in (p1.children)
):
combine_two_partitions(p0, p1, partitions)
# Check if a circular dependency exists after combining
if check_dependency(partitions[-1]):
return float("inf")
# Check if the modified partition list can be mapped to devices after combination
reset_partition_device(partitions)
                found_device = get_device_to_partitions_mapping(
                    partitions, self.devices
                )
                if not found_device:
return float("inf")
# Calculate the new cost
partition_to_latency_mapping = get_partition_to_latency_mapping(
partitions, node_to_latency_mapping
)
cost = get_latency_of_partitioned_graph(
partitions,
partition_to_latency_mapping,
transfer_rate_bytes_per_sec,
)
return cost
            # If the two partitions cannot be combined, the cost is inf
return float("inf")
def search_combination(
transfer_rate_bytes_per_sec, node_to_latency_mapping
) -> bool:
"""Given transfer rate between partitions and each node's latency,
find two partitions to combine so the cost of the partitions can
be reduced.
            The algorithm is:
            1. Go through all the partition pairs and see
            if any pair of partitions can be combined.
            2. Calculate the cost after the combination.
            3. Select the minimum cost and combine its corresponding partition pair.
"""
partition_to_latency_mapping = get_partition_to_latency_mapping(
self.partitions, node_to_latency_mapping
)
cost = get_latency_of_partitioned_graph(
self.partitions,
partition_to_latency_mapping,
transfer_rate_bytes_per_sec,
)
if len(self.partitions) == 1:
return False
partition_pair: List[int] = []
for i in range(len(self.partitions) - 1):
for j in range(i + 1, len(self.partitions)):
# Try to combine the partition pair
# and see the new cost after combination
new_cost = try_combining_partitions(i, j, self.partitions[:])
if new_cost <= cost:
partition_pair = [i, j]
cost = new_cost
reorganize_partitions(self.partitions)
# If a partition pair is found, combine them
if len(partition_pair) != 0:
p0 = self.partitions[partition_pair[0]]
p1 = self.partitions[partition_pair[1]]
combine_two_partitions(p0, p1, self.partitions)
get_bfs_level_partition(self.partitions)
reset_partition_device(self.partitions)
get_device_to_partitions_mapping(self.partitions, self.devices)
return len(partition_pair) != 0
for node in self.graph_module.graph.nodes:
if node.op not in {"placeholder", "get_attr", "output"}:
self.create_single_node_partition(node)
# Set up parent partitions and children partitions for each partition
set_parents_and_children(self.partitions)
# Get bfs level for each partition
get_bfs_level_partition(self.partitions)
find_combination = True
while find_combination:
            # Search for the partition pair that yields the minimum new cost,
            # then combine them
find_combination = search_combination(
transfer_rate_bytes_per_sec, node_to_latency_mapping
)
# Make sure all partitions are set up correctly
reorganize_partitions(self.partitions)
# Set up node to partition mapping
self.node_to_partition = get_node_to_partition_mapping(self.partitions)
return
def kl_based_partition(
self,
transfer_rate_bytes_per_sec: float,
node_to_latency_mapping: Dict[Node, NodeLatency],
) -> None:
"""This function is a cost aware partition based
on Kernighan-Lin algorithm.
First, the graph is partitioned using size_based_partition.
Then, each node is swapped with any other node in a different
partition, and at the same time, the cost is estimated after
the swapping.
For example, we have nodes n0, n1, n2, n3 and n4.
Using size_based_partition, n0 and n1 are in Partition p0.
n2, n3 and n4 in Partition p1. The current cost is esimated.
We first tried using n0 to swap with n2 from the other partiton.
Then we see that swapping n0 and n2 shows a lower cost
than the current cost and it is the minimum among other pairs like
(n0, None)(This means moving n0 to Partition without swapping other nodes),
(n0, n3) and (n0, n4). We swap n0 and n2 and set the new cost
as the current cost.
Then We repeat this process for all the other nodes until all swapping pairs
are tried.
"""
def swap_nodes(n0, n1, p0, p1):
# Either n0 or n1 could be None
# That means we simply move the node
# to another partition
if n0 is not None:
p0.remove_node(n0)
p1.add_node(n0)
if n1 is not None:
p0.add_node(n1)
p1.remove_node(n1)
def try_swap_nodes(
n0, n1, p0, p1, node_to_latency_mapping, transfer_rate_per_sec
):
cost = float("inf")
swap_nodes(n0, n1, p0, p1)
# Reorganize partitions after swapping
reorganize_partitions(self.partitions)
# Check if there is a circular dependency after swapping
if (not check_dependency(p0)) and (not check_dependency(p1)):
reset_partition_device(self.partitions)
partition_to_latency_mapping = get_partition_to_latency_mapping(
self.partitions, node_to_latency_mapping
)
# Check if all partitions can be mapped to logical devices after swapping
found_device = get_device_to_partitions_mapping(
self.partitions, self.devices
)
if not found_device:
cost = float("inf")
else:
cost = get_latency_of_partitioned_graph(
self.partitions,
partition_to_latency_mapping,
transfer_rate_bytes_per_sec,
)
# Swap back and reset all partitions back to original
swap_nodes(n1, n0, p0, p1)
reorganize_partitions(self.partitions)
reset_partition_device(self.partitions)
get_device_to_partitions_mapping(self.partitions, self.devices)
return cost
def swap_node_to_partition(
node, p0, p1, node_to_latency_mapping, transfer_rate_per_sec
):
"""This function helps to swap one node from partition p0
with all the nodes in another partition p1
"""
p1_nodes = list(p1.nodes) + [None]
min_cost = float("inf")
node_pair: List[Node] = []
for n1 in p1_nodes:
                # Ignore the node if it is not an op node
if n1 is not None and n1.op in {"placeholder", "get_attr"}:
continue
# Try swapping node in p0 with n1 in p1
cost = try_swap_nodes(
node, n1, p0, p1, node_to_latency_mapping, transfer_rate_per_sec
)
if cost < min_cost:
node_pair = [node, n1]
min_cost = cost
            return min_cost, node_pair
# First use size_base_partition
self.size_based_partition()
partition_to_latency_mapping = get_partition_to_latency_mapping(
self.partitions, node_to_latency_mapping
)
# Calculate the cost of the partitions
cost = get_latency_of_partitioned_graph(
self.partitions, partition_to_latency_mapping, transfer_rate_bytes_per_sec
)
# Keep tracking the node pair that shows the better cost
node_pair: List[Node] = []
# Keep tracking the partition pair of node pair
partition_pair: List[Partition] = []
# Collect all the op nodes from the graph
op_nodes = []
for n in self.graph_module.graph.nodes:
if n.op not in {"placeholder", "get_attr", "output"}:
op_nodes.append(n)
for node in op_nodes:
            # Find which partition the current node belongs to
p0_index = self.node_to_partition[node]
p0 = self.partitions[p0_index]
# Go through all the other partitions to swap
# with other nodes from those partitions
for p1_index, _ in enumerate(self.partitions):
if p0_index != p1_index:
p1 = self.partitions[p1_index]
new_cost, new_node_pair = swap_node_to_partition(
node,
p0,
p1,
node_to_latency_mapping,
transfer_rate_bytes_per_sec,
)
# Update the cost
# Track the swapped node pair and their partitions
if new_cost < cost:
cost = new_cost
node_pair = new_node_pair
partition_pair = [p0, p1]
# Do the swapping after trying all the nodes from a partition
if len(node_pair) != 0:
swap_nodes(
node_pair[0], node_pair[1], partition_pair[0], partition_pair[1]
)
reorganize_partitions(self.partitions)
get_device_to_partitions_mapping(self.partitions, self.devices)
reorganize_partitions(self.partitions)
# Mapping the device to the partition
get_device_to_partitions_mapping(self.partitions, self.devices)
return
def aot_based_partition(
self, node_to_partition_mapping, partition_to_logical_device_mapping
):
"""This function helps to rebuild the partitions given the nodes and its
corresponding partition id
"""
partition_id_to_partition_mapping: Dict[int, Partition] = {}
self.node_to_partition = node_to_partition_mapping
for node in self.node_to_partition:
partition_id = self.node_to_partition[node]
# If the requested partition has not been created, create the partition
if partition_id not in partition_id_to_partition_mapping:
partition = Partition(partition_id)
self.partitions.append(partition)
partition_id_to_partition_mapping[partition_id] = partition
partition.logical_device_ids = partition_to_logical_device_mapping[
partition_id
]
else:
partition = partition_id_to_partition_mapping[
self.node_to_partition[node]
]
# Add the current node into the partition
partition.add_node(node)
| pytorch-master | torch/fx/experimental/accelerator_partitioner.py |
import operator
from typing import Any, Callable, Dict, Tuple, Optional
import torch
import torch.fx
import torch.fx as fx
from torch.fx import Transformer, Proxy
from torch.fx.node import Argument, Target, Node, map_aggregate
from torch.fx.operator_schemas import (
normalize_module,
normalize_function,
create_type_hint,
)
from .schema_type_annotation import AnnotateTypesWithSchema
class NormalizeArgs(Transformer):
"""
Normalize arguments to Python targets. This means that
`args/kwargs` will be matched up to the module/functional's
signature and rewritten to exclusively kwargs in positional order
if `normalize_to_only_use_kwargs` is true. Also populates default
values. Does not support positional-only parameters or varargs
parameters (*args, **kwargs).
If the nodes have 'type' metadata, it will use it to disambiguate
overloads. Otherwise, it will throw an error.
Example usage:
m = torchvision.models.resnet18()
traced = torch.fx.symbolic_trace(m)
traced = NormalizeArgs(traced).transform()
"""
def __init__(
self, module: torch.fx.GraphModule, normalize_to_only_use_kwargs: bool = True
):
super().__init__(module)
self.node_map: Dict[Proxy, Node] = {}
self.normalize_to_only_use_kwargs = normalize_to_only_use_kwargs
def run_node(self, n: Node) -> Any:
args, kwargs = self.fetch_args_kwargs_from_env(n)
def get_type(arg):
if isinstance(arg, fx.Node):
return n.meta["type"] if "type" in n.meta else None
return type(arg)
arg_types = map_aggregate(n.args, get_type)
assert isinstance(arg_types, tuple)
arg_types = tuple([create_type_hint(i) for i in arg_types])
kwarg_types = {k: get_type(v) for k, v in kwargs.items()}
if n.op == "call_function":
out = self.call_function(n.target, args, kwargs, arg_types, kwarg_types)
else:
out = super().run_node(n)
if n.op != "output":
self.node_map[out] = n
out.node.meta = n.meta
return out
def call_function(
self,
target: Target,
args: Tuple[Argument, ...],
kwargs: Dict[str, Any],
arg_types: Optional[Tuple[Any, ...]] = None,
kwarg_types: Optional[Dict[str, Any]] = None,
):
assert callable(target)
new_args_and_kwargs = normalize_function(
target,
args, # type: ignore[arg-type]
kwargs,
arg_types, # type: ignore[arg-type]
kwarg_types,
self.normalize_to_only_use_kwargs,
)
if new_args_and_kwargs:
new_args, new_kwargs = new_args_and_kwargs
return self.tracer.create_proxy(
"call_function", target, new_args, new_kwargs
)
else:
return super().call_function(target, args, kwargs)
def call_module(
self, target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Any]
):
assert isinstance(target, str)
new_args_and_kwargs = normalize_module(
self.module,
target,
args, # type: ignore[arg-type]
kwargs,
self.normalize_to_only_use_kwargs,
)
if new_args_and_kwargs:
new_args, new_kwargs = new_args_and_kwargs
return super().call_module(target, new_args, new_kwargs)
else:
return super().call_module(target, args, kwargs)
class NormalizeOperators(AnnotateTypesWithSchema):
"""
Normalize callsites that are different ways of "spelling" the same
invocation into a single, canonical call. Currently supports:
    1. Normalize binary operators that have multiple spellings (e.g. torch.add
       and operator.add) to a single canonical target when it is possible to
       statically reason that they are equivalent.
Example usage:
m = torchvision.models.resnet18()
traced = torch.fx.symbolic_trace(m)
traced = NormalizeOperators(traced).transform()
"""
binary_magic_method_remap: Dict[
Callable[[Any, Any], Any], Callable[[Any, Any], Any]
] = {
torch.add: operator.add,
torch.mul: operator.mul,
torch.sub: operator.sub,
torch.div: operator.truediv,
torch.floor_divide: operator.floordiv,
torch.remainder: operator.mod,
torch.eq: operator.eq,
torch.ne: operator.ne,
torch.lt: operator.lt,
torch.le: operator.le,
torch.gt: operator.gt,
torch.ge: operator.ge,
}
def call_function(
self, target: Target, args: Tuple[Argument, ...], kwargs: Dict[str, Any]
):
# Normalize operators according to the magic methods implemented on tensors here:
# https://github.com/pytorch/pytorch/blob/28c5d90b679c6b38bf4183ec99f16d933c2f1bcd/tools/autograd/templates/python_variable_methods.cpp#L1137 # noqa: B950
assert callable(target)
if target in self.binary_magic_method_remap:
if len(args) != 2:
return super().call_function(target, args, kwargs)
lhs, rhs = args
return super().call_function(
target=self.binary_magic_method_remap[target],
args=(lhs, rhs),
kwargs={},
)
return super().call_function(target, args, kwargs)
| pytorch-master | torch/fx/experimental/normalize.py |
import torch.fx as fx
from torch.fx.node import Argument, Target
from torch.nn.utils.fusion import fuse_conv_bn_eval
from typing import Type, Dict, Any, Tuple, Iterable, Optional, List, cast
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.fx.passes.shape_prop import ShapeProp
import copy
from collections import defaultdict
import torch.utils.mkldnn as th_mkldnn
import operator
import time
import logging
from enum import Enum
def _parent_name(target : str) -> Tuple[str, str]:
"""
Splits a qualname into parent path and last atom.
For example, `foo.bar.baz` -> (`foo.bar`, `baz`)
"""
*parent, name = target.rsplit('.', 1)
return parent[0] if parent else '', name
# Works for length 2 patterns with 2 modules
def matches_module_pattern(pattern: Iterable[Type], node: fx.Node, modules: Dict[str, Any]):
if len(node.args) == 0:
return False
nodes: Tuple[Any, fx.Node] = (node.args[0], node)
for expected_type, current_node in zip(pattern, nodes):
if not isinstance(current_node, fx.Node):
return False
if current_node.op != 'call_module':
return False
if not isinstance(current_node.target, str):
return False
if current_node.target not in modules:
return False
if type(modules[current_node.target]) is not expected_type:
return False
return True
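# Illustrative sketch (added commentary, not part of the original module; `bn_node`
# is hypothetical). For the fusion pass below, a match means `node` is a call_module
# to a BatchNorm whose first argument is a call_module to a Conv, e.g.:
#
#     matches_module_pattern((nn.Conv2d, nn.BatchNorm2d), bn_node, modules)  # -> True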
def replace_node_module(node: fx.Node, modules: Dict[str, Any], new_module: torch.nn.Module):
assert(isinstance(node.target, str))
parent_name, name = _parent_name(node.target)
modules[node.target] = new_module
setattr(modules[parent_name], name, new_module)
def fuse(model: torch.nn.Module, inplace=False) -> torch.nn.Module:
"""
Fuses convolution/BN layers for inference purposes. Will deepcopy your
model by default, but can modify the model inplace as well.
"""
patterns = [(nn.Conv1d, nn.BatchNorm1d),
(nn.Conv2d, nn.BatchNorm2d),
(nn.Conv3d, nn.BatchNorm3d)]
if not inplace:
model = copy.deepcopy(model)
fx_model = fx.symbolic_trace(model)
modules = dict(fx_model.named_modules())
new_graph = copy.deepcopy(fx_model.graph)
for pattern in patterns:
for node in new_graph.nodes:
if matches_module_pattern(pattern, node, modules):
if len(node.args[0].users) > 1: # Output of conv is used by other nodes
continue
conv = modules[node.args[0].target]
bn = modules[node.target]
if not bn.track_running_stats:
continue
fused_conv = fuse_conv_bn_eval(conv, bn)
replace_node_module(node.args[0], modules, fused_conv)
node.replace_all_uses_with(node.args[0])
new_graph.erase_node(node)
return fx.GraphModule(fx_model, new_graph)
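# Illustrative usage sketch (added commentary, not part of the original module;
# `resnet18` and `x` are just example inputs, and the model should be in eval mode
# so the BN statistics can be folded):
#
#     model = torchvision.models.resnet18().eval()
#     fused = fuse(model)                              # Conv/BN pairs folded for inference
#     torch.testing.assert_close(fused(x), model(x))   # same outputs on a sample input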
def remove_dropout(model: nn.Module) -> nn.Module:
"""
Removes all dropout layers from the module.
"""
fx_model = fx.symbolic_trace(model)
class DropoutRemover(torch.fx.Transformer):
def call_module(self, target : Target, args : Tuple[Argument, ...], kwargs : Dict[str, Any]) -> Any:
if isinstance(self.submodules[target], nn.Dropout):
assert len(args) == 1
return args[0]
else:
return super().call_module(target, args, kwargs)
return DropoutRemover(fx_model).transform()
def extract_subgraph(orig_module: nn.Module, nodes: List[fx.Node], inputs: List[fx.Node], outputs: List[fx.Node]):
"""
Given lists of nodes from an existing graph that represent a subgraph, returns a submodule that executes that subgraph.
"""
new_graph = fx.Graph()
env: Dict[fx.Node, fx.Node] = {}
for input in inputs:
new_node = new_graph.placeholder(input.name)
env[input] = new_node
for node in nodes:
new_node = new_graph.node_copy(node, lambda x: env[x])
env[node] = new_node
new_graph.output([env[output] for output in outputs])
new_graph.lint()
return fx.GraphModule(orig_module, new_graph)
mkldnn_supported = [
nn.Conv2d, nn.Linear, nn.BatchNorm2d, nn.ReLU, nn.MaxPool2d, nn.AvgPool2d, nn.AdaptiveAvgPool2d,
torch.relu, torch.transpose, torch.sigmoid,
F.relu, F.avg_pool2d, F.adaptive_avg_pool2d
]
# These are operators that may not be convertible into MKLDNN ops (e.g. the
# args are scalar values). Thus, we only include them in the subgraph if their
# arguments are already in MKLDNN.
# TODO: Determine whether this can be removed after type inference.
mkldnn_supported_unknown = [operator.add, operator.mul]
mkldnn_map = {
nn.Conv2d: th_mkldnn.MkldnnConv2d,
nn.Linear: th_mkldnn.MkldnnLinear,
nn.BatchNorm2d: lambda a, _: th_mkldnn.MkldnnBatchNorm(a)
}
def modules_to_mkldnn(nodes: List[fx.Node], modules: Dict[str, nn.Module]):
"""
For each node, if it's a module that can be preconverted into MKLDNN,
then we do so and create a mapping to allow us to convert from the MKLDNN
version of the module to the original.
"""
old_modules: Dict[nn.Module, nn.Module] = {}
for node in nodes:
if node.op == 'call_module':
assert(isinstance(node.target, str))
cur_module = modules[node.target]
if type(cur_module) in mkldnn_map:
new_module = mkldnn_map[type(cur_module)](cur_module, torch.float)
assert(isinstance(new_module, nn.Module))
old_modules[new_module] = copy.deepcopy(cur_module)
replace_node_module(node, modules, new_module)
return old_modules
def reset_modules(nodes: List[fx.Node], modules: Dict[str, nn.Module], old_modules: Dict[nn.Module, nn.Module]):
"""
Maps each module that's been changed with `modules_to_mkldnn` back to its
original.
"""
for node in nodes:
if node.op == 'call_module':
assert(isinstance(node.target, str))
cur_module = modules[node.target]
if cur_module in old_modules:
replace_node_module(node, modules, old_modules[cur_module])
class MklSubgraph:
def __init__(self, fx_graph: fx.Graph):
self.fx_graph = fx_graph
self.nodes: List[fx.Node] = []
self.start_nodes: List[fx.Node] = []
self.end_nodes: List[fx.Node] = []
def gen_mkl_autotuner(example_inputs, iters=10, warmup=1):
"""
This generates a heuristic that can be passed into `optimize_for_inference` that
determines whether a subgraph should be run in MKL by running it with the example_inputs.
Example usage:
heuristic = gen_mkl_autotuner(example_inputs, iters=10)
fast_model = optimization.optimize_for_inference(model, heuristic)
"""
fx_model = None
old_modules = None
def use_mkl_heuristic(graph: MklSubgraph) -> bool:
nonlocal fx_model, old_modules
input_nodes = graph.start_nodes
if fx_model is None:
fx_model = graph.fx_graph.owning_module
old_modules = graph.fx_graph.old_modules # type: ignore[attr-defined]
ShapeProp(fx_model).propagate(example_inputs)
sample_inputs = [torch.randn(node.shape) for node in input_nodes] # type: ignore[attr-defined]
output_args = cast(List[fx.Node], [node.args[0] for node in graph.end_nodes])
submodule = extract_subgraph(fx_model, graph.nodes, input_nodes, output_args)
def benchmark(f):
for _ in range(warmup):
f()
begin = time.time()
for _ in range(iters):
out = f()
return time.time() - begin
mkl_time = benchmark(lambda: [i.to_dense() for i in submodule(*[i.to_mkldnn() for i in sample_inputs])])
reset_modules(submodule.graph.nodes, dict(submodule.named_modules()), old_modules)
no_mkl_time = benchmark(lambda: submodule(*sample_inputs))
return mkl_time < no_mkl_time
return use_mkl_heuristic
def use_mkl_length(graph: MklSubgraph) -> bool:
"""
This is a heuristic that can be passed into `optimize_for_inference` that
determines whether a subgraph should be run in MKL by checking if there
are more than 2 nodes in it
"""
return len(graph.nodes) > 2
class UnionFind:
def __init__(self, n):
self.parent: List[Optional[int]] = [None] * n
self.size: List[int] = [0] * n
def make_set(self, v: int):
self.parent[v] = v
self.size[v] = 1
def find(self, v: int) -> int:
par = self.parent[v]
if v == par:
return v
assert(par is not None)
self.parent[v] = self.find(par)
return cast(int, self.parent[v])
def join(self, a: int, b: int):
a, b = self.find(a), self.find(b)
if a == b:
return a
if self.size[a] < self.size[b]:
a, b = b, a
self.parent[b] = a
self.size[a] += self.size[b]
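# Illustrative sketch (added commentary, not part of the original module) of how the
# union-find above is used for the color merging further below:
#
#     uf = UnionFind(3)
#     uf.make_set(0); uf.make_set(1); uf.make_set(2)
#     uf.join(0, 2)
#     uf.find(2) == uf.find(0)   # -> True, colors 0 and 2 now name one subgraph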
def optimize_for_inference(
model: torch.nn.Module,
pass_config: Optional[Dict[str, Any]] = None,
tracer: Type[fx.Tracer] = fx.Tracer
) -> torch.nn.Module:
"""
Performs a set of optimization passes to optimize a model for the
purposes of inference. Specifically, the passes that are run are:
1. Conv/BN fusion
2. Dropout removal
3. MKL layout optimizations
The third optimization takes a function `use_mkl_heuristic` that's used
    to determine whether a subgraph should be explicitly run in MKL layout.
Note: As FX does not currently handle aliasing, this pass currently
assumes nothing aliases. If that isn't true, use at your own risk.
"""
default_pass_config = {
"conv_bn_fuse": True,
"remove_dropout": True,
"mkldnn_layout_optimize": {'heuristic': use_mkl_length},
}
if pass_config is None:
pass_config = {}
default_pass_config.update(pass_config)
if default_pass_config["conv_bn_fuse"]:
model = fuse(model)
if default_pass_config["remove_dropout"]:
model = remove_dropout(model)
if default_pass_config["mkldnn_layout_optimize"] is False:
return model
if not isinstance(default_pass_config["mkldnn_layout_optimize"], dict):
raise RuntimeError("mkldnn_layout_optimize config is not a dict")
if "heuristic" not in default_pass_config["mkldnn_layout_optimize"]:
raise RuntimeError("Heuristic not found in mkldnn_layout_optimize config")
use_mkl_heuristic = default_pass_config["mkldnn_layout_optimize"]["heuristic"]
cur_tracer = tracer()
fx_graph = cur_tracer.trace(copy.deepcopy(model))
fx_model = fx.GraphModule(cur_tracer.root, fx_graph)
modules: Dict[str, nn.Module] = dict(model.named_modules())
class MklSupport(Enum):
NO = 1
YES = 2
UNKNOWN = 3
# Inserts to_mkldnn and to_dense around every node we want to be a MKLDNN node.
# If the op is in `mkldnn_supported` then we always treat it as a MKLDNN node.
# However, if it's in `mkldnn_supported_unknown`, then we only treat it as
# a MKLDNN node if its inputs are MKLDNN nodes.
for node in list(fx_graph.nodes):
supports_mkldnn = MklSupport.NO
if node.op == 'call_module':
cur_module = modules[node.target]
if type(cur_module) in mkldnn_supported:
supports_mkldnn = MklSupport.YES
sample_parameter = next(cur_module.parameters(), None)
if sample_parameter is not None:
assert(sample_parameter.dtype == torch.float), "this pass is only for torch.float modules"
assert(sample_parameter.device == torch.device('cpu')), "this pass is only for CPU modules"
elif node.op == 'call_function':
if node.target in mkldnn_supported:
supports_mkldnn = MklSupport.YES
elif node.target in mkldnn_supported_unknown:
supports_mkldnn = MklSupport.UNKNOWN
if supports_mkldnn != MklSupport.NO:
if supports_mkldnn == MklSupport.UNKNOWN:
if not any([arg.target == 'to_dense' for arg in node.args]):
continue
with fx_graph.inserting_before(node):
mkldnn_args = fx.map_arg(node.args, lambda n: fx_graph.call_method('to_mkldnn', (n, )))
node.args = cast(Tuple[fx.node.Argument], mkldnn_args)
with fx_graph.inserting_after(node):
dense_x = fx_graph.create_node('call_method', 'to_dense', (node,))
node.replace_all_uses_with(dense_x)
dense_x.args = (node,)
# Does pre-conversion of all modules into MKLDNN (when possible)
old_modules = modules_to_mkldnn(list(fx_graph.nodes), modules)
fx_graph.old_modules = old_modules # type: ignore[attr-defined]
# optimizes all a -> to_dense -> to_mkldnn -> b patterns into a -> b
for node in fx_graph.nodes:
if node.op == 'call_method' and node.target == 'to_dense':
prv_node = node.args[0]
users = list(node.users)
for user in users:
if user.op == 'call_method' and user.target == 'to_mkldnn':
user.replace_all_uses_with(prv_node)
fx_graph.erase_node(user)
if len(node.users) == 0:
fx_graph.erase_node(node)
num_nodes = len(fx_graph.nodes)
uf = UnionFind(num_nodes)
def get_color(n):
if hasattr(n, 'color'): # Current node is part of a MKL subgraph
return uf.find(n.color)
if hasattr(n, 'start_color'): # Current node is input to MKL subgraph
return uf.find(n.start_color)
return None
# This code is to find each MKLDNN subgraph. Each MKLDNN subgraph consists
# of input nodes (which are only `to_mkldnn` calls), output nodes
# (`to_dense` calls), and intermediate nodes, which are run entirely on
# MKLDNN layout tensors.
#
# Specifically, this code does a flood fill on a directed acyclic graph
# (DAG), starting from each possible "start node" (i.e: `to_mkldnn` nodes).
# If every node only had one input, this would be sufficient. However, in
# the case that a node has multiple inputs coming from different start
# nodes (i.e. colors), we need to join these 2 colors into 1. That's done
# using a Disjoint Set Union.
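    # For example (illustrative): if a node reads from two different `to_mkldnn`
    # start nodes that were assigned colors 3 and 7, the loop below keeps color 3
    # for the node and records uf.join(3, 7), so both regions end up in the same
    # MKLDNN subgraph.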
for cur_idx, node in enumerate(fx_graph.nodes):
if node.op == 'call_method' and node.target == 'to_mkldnn':
node.start_color = cur_idx
uf.make_set(cur_idx)
elif node.op == 'call_method' and node.target == 'to_dense':
assert(get_color(node.args[0]) is not None)
node.end_color = get_color(node.args[0])
else:
cur_colors = [get_color(i) for i in node.all_input_nodes if isinstance(i, fx.Node) if get_color(i) is not None]
if len(cur_colors) == 0:
continue
assert(not any(i is None for i in cur_colors))
cur_colors = sorted(cur_colors)
node.color = cur_colors[0]
for other_color in cur_colors[1:]:
uf.join(cur_colors[0], other_color)
mkldnn_graphs: Dict[int, MklSubgraph] = defaultdict(lambda: MklSubgraph(fx_graph))
for node in fx_graph.nodes:
if hasattr(node, 'color'):
mkldnn_graphs[uf.find(node.color)].nodes.append(node)
if hasattr(node, 'start_color'):
mkldnn_graphs[uf.find(node.start_color)].start_nodes.append(node)
if hasattr(node, 'end_color'):
mkldnn_graphs[uf.find(node.end_color)].end_nodes.append(node)
# Now that we have all the subgraphs, we need to decide which MKLDNN
# subgraphs we actually want to keep in MKLDNN.
for graph in mkldnn_graphs.values():
if not use_mkl_heuristic(graph):
for node in graph.start_nodes + graph.end_nodes:
prv = node.args[0]
node.replace_all_uses_with(prv)
fx_graph.erase_node(node)
reset_modules(graph.nodes, modules, old_modules)
mkldnn_conversions = 0
for node in fx_graph.nodes:
if node.target == 'to_mkldnn' or node.target == 'to_dense':
mkldnn_conversions += 1
logging.getLogger(__name__).info(f"mkldnn conversions: {mkldnn_conversions}")
fx_graph.lint()
result = fx.GraphModule(model, fx_graph)
return result
| pytorch-master | torch/fx/experimental/optimization.py |
class Equality:
def __init__(self, lhs, rhs):
self.lhs = lhs
self.rhs = rhs
def __str__(self):
return f'{self.lhs} = {self.rhs}'
def __repr__(self):
return f'{self.lhs} = {self.rhs}'
def __eq__(self, other):
if isinstance(other, Equality):
return self.lhs == other.lhs and self.rhs == other.rhs
else:
return False
| pytorch-master | torch/fx/experimental/refinement_types.py |
from functools import reduce
import torch
import operator
from torch.fx.tensor_type import Dyn, is_consistent, TensorType, is_more_precise
from typing import Callable, Dict
from torch.fx.node import Target, Node
from torch.nn.modules.batchnorm import BatchNorm2d
from torch.nn.modules.conv import Conv2d
from torch.fx.experimental.refinement_types import Equality
import itertools
from torch.fx.experimental.unification import Var # type: ignore[attr-defined]
try:
import sympy # type: ignore[import]
HAS_SYMPY = True
except ImportError:
HAS_SYMPY = False
_INFERENCE_RULES: Dict[Target, Callable] = {}
_REFINEMENT_RULES: Dict[Target, Callable] = {}
_RULES: Dict[Target, Callable] = {}
def expand_to_tensor_dim(t, n):
"""
    Expand a type to the desired tensor dimension if possible;
    raise an error otherwise.
- t is the given type
- n is a number of dimensions to expand to
"""
if t == Dyn:
dims = [Dyn] * n
return TensorType(tuple(dims))
elif isinstance(t, TensorType):
if len(t.__args__) != n:
raise TypeError(f'Cannot extend tensor. Tensor {t} has rank {len(t.__args__)}. It should have rank {n}')
return t
else:
raise TypeError(f'Cannot match the type {t}')
def broadcast_types(t1, t2):
"""
Applies broadcasting to both given types such that they
    become consistent with each other and returns two new
resulting types
"""
# if either type is Dyn, do nothing since the types are already consistent
if t1 == Dyn or t2 == Dyn or isinstance(t1, Var) or isinstance(t2, Var):
return t1, t2
if isinstance(t1, TensorType) and isinstance(t2, TensorType):
s1 = len(t1.__args__)
s2 = len(t2.__args__)
new_t1 = list(t1.__args__)
new_t2 = list(t2.__args__)
# We make the types the same length which is the first requirement
# for consistency
if s1 > s2:
for i in range(s1 - s2):
new_t2.insert(0, 1)
elif s2 > s1:
for i in range(s2 - s1):
new_t1.insert(0, 1)
        # we replace occurrences of "1" in each tensor with
        # the corresponding dimension from the other tensor
for i, (x, y) in enumerate(zip(new_t1, new_t2)):
if x == 1:
new_t1[i] = y
elif y == 1:
new_t2[i] = x
# at this point our tensors should be consistent
# and we can apply the element-wise operation and find the right dimension
# for the output of the operation
(t1, t2) = TensorType(tuple(new_t1)), TensorType(tuple(new_t2))
return (t1, t2)
else:
raise TypeError(f'Cannot broadcast types {t1} and {t2}')
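# Illustrative example (not part of the original module): broadcasting pads the
# shorter type with leading 1s and then copies the concrete dimension across
# each occurrence of 1, e.g.
#   broadcast_types(TensorType((2, 3, 4)), TensorType((3, 4)))
#   # returns (TensorType((2, 3, 4)), TensorType((2, 3, 4)))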
def register_inference_rule(call_target):
def register(fn):
if call_target in _INFERENCE_RULES:
raise RuntimeError(f'Inference rule already registered for {call_target}!')
_INFERENCE_RULES[call_target] = fn
return fn
return register
def register_refinement_rule(call_target):
def register(fn):
if call_target in _REFINEMENT_RULES:
raise RuntimeError(f'Refinement rule already registered for {call_target}!')
_REFINEMENT_RULES[call_target] = fn
return fn
return register
def register_algebraic_expressions_inference_rule(call_target):
def register(fn):
if call_target in _RULES:
raise RuntimeError(f'Rule already registered for {call_target}!')
_RULES[call_target] = fn
return fn
return register
@register_inference_rule(torch.add)
@register_inference_rule(operator.add)
def add_inference_rule(n: Node):
"""
Apply the addition inference rule. This includes:
- scalar addition
- broadcasting semantics
Note that we always return the least precise type between
the operands (after applying broadcasting) to be the final type of the operation
Note that we do not modify the operand types themselves after applying broadcasting
to them. We only use them to calculate the final type
"""
assert isinstance(n.args[0], Node)
assert isinstance(n.args[1], Node)
t1 = n.args[0].type
t2 = n.args[1].type
# handle scalar addition
if t1 == int and isinstance(t2, TensorType):
n.type = t2
return n.type
# handle scalar addition
elif t2 == int and isinstance(t1, TensorType):
n.type = t1
return n.type
# we bring the new types to the point where
# we can check for consistency
# any inconsistency would not have been caused
# by broadcasting at this point
(new_t1, new_t2) = broadcast_types(t1, t2)
if new_t1 != t1 or new_t2 != t2:
n.meta['broadcast'] = True
n.meta[str(n.args[0])] = new_t1
n.meta[str(n.args[1])] = new_t2
else:
n.meta['broadcast'] = False
new_t1 = t1 if not n.meta['broadcast'] else new_t1
new_t2 = t2 if not n.meta['broadcast'] else new_t2
# we check for consistency between the new types
if is_consistent(new_t1, new_t2):
# we return the less precise type because
# broadcasting may have happened
# for operands with shape [1,2,Dyn] and [1,2,1]
# we have to assign the node [1,2,Dyn]
if is_more_precise(new_t1, new_t2):
n.type = new_t2
else:
n.type = new_t1
return n.type
else:
        raise TypeError(f'Cannot add arguments {n.args[0]} ({n.args[0].type}) and {n.args[1]} ({n.args[1].type}) in node {n}.'
                        f' Types should match.')
@register_inference_rule(getattr)
def get_attr_inference_rule(n: Node, traced):
"""
The current getattr rule only handles the shape attribute
Can be extended to other attributes
    The most representative type we have is "Dyn" but the system
can be extended with more types, such as a type to represent shapes
"""
attr_node = n.args[0]
attr_name = n.args[1]
if attr_name == "shape":
n.type = Dyn
else:
raise TypeError("Not yet implelemted")
# TODO. We leave it like this till we add a type to represent tensor sizes
return n.type
@register_inference_rule(torch.transpose)
def transpose_inference_rule(n: Node):
"""
    We check that the dimensions for the transpose operation
    are within the range of the tensor type of the node
"""
if n.target == torch.transpose:
assert isinstance(n.args[0], Node)
t = n.args[0].type
assert isinstance(n.args[1], int)
assert isinstance(n.args[2], int)
dim1, dim2 = n.args[1], n.args[2]
if t == Dyn:
n.type = Dyn
return n.type
elif isinstance(t, TensorType):
if 0 <= dim1 < len(t.__args__) and 0 <= dim2 < len(t.__args__):
new_type = list(t.__args__)
new_type[dim1], new_type[dim2] = new_type[dim2], new_type[dim1]
final = TensorType(new_type)
n.type = get_greatest_upper_bound(n.type, final)
return n.type
else:
raise TypeError(f'Cannot transpose {dim1} and {dim2} in type {t} for node {n}')
else:
raise TypeError(f'Cannot transpose {dim1} and {dim2} in type {t} for node {n}')
@register_inference_rule(torch.reshape)
def reshape_inference_rule(n: Node):
"""
Without dynamism, the rule checks that the
product of the elements of the argument tensor
type is equal to the product of the elements
of the required shape. We gradualize this rule
by adding a case to handle fully dynamic input
as well as input where some of the tensor dimensions
are unknown. In this case we check for divisibility
"""
assert isinstance(n.args[0], Node)
t1 = n.args[0].type
assert isinstance(n.args[1], list)
t2 = n.args[1]
t2_type = TensorType([Dyn if elem == -1 else elem for elem in t2])
# if we do not know the original tensor dimension,
# we return the required dimension
if t1 == Dyn:
n.type = t2_type
return t2_type
# if any of the dimensions are unknown,
# we check for divisibility
elif isinstance(t1, TensorType):
assert isinstance(t1, TensorType)
a = [e if e != Dyn else 1 for e in t1.__args__]
p1 = reduce(lambda x, y: x * y, a)
p2 = reduce(lambda x, y: x * y, t2)
if p1 % p2 == 0 or p2 % p1 == 0:
n.type = t2_type
return t2_type
else:
raise TypeError(f'Cannot reshape in node {n} from {t1} to {t2_type}')
else:
raise TypeError(f'Cannot reshape in node {n} from {t1} to {t2_type}')
@register_inference_rule(BatchNorm2d)
def bn2d_inference_rule(n: Node, module_instance):
"""
Given a BatchNorm2D instance and a node check the following conditions:
- the input type can be expanded to a size 4 tensor: t = (x_1, x_2, x_3, x_4)
- the current node type can be expanded to a size 4 tensor: t' = (x_1', x_2', x_3', x_4')
- t is consistent with t'
- x_2 is consistent with the module's num_features
- x_2' is consistent with the module's num_features
output type: the more precise type of t and t'
"""
assert isinstance(n.args[0], Node)
n.args[0].type = expand_to_tensor_dim(n.args[0].type, 4)
arg_type = n.args[0].type
n.type = expand_to_tensor_dim(n.type, 4)
# we check the conditions on the incoming argument
# and any existing annotation
# we also check for consistency between both annotations
if is_consistent(arg_type.__args__[1], module_instance.num_features) and \
is_consistent(n.type.__args__[1], module_instance.num_features) and \
is_consistent(arg_type, n.type):
# we choose the more precise type
# to be the node type
# so if an incoming argument has more type information
# we set this node's type to be the argument type
n.type = get_greatest_upper_bound(arg_type, n.type)
return n.type
else:
raise TypeError(f'Cannot apply {module_instance} with input type {arg_type} and existing type {n.type} on {n}')
def calculate_out_dimension(d_in, module_instance, index):
"""
    For calculating h_out and w_out according to the Conv2d documentation
"""
padding = (module_instance.padding, module_instance.padding) \
if isinstance(module_instance.padding, int) else module_instance.padding
kernel_size = (module_instance.kernel_size, module_instance.kernel_size) \
if isinstance(module_instance.kernel_size, int) else module_instance.kernel_size
stride = (module_instance.stride, module_instance.stride) \
if isinstance(module_instance.stride, int) else module_instance.stride
dilation = (module_instance.dilation, module_instance.dilation) \
if isinstance(module_instance.dilation, int) else module_instance.dilation
DIMENSION_TYPES = (int, sympy.Symbol) if HAS_SYMPY else (int,)
if d_in == Dyn:
return Dyn
elif isinstance(d_in, DIMENSION_TYPES):
n = d_in + 2 * padding[index] - \
dilation[index] * \
(kernel_size[index] - 1) - 1
        # use the stride for this dimension, matching padding/dilation/kernel_size above
        return (n // stride[index]) + 1
else:
raise TypeError(f'{d_in} in {module_instance} must be a number or Dyn. Received {type(d_in)}')
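# Illustrative arithmetic check (assumed values, not part of the original module):
# for kernel_size=3, stride=2, padding=1, dilation=1 and an input height of 56,
# the formula above gives ((56 + 2*1 - 1*(3 - 1) - 1) // 2) + 1 = 28.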
def get_greatest_upper_bound(type1, type2):
"""
Get the most precise type that's consistent with the given types
"""
if type1 == Dyn:
return type2
elif type2 == Dyn:
return type1
elif isinstance(type1, TensorType) and isinstance(type2, TensorType):
if not is_consistent(type1, type2):
raise TypeError(f'Inconsistent types {type1}, {type2}')
gub = [t1 if is_more_precise(t1, t2) else t2 for (t1, t2) in zip(type1.__args__, type2.__args__)]
return TensorType(tuple(gub))
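# Illustrative example (not part of the original module):
#   get_greatest_upper_bound(TensorType((1, 2, Dyn)), TensorType((1, Dyn, 3)))
#   returns TensorType((1, 2, 3)), keeping the more precise dimension at each position.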
@register_inference_rule(Conv2d)
def conv2d_inference_rule(n: Node, module_instance):
"""
Given a Conv2D instance and a node check the following conditions:
- the input type can be expanded to a size 4 tensor: t = (x_1, x_2, H, W)
- the current node type can be expanded to a size 4 tensor: t' = (x_1', x_2', x_3', x_4')
- x_2 is consistent with the module's in_channels
- let o = (x_1, out_channels, H_out, W_out)
then the output is the greatest upper bound of o and the existing node type t'.
"""
assert isinstance(n.args[0], Node)
n.args[0].type = expand_to_tensor_dim(n.args[0].type, 4)
arg_type = n.args[0].type
curr_node_type = expand_to_tensor_dim(n.type, 4)
if is_consistent(arg_type.__args__[1], module_instance.in_channels):
w_in = arg_type.__args__[3]
h_in = arg_type.__args__[2]
h_out = calculate_out_dimension(h_in, module_instance, 0)
w_out = calculate_out_dimension(w_in, module_instance, 1)
new_type = TensorType((arg_type.__args__[0], module_instance.out_channels, h_out, w_out))
gub = get_greatest_upper_bound(new_type, curr_node_type)
n.type = gub
return n.type
else:
        raise TypeError(f'Cannot apply {module_instance} with input type {arg_type} and existing type {n.type} on {n}')
@register_inference_rule(torch.nn.ReLU)
def relu_inference_rule(n: Node, module_instance):
"""
Input and output shapes should be equal.
"""
assert isinstance(n.args[0], Node)
if n.args[0].type == Dyn and isinstance(n.type, TensorType):
n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
if isinstance(n.args[0].type, TensorType):
n.type = get_greatest_upper_bound(n.args[0].type, n.type)
return n.type
def maxpool2d_check(typ, module_instance):
"""
    Applies the maxpool2d shape information to the input;
    this affects the last two dimensions
"""
new_type_list = list(typ.__args__)
if len(new_type_list) == 4 or len(new_type_list) == 3:
w_in = new_type_list[-1]
h_in = new_type_list[-2]
h_out = calculate_out_dimension(h_in, module_instance, 0)
w_out = calculate_out_dimension(w_in, module_instance, 1)
new_type_list[-1] = w_out
new_type_list[-2] = h_out
return TensorType(tuple(new_type_list))
else:
raise TypeError(f'Wrong size {typ} for {module_instance}')
@register_inference_rule(torch.nn.MaxPool2d)
def maxpool2d_inference_rule(n: Node, module_instance):
"""
    Given a MaxPool2d instance and a node, check the following conditions:
    - The input rank is 3 or 4
    - The current node type is consistent with the output type we will calculate
    - The output has the same rank as the input; its last two dimensions are
      h_out and w_out, and the remaining dimensions are the same as the input's
    - Our final result is the greatest upper bound of the output we calculate
      and the current node type.
"""
assert isinstance(n.args[0], Node)
if n.args[0].type == Dyn and isinstance(n.type, TensorType):
n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
if isinstance(n.args[0].type, TensorType):
output = maxpool2d_check(n.args[0].type, module_instance)
n.type = get_greatest_upper_bound(output, n.type)
return n.type
def linear_check(tensor_type, module_instance):
"""
Checks that an input tensor type satisfies the conditions for linear operation
and returns the output type based on in and out features given by module_instance
"""
if len(tensor_type.__args__) >= 2:
if is_consistent(module_instance.in_features, tensor_type.__args__[-1]):
new_type_args = list(tensor_type.__args__)
new_type_args[-1] = module_instance.out_features
return TensorType(tuple(new_type_args))
else:
raise TypeError(f'Inconsistent {module_instance.in_features} and {tensor_type.__args__[-1]} in {module_instance}')
else:
raise TypeError(f'Type {tensor_type} must have rank 2 or more.')
@register_inference_rule(torch.nn.Linear)
def linear_inference_rule(n: Node, module_instance):
"""
Applies the shape information to the input then gets the greatest upper bound
of the resulting type and the existing type
"""
assert isinstance(n.args[0], Node)
if n.args[0].type == Dyn and isinstance(n.type, TensorType):
n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
if isinstance(n.args[0].type, TensorType):
output_type = linear_check(n.args[0].type, module_instance)
n.type = get_greatest_upper_bound(output_type, n.type)
return n.type
def adaptiveavgpool2d_check(tensor_type, module_instance):
output_size = module_instance.output_size
if isinstance(output_size, int):
output_size = [output_size, output_size]
elif isinstance(output_size, tuple):
output_size = list(output_size)
if output_size[0] is None:
output_size[0] = output_size[1]
if output_size[1] is None:
output_size[1] = output_size[0]
new_type_list = list(tensor_type.__args__)
if len(tensor_type.__args__) == 4 or len(tensor_type.__args__) == 3:
new_type_list[-1] = output_size[1]
new_type_list[-2] = output_size[0]
return TensorType(tuple(new_type_list))
else:
raise TypeError(f'Tensor ranks must be 3 or 4. Got {tensor_type}')
@register_inference_rule(torch.nn.AdaptiveAvgPool2d)
def adaptiveavgpool2d_inference_rule(n: Node, module_instance):
"""
    The output size is the same as the input size, except for the last
    two dimensions (height and width), which are taken from the module's output_size
"""
assert isinstance(n.args[0], Node)
if n.args[0].type == Dyn and isinstance(n.type, TensorType):
n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
if isinstance(n.args[0].type, TensorType):
output_type = adaptiveavgpool2d_check(n.args[0].type, module_instance)
n.type = get_greatest_upper_bound(n.type, output_type)
return n.type
def flatten_check(tensor_type, start_dim, end_dim):
l = len(tensor_type.__args__)
start_dim = l if start_dim == -1 else abs(start_dim)
end_dim = l + end_dim + 1 if end_dim < 0 else end_dim + 1
if 0 <= start_dim <= (l - 1) and 0 <= end_dim <= l and start_dim < end_dim:
my_args = list(tensor_type.__args__)
lhs = my_args[0:start_dim]
rhs = my_args[end_dim:]
mid = my_args[start_dim:end_dim]
if Dyn in mid:
mid = [Dyn]
else:
mid = [reduce(lambda x, y: x * y, my_args[start_dim:end_dim])]
new_type_list = lhs + mid + rhs
return TensorType(tuple(new_type_list))
else:
        raise TypeError(f'Incompatible dimensions {start_dim}, {end_dim - 1} in type {tensor_type}')
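# Illustrative example (not part of the original module):
#   flatten_check(TensorType((2, 3, 4, 5)), start_dim=1, end_dim=-1)
#   collapses dimensions 1..3 into 3 * 4 * 5 = 60 and returns TensorType((2, 60)).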
@register_inference_rule(torch.flatten)
def flatten_inference_rule(n: Node):
"""
Applies the flatten shape information to the input then gets the
greatest upper bound of the resulting type and the existing type
"""
assert isinstance(n.args[0], Node)
# set the default start and end dims
start_dim = 1
end_dim = -1
if len(n.args) > 1:
assert isinstance(n.args[1], int)
start_dim = n.args[1]
if len(n.args) > 2:
assert isinstance(n.args[2], int)
end_dim = n.args[2]
if n.args[0].type == Dyn and isinstance(n.type, TensorType):
n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))
if isinstance(n.args[0].type, TensorType):
output_type = flatten_check(n.args[0].type, start_dim, end_dim)
n.type = get_greatest_upper_bound(output_type , n.type)
return n.type
class GraphTypeChecker:
def __init__(self, env, traced):
self.env = env
self.traced = traced
def type_check(self):
"""
A gradual type checker for graphs
Effect: every node's field type will be
populated with a type after type-checking is done
"""
graph = self.traced.graph
# type check every node with gradual type rules
# if any node does not type check return false
for n in graph.nodes:
self.type_check_node(n)
return True
def type_check_node(self, n: Node):
"""
Type check a given fx node.
Current operations:
- Reshape
- Transpose
- Add
- Relu
- conv2d
- batchnorm2d
- flatten
- maxpool2d
- adaptiveavgpool2d
- linear
"""
if n.type is None:
n.type = Dyn
if n.op == 'placeholder':
return n.type
elif n.op == 'get_attr':
t = get_parameter(self.traced, n.target) # type: ignore[arg-type]
if isinstance(t.data, torch.Tensor):
n.type = TensorType(t.data.shape)
return n.type
elif n.op == 'call_function':
if n.target == getattr:
assert getattr in _INFERENCE_RULES
return _INFERENCE_RULES[n.target](n, self.traced)
elif n.target in _INFERENCE_RULES:
return _INFERENCE_RULES[n.target](n)
else:
raise RuntimeError(f'No inference rule registered for target {n.target}!')
elif n.op == 'call_module':
module_instance = self.traced.get_submodule(n.target)
if type(module_instance) in _INFERENCE_RULES:
return _INFERENCE_RULES[type(module_instance)](n, module_instance)
else:
raise RuntimeError(f'No inference rule registered for class {type(module_instance)}!')
elif n.op == 'output':
def get_node_type(a):
return a.type
n.type = torch.fx.node.map_arg(n.args[0], get_node_type)
return n.type
else:
raise NotImplementedError(f"Method {n.op} not yet implemented")
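# Illustrative usage sketch (assumed module `my_module`, not part of the original file):
#   traced = torch.fx.symbolic_trace(my_module)
#   checker = GraphTypeChecker({}, traced)
#   checker.type_check()   # populates every node's `.type` with a TensorType or Dyn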
@register_refinement_rule(Conv2d)
def conv_refinement_rule(n: Node):
"""
The equality constraints are between the first dimension of
the input and output
"""
res = []
assert isinstance(n.args[0], Node)
arg_type = n.args[0].type
if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
res = [Equality(arg_type.__args__[0], n.type.__args__[0])]
return res
@register_refinement_rule(torch.nn.Linear)
def linear_refinement_rule(n: Node):
"""
The equality constraints are between the first dimension of
the input and output
"""
res = []
assert isinstance(n.args[0], Node)
arg_type = n.args[0].type
if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
res = [Equality(arg_type.__args__[0], n.type.__args__[0])]
return res
@register_refinement_rule(BatchNorm2d)
@register_refinement_rule(torch.nn.ReLU)
def all_eq(n: Node):
"""
For operations where the input shape is equal to the output shape
"""
res = []
assert isinstance(n.args[0], Node)
arg_type = n.args[0].type
if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
args1 = arg_type.__args__
args2 = n.type.__args__
res = [Equality(args1[i], args2[i]) for i in range(len(args1))]
return res
@register_refinement_rule(torch.nn.AdaptiveAvgPool2d)
@register_refinement_rule(torch.nn.MaxPool2d)
def first_two_eq(n: Node):
"""
For operations where the first two dimensions of the input and output shape
are equal
"""
res = []
assert isinstance(n.args[0], Node)
arg_type = n.args[0].type
if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
args1 = arg_type.__args__
args2 = n.type.__args__
res = [Equality(args1[0], args2[0]), Equality(args1[1], args2[1])]
return res
@register_refinement_rule(torch.add)
@register_refinement_rule(operator.add)
def element_wise_eq(n: Node):
"""
    For element-wise operations; handles broadcasting.
    Note that after applying broadcasting to the arguments
    we are able to determine that certain dimensions have not been broadcast
    when they are symbolically equal.
    In this case, we can establish equality between those dimensions and the
    corresponding output dimensions.
    Note that it takes two iterations to reach this result. One iteration establishes
    equality between certain dimensions of the operands (requiring the whole solver,
    including unification) and another iteration establishes equality between the operands
    and the resulting type, requiring another round of constraint generation and unification.
"""
res = []
if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
arg_type1 = n.args[0].type
arg_type2 = n.args[1].type
if isinstance(arg_type1, TensorType) and isinstance(arg_type2, TensorType) and isinstance(n.type, TensorType):
args1, args2 = broadcast_types(arg_type1, arg_type2)
# by this point, we know that args1 and args2 are the same size.
a1 = args1.__args__
a2 = args2.__args__
a3 = n.type.__args__
# we would be here in the second iteration where we establish equality
# between operand type dimensions and the resulting type dimensions
r = []
for x, y, z in zip(a1, a2, a3):
if x == y:
r.append(Equality(x, z))
res = r
return res
@register_refinement_rule(torch.flatten)
def flatten_refinement_rule(n: Node):
"""
Generates equality constraints between the dimensions of the input and output
that will not be involved in the flatten operation
"""
assert isinstance(n.args[0], Node)
eq_const = []
start_dim = 1
end_dim = -1
if len(n.args) > 1:
assert isinstance(n.args[1], int)
start_dim = n.args[1]
if len(n.args) > 2:
assert isinstance(n.args[2], int)
end_dim = n.args[2]
if isinstance(n.type, TensorType) and isinstance(n.args[0].type, TensorType):
l = len(n.type.__args__)
arg_type = n.args[0].type
start_dim = l if start_dim == -1 else start_dim
end_dim = l + end_dim + 1 if end_dim < 0 else end_dim + 1
for t1, t2 in zip(n.type.__args__[0:start_dim], arg_type.__args__[0:start_dim]):
eq_const.append(Equality(t1, t2))
for t1, t2 in zip(n.type.__args__[end_dim:], arg_type.__args__[end_dim:]):
eq_const.append(Equality(t1, t2))
return eq_const
@register_algebraic_expressions_inference_rule(Conv2d)
def conv_rule(n: Node, module_instance):
"""
    Represents the output in terms of an algebraic expression w.r.t.
    the input when possible
"""
assert isinstance(n.args[0], Node)
arg_type = n.args[0].type
if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):
w_in = arg_type.__args__[3]
h_in = arg_type.__args__[2]
h_out = calculate_out_dimension(h_in, module_instance, 0)
w_out = calculate_out_dimension(w_in, module_instance, 1)
new_type = TensorType((n.type.__args__[0], n.type.__args__[1], h_out, w_out))
n.type = new_type
return new_type
class Refine:
"""
Symbolic shape inference.
Generates constraints over type variables.
Currently all constraints are equality constraints.
"""
def __init__(self, traced):
self.constraints = []
self.traced = traced
self.symbol_iter = itertools.count(start=0, step=1)
def refine(self):
"""
Generates constraints for
every node in the graph based on
the operation.
"""
graph = self.traced.graph
for n in graph.nodes:
self.refine_node(n)
return True
def symbolic_relations(self):
"""
Infers algebraic relations
"""
graph = self.traced.graph
for n in graph.nodes:
self.infer_symbolic_relations(n)
return True
def replace_dyn_with_fresh_var(self, typ):
"""
Replace all unknown types with fresh type variables.
"""
if typ == Dyn:
new_symbol = Var(next(self.symbol_iter))
return new_symbol
elif isinstance(typ, TensorType):
new_args = [self.replace_dyn_with_fresh_var(a) for a in typ.__args__]
return TensorType(tuple(new_args))
elif isinstance(typ, list):
return [self.replace_dyn_with_fresh_var(t) for t in typ]
elif isinstance(typ, tuple):
            return tuple(self.replace_dyn_with_fresh_var(t) for t in typ)
else:
return typ
def convert_to_sympy_symbols(self, typ):
"""
Replace all unknown types with fresh type variables.
"""
if HAS_SYMPY:
if isinstance(typ, Var):
return sympy.symbols(str(typ))
elif isinstance(typ, TensorType):
new_args = [self.convert_to_sympy_symbols(a) for a in typ.__args__]
return TensorType(tuple(new_args))
elif isinstance(typ, list):
return [self.convert_to_sympy_symbols(t) for t in typ]
elif isinstance(typ, tuple):
                return tuple(self.convert_to_sympy_symbols(t) for t in typ)
else:
return typ
else:
return typ
def refine_node(self, n: Node):
"""
Returns a list of equality constraints for
call_module and call_function nodes.
Models the relation between input and output dimensions
using constraints in case they are both tensors.
All operations used in resnet50 are defined.
"""
if n.type is None:
n.type = Dyn
n.type = self.replace_dyn_with_fresh_var(n.type)
if n.op == 'call_function':
if n.target in _REFINEMENT_RULES:
self.constraints += _REFINEMENT_RULES[n.target](n)
else:
pass
if n.op == 'call_module':
module_instance = self.traced.get_submodule(n.target)
if type(module_instance) in _REFINEMENT_RULES:
self.constraints += _REFINEMENT_RULES[type(module_instance)](n)
else:
pass
if n.op == 'output':
def get_node_type(a):
return a.type
n.type = torch.fx.node.map_arg(n.args[0], get_node_type)
return n.type
else:
pass
def infer_symbolic_relations(self, n: Node):
if HAS_SYMPY:
n.type = self.convert_to_sympy_symbols(n.type)
if n.op == 'call_function':
if n.target in _RULES:
return _RULES[n.target](n)
else:
pass
if n.op == 'call_module':
module_instance = self.traced.get_submodule(n.target)
if type(module_instance) in _RULES:
return _RULES[type(module_instance)](n, module_instance)
else:
pass
if n.op == 'output':
def get_node_type(a):
return a.type
n.type = torch.fx.node.map_arg(n.args[0], get_node_type)
return n.type
else:
pass
else:
pass
def get_parameter(traced, target: str):
"""
Returns the parameter given by ``target`` if it exists,
otherwise throws an error.
See the docstring for ``get_submodule`` for a more detailed
explanation of this method's functionality as well as how to
correctly specify ``target``.
Args:
target: The fully-qualified string name of the Parameter
to look for. (See ``get_submodule`` for how to specify a
fully-qualified string.)
Returns:
torch.nn.Parameter: The Parameter referenced by ``target``
Raises:
AttributeError: If the target string references an invalid
path or resolves to something that is not an
``nn.Parameter``
"""
module_path, _, param_name = target.rpartition(".")
mod: torch.nn.Module = traced.get_submodule(module_path)
if not hasattr(mod, param_name):
raise AttributeError(mod._get_name() + " has no attribute `" + param_name + "`")
param: torch.nn.Parameter = getattr(mod, param_name)
return param
| pytorch-master | torch/fx/experimental/graph_gradual_typechecker.py |
| pytorch-master | torch/fx/experimental/__init__.py |
|
from torch.fx.experimental.graph_gradual_typechecker import Refine
from torch.fx.tensor_type import TensorType
from torch.fx.experimental.unification import Var, unify # type: ignore[attr-defined]
def infer_symbolic_types_single_pass(traced):
"""
Calls our symbolic inferencer once.
"""
r = Refine(traced)
r.refine()
mgu = unify_eq(r.constraints)
substitute_all_types(traced.graph, mgu)
def infer_symbolic_types(traced):
"""
Calls our symbolic inferencer twice.
This is useful when one pass is not enough
to infer all the information such as the case
    for broadcasting.
"""
r = Refine(traced)
r.refine()
mgu = unify_eq(r.constraints)
substitute_all_types(traced.graph, mgu)
r = Refine(traced)
r.refine()
mgu = unify_eq(r.constraints)
substitute_all_types(traced.graph, mgu)
r.symbolic_relations()
def convert_eq(list_of_eq):
"""
Convert equality constraints in the right format
to be used by unification library.
"""
lhs = []
rhs = []
for eq in list_of_eq:
lhs.append(eq.lhs)
rhs.append(eq.rhs)
return tuple(lhs), tuple(rhs)
def unify_eq(list_of_eq):
"""
Apply unification to a set of
equality constraints
"""
lhs, rhs = convert_eq(list_of_eq)
return unify(lhs, rhs)
def substitute_solution_one_type(mapping, t):
"""
Apply the most general unifier to a type
"""
if isinstance(t, Var):
if t in mapping.keys():
return mapping[t]
else:
return t
elif isinstance(t, TensorType):
new_type = []
for typ in t.__args__:
if typ in mapping.keys():
new_type.append(mapping[typ])
else:
new_type.append(typ)
return TensorType(tuple(new_type))
elif isinstance(t, list):
new_type = []
for typ in t:
new_type.append(substitute_solution_one_type(mapping, typ))
return new_type
elif isinstance(t, tuple):
new_type = []
for typ in t:
new_type.append(substitute_solution_one_type(mapping, typ))
return tuple(new_type)
else:
return t
def substitute_all_types(graph, mapping):
"""
Apply the most general unifier to all types in a graph
till reaching a fixed point. If the input and output graph
are the same, we converge.
"""
flag = True
while flag:
flag = False
for k in mapping:
old_mapping_val = mapping[k]
if mapping[k] in mapping.keys():
new_key = mapping[k]
mapping[k] = mapping[new_key]
if old_mapping_val != mapping[k]:
flag = True
for n in graph.nodes:
n.type = substitute_solution_one_type(mapping, n.type)
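# Illustrative note (not part of the original file): if the most general unifier
# maps y -> x and x -> 3, the loop above first collapses the chain so that y -> 3
# (its value was itself a key in the mapping), ensuring every node type receives
# fully resolved dimensions.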
def check_for_type_equality(g1, g2):
"""
    An equality check to be used in fixed-point computations.
    We do not use graph equality; instead we compare
    node types.
"""
for n, m in zip(g1.nodes, g2.nodes):
if n.type != m.type:
return False
return True
| pytorch-master | torch/fx/experimental/unify_refinements.py |
import torch
from torch.fx.node import Node
from torch.fx._symbolic_trace import symbolic_trace
from torch.fx.passes.tools_common import legalize_graph
import itertools
import operator
from typing import Dict, List
def split_result_tensors(result: torch.Tensor, inputs: List[torch.Tensor]) -> List[torch.Tensor]:
"""
A free function for use in the merge_matmul graph transformation below that
splits the output from a merged matmul into the individual results for each
input tensor.
Arguments:
result: The merged matmul result tensor.
inputs: The list of inputs that were merged into one for the matmul.
Returns:
List of matmul results for each input tensor.
"""
# When fx tracer is running, x.shape[0] will be torch.fx.Attribute but we
# need an int even when tracing
if isinstance(result, torch.fx.Proxy):
splits = [0] * len(inputs)
else:
splits = [x.shape[0] for x in inputs]
return torch.split(result, splits)
def may_depend_on(a: Node, b: Node, search_depth: int = 6):
"""
Determine if one node depends on another in a torch.fx.Graph.
Arguments:
a: The node that may have a dependency on b.
b: The node that a may have a dependency on.
        search_depth: In the case of an indirect dependency, this function
                      searches up to this many nodes away in search of a
                      data dependency. If none is found, the function
                      makes the conservative assumption that there is a
                      dependency.
Returns:
True if a may depend on b, False if it definitely does not.
"""
# Equivalence is defined as dependence.
if a == b:
return True
# If a has no inputs, it cannot depend on b.
if len(a.all_input_nodes) == 0:
return False
# If the search depth has been exhausted and no conclusion has been
# reached, assume that there is a data dependency.
if search_depth == 0:
return True
# Recursively check all inputs of a.
for inp in a.all_input_nodes:
if may_depend_on(inp, b, search_depth - 1):
return True
return False
def are_nodes_independent(nodes: List[Node]):
"""
Check if all of the given nodes are pairwise-data independent.
Arguments:
nodes: The nodes to check for data dependencies.
    Returns:
        True if no pair of nodes has a data dependency; False otherwise.
"""
# For each pair in nodes:
for i, j in itertools.combinations(nodes, 2):
if may_depend_on(i, j) or may_depend_on(j, i):
return False
return True
def merge_matmul(in_mod: torch.nn.Module):
"""
A graph transformation that merges matrix multiplication operations that share the same right-hand
side operand into one large matrix multiplication.
    Given matmuls A @ C (A is M x K) and B @ C (B is T x K) that share the
    right-hand side C (K x R), the transformation concatenates A and B into a
    single (M + T) x K matrix, performs one matmul with C, and then splits the
    (M + T) x R result back into A @ C and B @ C.
"""
gm = symbolic_trace(in_mod)
rhs_users: Dict[Node, List[Node]] = {}
lhs_users: Dict[Node, List[Node]] = {}
# Populate rhs_users and lhs_users - maps from LHS/RHS matrix multiply operands to
# the matmul of which they are the LHS/RHS.
for node in gm.graph.nodes:
if node.op != "call_function" or node.target is not torch.matmul:
continue
lhs, rhs = node.args
# TODO: Properly handle aliasing caused by get_attr. For now,
# use the attribute name as the operand if the node is a
# get_attr.
lhs = lhs.target if lhs.op == "get_attr" else lhs
rhs = rhs.target if rhs.op == "get_attr" else rhs
lhs_users.setdefault(lhs, []).append(node)
rhs_users.setdefault(rhs, []).append(node)
for rhs, mms in rhs_users.items():
        # There must be at least two matmuls for a merge to make sense.
if len(mms) < 2:
continue
# All matmuls must not depend on each other directly or indirectly
# in order for the merge to be possible.
if not are_nodes_independent(mms):
continue
lhs_vals = [mm.args[0] for mm in mms]
# Merge the matmul.
# Collect a list of LHS operands and the single RHS operand.
lhs = [gm.graph.get_attr(l) if isinstance(l, str) else l for l in lhs_vals]
rhs = gm.graph.get_attr(rhs) if isinstance(rhs, str) else rhs
# Concatenate all the LHS operands.
merge_mm_cat = gm.graph.call_function(torch.cat, (lhs,), {})
# Multiply the concatenated LHS operands with the one RHS. This will produce
# the same results as all the individual matmuls involving rhs in the original graph,
# but they will all be concatenated together.
merge_mm = gm.graph.call_function(torch.matmul, (merge_mm_cat, rhs,), {})
# Split the result of the merged matmul using the shapes of the LHS operands
# to ascertain how large each chunk should be.
merge_mm_split = gm.graph.call_function(
split_result_tensors, (merge_mm, lhs), {}
)
merge_mm_res = [
gm.graph.call_function(operator.getitem, (merge_mm_split, out), {})
for out in range(len(lhs))
]
# Replace all uses of the original, unmerged matmuls with the equivalent split chunk from the merged matmul.
for old, new in zip(mms, merge_mm_res):
old.replace_all_uses_with(new)
gm.graph.erase_node(old)
# All of the new nodes created above were inserted at the end, so we need to sort
# the nodes topologically to make sure all definitions precede uses.
legalize_graph(gm)
gm.recompile()
gm.graph.lint()
return gm
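# Illustrative usage sketch (assumed example module, not part of the original file):
#   class TwoMatmuls(torch.nn.Module):
#       def __init__(self):
#           super().__init__()
#           self.rhs = torch.nn.Parameter(torch.randn(8, 4))
#       def forward(self, a, b):
#           return torch.matmul(a, self.rhs), torch.matmul(b, self.rhs)
#   merged = merge_matmul(TwoMatmuls())
#   out_a, out_b = merged(torch.randn(3, 8), torch.randn(5, 8))
# Both matmuls share `self.rhs`, so the transformed graph concatenates the two LHS
# tensors, runs a single matmul, and splits the result back into the two outputs.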
| pytorch-master | torch/fx/experimental/merge_matmul.py |
import torch.fx as fx
def set_trace(gm: fx.GraphModule) -> fx.GraphModule:
"""
Sets a breakpoint in `gm`'s generated python code. It drops into pdb when
`gm` gets run.
Args:
        gm: graph module to insert the breakpoint into. It is then recompiled
            for the change to take effect.
Returns:
the `gm` with breakpoint inserted.
"""
def insert_pdb(body):
return ["import pdb; pdb.set_trace()\n", *body]
with gm.graph.on_generate_code(
make_transformer=lambda cur_transform: (
# new code transformer to register
lambda body: (
insert_pdb(
cur_transform(body) if cur_transform
else body
)
)
)
):
gm.recompile()
return gm
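# Illustrative usage sketch (assumed module `my_module`, not part of the original file):
#   gm = torch.fx.symbolic_trace(my_module)
#   gm = set_trace(gm)
#   gm(example_input)   # drops into pdb before the generated forward body runs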
| pytorch-master | torch/fx/experimental/debug.py |
import torch
import torch.fx
import inspect
from typing import Any, Dict, Optional, Tuple
from torch.fx.node import Argument, Target
from torch._jit_internal import boolean_dispatched
from torch.fx.operator_schemas import _torchscript_type_to_python_type
from torch.fx import Transformer
class AnnotateTypesWithSchema(Transformer):
"""
Use Python function signatures to annotate types for `Nodes` within an FX graph.
This pulls out Python function signatures for:
1. Standard `torch.nn` Module calls
2. `torch.nn.functional` calls
3. Attribute fetches via `get_attr`
Example usage:
m = torchvision.models.resnet18()
traced = torch.fx.symbolic_trace(m)
traced = AnnotateTypesWithSchema(traced).transform()
"""
def __init__(self, module : torch.nn.Module, annotate_functionals : bool = True,
annotate_modules : bool = True, annotate_get_attrs : bool = True):
super().__init__(module)
self.annotate_functionals = annotate_functionals
self.annotate_modules = annotate_modules
self.annotate_get_attrs = annotate_get_attrs
def call_function(self, target : Target, args : Tuple[Argument, ...], kwargs : Dict[str, Any]):
python_ret_type = None
if self.annotate_functionals and target.__module__ == 'torch.nn.functional':
target_for_analysis = target
if target in boolean_dispatched:
# HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have
# a 2-way dispatch based on a boolean value. Here we check that the `true` and `false`
# branches of the dispatch have exactly the same signature. If they do, use the `true`
# branch signature for analysis. Otherwise, leave this un-normalized
assert not isinstance(target, str)
dispatched = boolean_dispatched[target]
if_true, if_false = dispatched['if_true'], dispatched['if_false']
# TODO: can we emit the union of these? What are the implications on TorchScript
# compilation?
if inspect.signature(if_true).return_annotation != inspect.signature(if_false).return_annotation:
return super().call_function(target, args, kwargs)
target_for_analysis = if_true
python_ret_type = self._extract_python_return_type(target_for_analysis)
return_proxy = super().call_function(target, args, kwargs)
return_proxy.node.type = return_proxy.node.type if return_proxy.node.type else python_ret_type
return return_proxy
def call_module(self, target : Target, args : Tuple[Argument, ...], kwargs : Dict[str, Any]):
python_ret_type = None
assert isinstance(target, str)
submod = self.fetch_attr(target)
if self.annotate_modules and hasattr(submod.__class__, '__name__'):
classname = submod.__class__.__name__
if getattr(torch.nn, classname, None) == submod.__class__:
python_ret_type = self._extract_python_return_type(submod.forward)
return_proxy = super().call_module(target, args, kwargs)
return_proxy.node.type = return_proxy.node.type if return_proxy.node.type else python_ret_type
return return_proxy
def get_attr(self, target : torch.fx.node.Target, args : Tuple[Argument, ...], kwargs : Dict[str, Any]):
attr_proxy = super().get_attr(target, args, kwargs)
if self.annotate_get_attrs:
module_itr = self.module
assert isinstance(target, str)
atoms = target.split('.')
for i, atom in enumerate(atoms):
if not hasattr(module_itr, atom):
                    raise RuntimeError(f'Node referenced nonexistent target {".".join(atoms[:i])}!')
module_itr = getattr(module_itr, atom)
maybe_inferred_ts_type = torch._C._jit_try_infer_type(module_itr)
if maybe_inferred_ts_type.success():
python_type = _torchscript_type_to_python_type(maybe_inferred_ts_type.type())
attr_proxy.node.type = python_type if not attr_proxy.node.type else attr_proxy.node.type
return attr_proxy
def _extract_python_return_type(self, target : Target) -> Optional[Any]:
"""
Given a Python call target, try to extract the Python return annotation
if it is available, otherwise return None
Args:
target (Callable): Python callable to get return annotation for
Returns:
Optional[Any]: Return annotation from the `target`, or None if it was
not available.
"""
assert callable(target)
try:
sig = inspect.signature(target)
except (ValueError, TypeError):
return None
return sig.return_annotation if sig.return_annotation is not inspect.Signature.empty else None
| pytorch-master | torch/fx/experimental/schema_type_annotation.py |
import torch
import torch.utils._pytree as pytree
from typing import Dict, Any, List, Type
import operator
try:
import sympy # type: ignore[import]
HAS_SYMPY = True
except ImportError:
HAS_SYMPY = False
aten = torch.ops.aten
__all__ = [
"has_symbolic_sizes_strides", "create_contiguous", "is_symbolic_op", "handle_symbolic_op", "PySymInt", "ShapeEnv",
"SymDispatchMode"
]
SYM_FUNCTION_MODE = None
# We don't bother with the metaclass as all of the dispatching logic happens
# entirely from Python
#
# Didn't bother with ancestors for now, unlikely to have multiple modes for
# symints right now
class SymDispatchMode:
def __sym_dispatch__(self, func, types, args, kwargs):
raise NotImplementedError()
def __enter__(self):
global SYM_FUNCTION_MODE
old = SYM_FUNCTION_MODE
if hasattr(self, "inner"):
raise RuntimeError(f"{self} has already been used as a mode. Please use a fresh version")
else:
self.inner = old
SYM_FUNCTION_MODE = self
return self
def __exit__(self, exc_type, exc_val, exc_tb):
global SYM_FUNCTION_MODE
SYM_FUNCTION_MODE = self.inner
def has_symbolic_sizes_strides(elem):
return any([isinstance(i, torch._C.SymIntNode) for i in elem.shape])
def create_contiguous(shape):
strides = [1]
for dim in reversed(shape[:-1]):
strides.append(dim * strides[-1])
return list(reversed(strides))
def is_symbolic_op(func):
return func in [aten.sym_size.default, aten.dim.default,
aten.is_contiguous.default, aten.sym_stride.default, aten.sym_numel.default
]
def handle_symbolic_op(func, args, kwargs):
assert is_symbolic_op(func)
if func == torch.ops.aten.sym_size.default:
return None
if func == torch.ops.aten.sym_stride.default:
return None
if func == torch.ops.aten.dim.default:
return len(args[0].shape)
if func == torch.ops.aten.sym_numel.default:
res = 1
for s in args[0].shape:
res = res * s
return res
# TODO: hack, need to make is_contiguous calls symbolic (probably through computing on symbolic strides)
if func == torch.ops.aten.is_contiguous.default:
return True
# TODO: hack, we don't currently support symbolic strides properly
# NB: this results in goop in the trace, it will be fixed when we have
# proper support
if func == torch.ops.aten.stride.default:
return create_contiguous(args[0].shape)
def _handle_sym_dispatch(func, args, kwargs):
global SYM_FUNCTION_MODE
mode = SYM_FUNCTION_MODE
assert mode
SYM_FUNCTION_MODE = mode.inner
try:
# TODO: properly compute types
types: List[Type] = []
return mode.__sym_dispatch__(func, types, args, kwargs)
finally:
SYM_FUNCTION_MODE = mode
# TODO: An incomplete list
# 1. Set variables to be equal when we do equality
# 2. Specialize on 0/1 when we do subtraction
class PySymInt(object):
"""
PySymInt objects are the primary "symbolic shape" objects that flow through
    our program. They're what sit under FakeTensor, and contain our primary
implementation of symbolic shapes.
"""
def __init__(self, expr, shape_env, constant=None):
self.expr = expr
self.shape_env = shape_env
self.constant = constant
def wrap(self, num):
return PySymInt(sympy.Integer(num), self.shape_env, constant=num)
def __str__(self):
return f"PySymInt({self.expr})"
# Today we error on calling int on a symbolic shape, as this is a very accessible footgun.
# In the future we'll probably need some explicit way of allowing this
def __int__(self):
raise RuntimeError("Trying to extract a concrete int out of a symbolic int")
def __bool__(self):
return bool(self.shape_env.evaluate_expr(self.expr))
# Methods that have a `__foo__` as well as `__rfoo__`
reflectable_magic_methods = {
'add': lambda a, b: a + b,
'sub': lambda a, b: a - b,
'mul': lambda a, b: a * b,
'mod': lambda a, b: a % b,
'floordiv': lambda a, b: sympy.floor(a / b),
}
magic_methods = {
**reflectable_magic_methods,
'eq': lambda a, b: sympy.Eq(a, b),
'gt': lambda a, b: sympy.Gt(a, b),
'lt': lambda a, b: sympy.Lt(a, b),
'le': lambda a, b: sympy.Le(a, b),
'ge': lambda a, b: sympy.Ge(a, b),
}
for method, _func in magic_methods.items():
def _create_magic_impl(func):
method_name = method
def magic_impl(self, other):
if SYM_FUNCTION_MODE:
return _handle_sym_dispatch(getattr(operator, method_name), (self, other), {})
if isinstance(other, PySymInt):
other = other.expr
return PySymInt(func(self.expr, other), self.shape_env)
return magic_impl
# this should be wrapped transparently into torch._C.SymIntNode
setattr(PySymInt, method, _create_magic_impl(_func))
setattr(PySymInt, f"__{method}__", _create_magic_impl(_func))
if method in reflectable_magic_methods:
setattr(PySymInt, f"__r{method}__", _create_magic_impl(_func))
class ShapeEnv(object):
def __init__(self):
self.guards = []
self.shape_env = {}
def create_symint(self, name, val, shape_env=None):
if not HAS_SYMPY:
raise RuntimeError("Need sympy installed to create symbolic shapes")
if shape_env is None:
shape_env = self.shape_env
# Currently we don't put 0/1 specialization in guards but perhaps we should
if val == 0 or val == 1:
return val
sympy_expr = sympy.Symbol(name, positive=True)
py_sym_int = PySymInt(sympy_expr, self)
cpp_sym_int = torch._C.SymIntNode.new_symint(py_sym_int) # type: ignore[attr-defined]
shape_env[sympy_expr] = val
return cpp_sym_int
def try_constantify(self, expr):
# Simplifies assuming that shape vars > 1 (since we cache on 0/1 shape values)
new_shape_env = {k: sympy.Symbol(f'shape_{idx}', positive=True) + 1 for idx, k in enumerate(self.shape_env.keys())}
new_expr = expr.subs(new_shape_env)
new_expr = new_expr.simplify()
if len(list(new_expr.free_symbols)) == 0:
return new_expr
return None
def create_shapes_for_args(self, args, shape_env=None):
# Takes pytrees and returns a flat list
arg_cnt = 0
def create_shape(x):
nonlocal arg_cnt
if not isinstance(x, torch.Tensor):
return x
out_shape = [self.create_symint(f"s_{arg_cnt}[{idx}]", sz, shape_env) for idx, sz in enumerate(x.shape)]
arg_cnt += 1
return out_shape
return list(map(create_shape, pytree.tree_flatten(args)[0]))
def evaluate_guards_for_args(self, *args):
env: Dict[Any, Any] = {}
_ = self.create_shapes_for_args(args, shape_env=env)
return all(guard.subs(env) == value for guard, value in self.guards)
def evaluate_expr(self, expr):
const_expr = self.try_constantify(expr)
if const_expr is not None:
return const_expr
expr = expr.simplify()
concrete_val = expr.subs(self.shape_env)
self.guards.append((expr, concrete_val))
return concrete_val
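# Illustrative sketch (assumed names, not part of the original file):
#   shape_env = ShapeEnv()
#   s0 = shape_env.create_symint("s0", 4)   # symbolic size backed by the concrete value 4
#   s1 = shape_env.create_symint("s1", 1)   # 0/1 are special-cased and returned as plain ints
# When traced code later branches on something like `s0 > 2`, evaluate_expr records an
# (expr, concrete_val) guard on shape_env.guards so the trace can be revalidated
# against new input shapes via evaluate_guards_for_args.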
| pytorch-master | torch/fx/experimental/symbolic_shapes.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import functools
from typing import Any, Dict, Optional, Tuple, Callable, Union
import torch
from torch._C import _disabled_torch_function_impl
import torch.utils._pytree as pytree
from torch.fx import Tracer, GraphModule
from torch._subclasses.fake_tensor import FakeTensorMode
import torch.fx as fx
from torch.utils._mode_utils import no_dispatch
from torch.fx.passes.shape_prop import _extract_tensor_metadata
from contextlib import contextmanager, nullcontext
import inspect
from torch.utils._python_dispatch import TorchDispatchMode, enable_torch_dispatch_mode
from torch._subclasses import FakeTensor
from .symbolic_shapes import ShapeEnv, SymDispatchMode, PySymInt
import torch.fx.experimental.symbolic_shapes as symbolic_shapes
# NOTE (assumption): `SymInt` is referenced below for isinstance checks and pytree
# dispatch but is not otherwise imported in this excerpt; the symbolic-int wrapper
# exposed to Python at this point in the codebase is torch._C.SymIntNode, so bind
# that name here so the references resolve.
SymInt = torch._C.SymIntNode
__all__ = ["ProxyTensor", "PythonKeyTracer", "dispatch_trace", "make_fx", "DecompositionInterpreter"]
aten = torch.ops.aten
CURRENT_DECOMPOSITION_TABLE: Dict[torch._ops.OpOverload, Callable] = {}
def fake_signature(fn, nargs):
"""FX gets confused by varargs, de-confuse it"""
argnames = ",".join(f"arg{i}" for i in range(nargs))
return eval(f"lambda {argnames}: fn({argnames})", {"fn": fn})
@contextmanager
def decompose(decomposition_table):
global CURRENT_DECOMPOSITION_TABLE
old_decomposition_table = CURRENT_DECOMPOSITION_TABLE
CURRENT_DECOMPOSITION_TABLE = decomposition_table
try:
yield CURRENT_DECOMPOSITION_TABLE
finally:
CURRENT_DECOMPOSITION_TABLE = old_decomposition_table
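# Illustrative sketch (assumed decomposition entry, not part of the original file):
#   table = {torch.ops.aten.addmm.default: my_addmm_decomp}   # my_addmm_decomp is hypothetical
#   with decompose(table):
#       ...   # any tracing performed here sees CURRENT_DECOMPOSITION_TABLE == table
# proxy_call (below) consults CURRENT_DECOMPOSITION_TABLE and re-dispatches a matching
# op through the registered decomposition instead of recording it directly in the graph.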
def track_metadata(tensor, proxy, tracer):
for i, s in enumerate(tensor.shape):
if isinstance(s, SymInt):
inner_s = s.get_pyobj()
assert isinstance(inner_s, PySymInt)
# TODO: improve naming
# TODO: lazily insert this into the graph only on first
# use? Maybe complicated and DCE is a better idea
inner_s.__dict__[tracer] = proxy.size(i)
# TODO: also do stride/numel
def wrap_output(inner_res, proxy_res, *, constant, proxy_mode):
def wrap_with_proxy(e, proxy, constant):
if isinstance(e, torch.Tensor):
track_metadata(e, proxy, proxy_mode.tracer)
with no_dispatch():
return ProxyTensor(e, proxy, constant=constant, proxy_mode=proxy_mode)
else:
return e
def get_constant(idx):
if constant is None:
return None
else:
return constant[idx]
# Unfortunately, tree_map cannot directly be used here. As the resulting
# object may be a proxy that represents a tuple, we may need to
# explicitly unwrap the proxy by simulating the flattening operations.
if isinstance(inner_res, tuple):
return tuple(wrap_with_proxy(e, proxy_res[idx], get_constant(idx)) for idx, e in enumerate(inner_res))
elif isinstance(inner_res, list):
return list([wrap_with_proxy(e, proxy_res[idx], get_constant(idx)) for idx, e in enumerate(inner_res)])
elif isinstance(inner_res, torch.Tensor):
return wrap_with_proxy(inner_res, proxy_res, constant)
else:
return inner_res
def maybe_disable_fake_tensor_mode():
# TODO: figure out if this API generally makes sense and bake it into the
# library
mb_fake_mode = torch._C._get_torch_dispatch_mode()
if isinstance(mb_fake_mode, FakeTensorMode):
return enable_torch_dispatch_mode(mb_fake_mode.inner, replace=mb_fake_mode)
else:
return nullcontext()
def unwrap_elem(e):
if isinstance(e, ProxyTensor):
return e.elem
return e
def fetch_symint_proxy(tracer):
def inner(e):
n = e.get_pyobj()
if n.constant is not None:
return n.constant
else:
return n.__dict__[tracer]
return inner
def proxy_call(proxy_mode, func_overload, args, kwargs=None):
if kwargs is None:
kwargs = {}
func = func_overload.overloadpacket
if func_overload in CURRENT_DECOMPOSITION_TABLE:
with proxy_mode.restore():
return CURRENT_DECOMPOSITION_TABLE[func_overload](*args, **kwargs)
# Some of these are not "real" aten ops and will fail if we
# call _dispatch_has_kernel_for_dispatch_key on them.
# This list is probably incomplete
if func_overload not in [torch.ops.aten.size.default]:
with proxy_mode.restore():
r = func_overload.decompose(*args, **kwargs)
if r is not NotImplemented:
return r
# If there are SymInts, we also should not consider this constant.
# However, fake tensor handling of SymInts is sufficiently broken that
# I couldn't write a test for this case
all_constant = (
pytree.tree_all_only(ProxyTensor, lambda t: t.constant is not None, (args, kwargs))
# TODO: maybe constant SymInts should also be allowed? Not sure if
# this can happen
and pytree.tree_all_only(SymInt, lambda _: False, (args, kwargs))
)
if torch.Tag.data_dependent_output in func_overload.tags: # type: ignore[attr-defined]
# Check if all of the Tensor inputs are constants
if all_constant:
const_args, const_kwargs = pytree.tree_map_only(
ProxyTensor, lambda t: t.constant, (args, kwargs)
)
with maybe_disable_fake_tensor_mode():
return func_overload(*const_args, **const_kwargs)
raise RuntimeError(
"It appears that you're trying to get value out of a tracing tensor - erroring out! "
"It's likely that this is caused by data-dependent control flow or similar."
)
proxy_args, proxy_kwargs = pytree.tree_map_only(
SymInt,
fetch_symint_proxy(proxy_mode.tracer),
pytree.tree_map_only(ProxyTensor, lambda e: e.proxy, (args, kwargs))
)
proxy_res = func_overload(*proxy_args, **proxy_kwargs)
# Kind of a hacky way to test if an op is in-place or not
if func.__name__[-1] == "_" and func.__name__[0] != "_":
# This makes DCE marginally less likely to DCE inplace operations.
# It is not strictly necessary
args[0].proxy = proxy_res
proxy_res.node.meta['tensor_meta'] = _extract_tensor_metadata(args[0])
elem_args, elem_kwargs = pytree.tree_map(unwrap_elem, (args, kwargs))
inner_res = func_overload(*elem_args, **elem_kwargs)
# Needed to sync up metadata for in-place operators that modify metadata
# TODO: instead forward the metadata to the inner tensor so updating
# is not necessary
if torch.Tag.inplace_view in func_overload.tags: # type: ignore[attr-defined]
with no_dispatch():
func_overload(*args, **kwargs)
# In some circumstances, we will be tracing in a situation where a tensor
# is *statically* known to be a constant (currently, this only happens if
# you run torch.tensor; deterministic factory functions like torch.arange
# don't get this treatment). When the tensor in question is small, it's
    # helpful to do constant propagation in case we call item() (in which
# case we can return the constant value that is known, rather than give
# an error.) The logic here tests if constant propagation is possible
# (because all of the inputs are constant). If so, we disable fake tensor
# mode (if it is on) and do true compute on the constant.
#
# It's worth highlighting that we're making a policy decision here.
# There is a potential that the tensor is actually quite large, and we
# don't actually want to run the compute. The tensor being quite large
# is one of the reasons why factory functions don't get this treatment
# (since they can be quite large; if a parameter is initialized to a
# constant value it will be!) Similarly, there is also a potential
# to run an operator that blows up the size of a small tensor; we don't
# protect against this case, but we could force, e.g., only single
# element constant computation by testing the numel of the result before
# propagating const-ness. Similarly, we don't require the constant to
# live on CPU, but we could.
any_constant = pytree.tree_any_only(ProxyTensor, lambda t: t.constant is not None, (args, kwargs))
constant = None
# NB: do NOT include factories as constants
if all_constant and any_constant:
with maybe_disable_fake_tensor_mode():
const_args, const_kwargs = pytree.tree_map_only(
ProxyTensor, lambda t: t.constant, (args, kwargs)
)
constant = func_overload(*const_args, **const_kwargs)
# TODO(chilli): Enable this after it's been refactored to work with wrapper tensor subclasses in general
# pytree.tree_map(lambda x: check_metadata_consistency(x, ProxyTensor), (inner_res, args, kwargs))
return wrap_output(inner_res, proxy_res, constant=constant, proxy_mode=proxy_mode)
class ProxyTensor(torch.Tensor):
proxy: fx.Proxy
elem: torch.Tensor
proxy_mode: "ProxyTorchDispatchMode"
@staticmethod
def __new__(cls, elem, proxy, *, requires_grad=None, constant=None, proxy_mode):
new_shape = elem.shape
new_strides = elem.stride()
return torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
cls,
new_shape, dtype=elem.dtype, layout=elem.layout, device=elem.device,
requires_grad=requires_grad if requires_grad is not None else False, strides=new_strides,
storage_offset=elem.storage_offset()
)
def __init__(self, elem, proxy, *, requires_grad=None, constant=None, proxy_mode):
# TODO: hack since _extract_tensor_metadata currently tries to access stride
if elem.is_sparse or symbolic_shapes.has_symbolic_sizes_strides(elem): # TODO: handle has_sym_ints
proxy.node.meta['tensor_meta'] = {}
else:
proxy.node.meta['tensor_meta'] = _extract_tensor_metadata(self)
# This detects situations where you accidentally put a ProxyTensor
# inside a ProxyTensor for the same trace; this is a layering violation
assert not (isinstance(elem, ProxyTensor) and elem.proxy.tracer is proxy.tracer)
self.elem = elem
self.proxy = proxy
self.constant = constant
self.proxy_mode = proxy_mode
def __deepcopy__(self, memo):
return self.clone()
def __repr__(self):
with no_dispatch():
return f"ProxyTensor({self.elem}, proxy={self.proxy})"
__torch_function__ = _disabled_torch_function_impl
@classmethod
def __torch_dispatch__(cls, func_overload, types, args=(), kwargs=None):
raise RuntimeError(
"Should not be needed as we always trace with modes. May have entered this due to redispatching from"
"__torch_dispatch__ into another op without restoring dispatch mode"
)
class PythonKeyTracer(Tracer):
def __init__(self):
super().__init__()
# In general, we don't want to make modules leaves. In principle, users of
# this tracer might want to override this in order to turn a couple specific
# modules into leaves in the traced graph.
def call_module(
self, m: torch.nn.Module, forward: Callable[..., Any], args: Tuple[Any, ...], kwargs: Dict[str, Any]
) -> Any:
return forward(*args, **kwargs)
def create_arg(self, a: Any):
if isinstance(a, torch.nn.Parameter):
for n, p in self.root.named_parameters():
if a is p:
return self.create_node('get_attr', n, (), {})
qualname: Optional[str] = None
if not qualname:
i = 0
while True:
qualname = f'_param_constant{i}'
if not hasattr(self.root, qualname):
break
i += 1
setattr(self.root, qualname, a)
return self.create_node('get_attr', qualname, (), {})
elif isinstance(a, SymInt):
assert a.get_pyobj().constant is not None
return a.get_pyobj().constant
return super().create_arg(a)
def dispatch_trace(
root: Union[torch.nn.Module, Callable],
tracer: Tracer,
concrete_args: Optional[Tuple[Any, ...]] = None,
) -> GraphModule:
graph = tracer.trace(root, concrete_args)
name = root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__
return GraphModule(tracer.root, graph, name)
def wrap_key(f, inps, proxy_mode, tracer):
flat_inps, _ = pytree.tree_flatten(inps)
@functools.wraps(f)
def wrapped(*args):
flat_args, args_spec = pytree.tree_flatten(args)
assert (len(flat_args) == len(flat_inps))
for idx, arg in enumerate(flat_args):
if isinstance(flat_inps[idx], torch.Tensor):
with no_dispatch():
track_metadata(flat_inps[idx], arg, tracer)
flat_args[idx] = ProxyTensor(
flat_inps[idx],
arg,
requires_grad=(flat_inps[idx].is_leaf and flat_inps[idx].requires_grad),
proxy_mode=proxy_mode,
)
else:
flat_args[idx] = flat_inps[idx]
tree_args = pytree.tree_unflatten(flat_args, args_spec)
out = f(*tree_args)
flat_outs, out_spec = pytree.tree_flatten(out)
for idx in range(len(flat_outs)):
if isinstance(flat_outs[idx], torch.Tensor) and isinstance(flat_outs[idx], ProxyTensor):
flat_outs[idx] = flat_outs[idx].proxy
return pytree.tree_unflatten(flat_outs, out_spec)
return wrapped
class ProxyTorchDispatchMode(TorchDispatchMode):
def __init__(self, tracer):
self.tracer = tracer
self.enable_tracing = True
self.sym_mode = ProxySymDispatchMode(tracer)
def __torch_dispatch__(self, func_overload, types, args=(), kwargs=None):
with self.sym_mode.enable(False):
return self.inner_torch_dispatch(func_overload, types, args, kwargs)
@contextmanager
def restore(self):
with self.sym_mode.enable(True):
with super().restore():
yield
def inner_torch_dispatch(self, func_overload, types, args=(), kwargs=None):
if not self.enable_tracing:
return func_overload(*args, **kwargs)
if symbolic_shapes.is_symbolic_op(func_overload):
with self.restore():
return symbolic_shapes.handle_symbolic_op(func_overload, args, kwargs)
func = func_overload.overloadpacket
# We don't want to convert torch.tensor constants into tracing objects.
if func_overload == aten.lift.default:
return args[0]
if any(tuple(isinstance(arg, ProxyTensor) for arg in pytree.tree_flatten(args)[0])):
out = proxy_call(self, func_overload, args, kwargs)
# When we trace through a torch.tensor invocation, you never actually
# see a torch.ops.aten.tensor call. Instead, the way this function is
# implemented internally is that we allocate a plain tensor (this is
# *guaranteed* to be a plain tensor, we disable all modes when doing
# so), and then call at::lift_fresh on it (to give modes a chance to do
# their stuff). Furthermore, the tensor argument to lift_fresh is guaranteed
# to be freshly allocated, so we want lift_fresh to be a no-op (directly
# returning the input argument).
#
# Here is the basic problem: when we trace this sequence of executions
# into an FX graph, what happens to this call sequence? Traditionally,
# tensor constants get interned as buffers on the FX GraphModule. But
# this is dangerous. Consider:
#
# x = torch.tensor(1)
# x.add_(2)
#
# Naively, this traces into:
#
# t = self._tensor_constant0 # initialized to torch.tensor(1)
# x = torch.ops.aten.lift_fresh(t)
# x.add_(2)
#
# If lift_fresh returns t directly, the subsequent add_ call will
# modify the tensor constant. Really, the problem is we've violated
# the invariant the the argument to lift is fresh. So what we should
# preserve the invariant by replacing lift_fresh with lift_fresh_copy:
#
# t = self._tensor_constant0 # initialized to torch.tensor(1)
# x = torch.ops.aten.lift_fresh_copy(t)
# x.add_(2)
#
# This is what the overload modification does.
else:
flat_args = pytree.tree_flatten((args, kwargs))[0]
handled_types = [torch.Tensor, ProxyTensor, torch.nn.Parameter]
# If there are any tensor subclasses, we need to handle those tensor subclasses first
if any([isinstance(arg, torch.Tensor) and type(arg) not in handled_types for arg in flat_args]):
return NotImplemented
if func_overload is torch.ops.aten.lift_fresh.default:
func_overload = torch.ops.aten.lift_fresh_copy.default
n_args, n_kwargs = pytree.tree_map_only(SymInt, fetch_symint_proxy(self.tracer), (args, kwargs))
proxy_res = self.tracer.create_proxy('call_function', func_overload, n_args, n_kwargs,
name=self.tracer.graph._target_to_str(func.__name__))
inner_res = func_overload(*args, **kwargs)
# If this is a lift, the input tensor is guaranteed to be a
# constant, so we keep a copy of the original argument along so
# we can query it if we're asked to item() it at some later point
is_lift = func_overload is torch.ops.aten.lift_fresh_copy.default
if is_lift:
with maybe_disable_fake_tensor_mode():
constant = args[0].clone()
else:
constant = None
out = wrap_output(inner_res, proxy_res, constant=constant, proxy_mode=self)
def assert_proxy_tensor(e):
if isinstance(e, torch.Tensor):
assert isinstance(e, ProxyTensor), \
f"Internal Error: ProxyTensor is incorrectly baking a tensor constant into the graph: {str(e)}"
# When we trace factory functions, we expect that tensor outputs are *always* ProxyTensors.
        # (Except for torch.tensor() constants handled through lift(), which are
        # handled specially further up).
pytree.tree_map(assert_proxy_tensor, out)
return out
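# A rough sketch of the lift_fresh -> lift_fresh_copy rewrite described in the
# comment above, as it would appear in a traced graph (node names illustrative):
#
#   def f():
#       x = torch.tensor(1)
#       x.add_(2)
#       return x
#
#   # make_fx(f)() produces, roughly:
#   #   _tensor_constant0 = self._tensor_constant0
#   #   x = torch.ops.aten.lift_fresh_copy.default(_tensor_constant0)
#   #   torch.ops.aten.add_.Tensor(x, 2)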
SymInt = torch._C.SymIntNode
class ProxySymDispatchMode(SymDispatchMode):
def __init__(self, tracer):
super().__init__()
self.tracer = tracer
self.enable_tracing = True
@contextmanager
def enable(self, b):
old = self.enable_tracing
self.enable_tracing = b
try:
yield
finally:
self.enable_tracing = old
def __sym_dispatch__(self, func, types, args, kwargs):
if not self.enable_tracing:
return func(*args, **kwargs)
p_args, p_kwargs = pytree.tree_map_only(
PySymInt,
lambda s: s.__dict__[self.tracer] if s.constant is None else s.constant,
(args, kwargs)
)
# func doesn't have a __torch_function__ that Proxy can interpose, so
# we gotta do it manually
n_args, n_kwargs = pytree.tree_map_only(fx.Proxy, lambda p: p.node, (p_args, p_kwargs))
n_out = self.tracer.create_node("call_function", func, n_args, n_kwargs)
p_out = fx.Proxy(n_out, self.tracer)
out = func(*args, **kwargs)
assert isinstance(out, PySymInt), f"{func}(*{args}, **{kwargs}) = {out}"
out.__dict__[self.tracer] = p_out
return out
class DecompositionInterpreter(torch.fx.Interpreter):
def __init__(self, module: torch.fx.GraphModule, new_graph: torch.fx.Graph, decomposition_table=None, **kwargs):
super().__init__(module, **kwargs)
self.new_graph = new_graph
self.tracer = torch.fx.proxy.GraphAppendingTracer(self.new_graph)
self.decomposition_table = decomposition_table
if self.decomposition_table is None:
self.decomposition_table = {}
self.mode = ProxyTorchDispatchMode(self.tracer)
def placeholder(self, target, args, kwargs):
out = super().placeholder(target, args, kwargs)
# TODO handle case where the first character of target is '*'
return ProxyTensor(out, torch.fx.Proxy(self.new_graph.placeholder(target), self.tracer), proxy_mode=self.mode)
def get_attr(self, target, args, kwargs):
out = super().get_attr(target, args, kwargs)
return ProxyTensor(out, torch.fx.Proxy(self.new_graph.get_attr(target), self.tracer), proxy_mode=self.mode)
# call_function, call_method, call_module get traced automatically by the ProxyTensors.
def output(self, target, args, kwargs):
out = super().output(target, args, kwargs)
def unwrap(e):
return e.proxy.node if isinstance(e, ProxyTensor) else e
self.new_graph.output(pytree.tree_map(unwrap, out))
return out
def run(self, *args, **kwargs):
# Should enter the mode at least once for being able to restore it later
# See: https://github.com/pytorch/pytorch/pull/82549#discussion_r934782025
with decompose(self.decomposition_table), self.mode:
return super().run(*args, **kwargs)
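# A typical usage sketch for DecompositionInterpreter (gm, decomps and the
# example inputs are assumptions): run the interpreter to populate new_graph
# with decomposed ops, then wrap the result in a fresh GraphModule.
#
#   new_graph = torch.fx.Graph()
#   DecompositionInterpreter(gm, new_graph, decomposition_table=decomps).run(*example_inputs)
#   decomposed_gm = torch.fx.GraphModule(gm, new_graph)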
def wrapper_and_args_for_make_fx(func, args, kwargs):
# make_fx doesn't support kwargs, so we need to do this flattening
# and then unflatten the args before calling func
flat_args, spec = pytree.tree_flatten((args, kwargs))
def wrapped(flat_args):
fn_args, fn_kwargs = pytree.tree_unflatten(flat_args, spec)
return func(*fn_args, **fn_kwargs)
return wrapped, flat_args
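# Typical usage (mirrored by get_isolated_graphmodule further below): flatten
# args/kwargs into a single list, trace the wrapper, and let the stored spec
# unflatten them inside the traced function. x and y are illustrative tensors.
#
#   wrapped, all_args = wrapper_and_args_for_make_fx(torch.add, (x, y), {})
#   gm = make_fx(wrapped)(all_args)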
def make_fx(f, decomposition_table=None, tracing_mode="real"):
assert tracing_mode in ["real", "fake", "symbolic"]
if decomposition_table is None:
decomposition_table = {}
@functools.wraps(f)
def wrapped(*args):
phs = pytree.tree_map(lambda _: fx.PH, args) # type: ignore[attr-defined]
fx_tracer = PythonKeyTracer()
fake_tensor_mode: Any = nullcontext()
if tracing_mode == "real":
fake_tensor_mode = nullcontext()
elif tracing_mode == "fake":
fake_tensor_mode = FakeTensorMode(allow_fallback_kernels=True)
elif tracing_mode == "symbolic":
fake_tensor_mode = FakeTensorMode(allow_fallback_kernels=False)
else:
raise AssertionError(f"Unexpected tracing type: {tracing_mode}")
proxy_mode = ProxyTorchDispatchMode(fx_tracer)
def wrap_fake_concrete(x):
if isinstance(x, torch.Tensor):
return fake_tensor_mode.from_tensor(x) # type: ignore[attr-defined]
return x
shape_env = ShapeEnv()
sym_mode = proxy_mode.sym_mode
# todo: Figure out a more informative name for symints
def wrap_fake_symbolic(x, sym_shape):
if isinstance(x, torch.Tensor):
val = FakeTensor(fake_tensor_mode, torch.empty(sym_shape, device="meta"), x.device)
return val
return x
wrap_fn_map = {
"real": lambda x: x,
"fake": wrap_fake_concrete,
}
if tracing_mode == "symbolic":
flat_shapes = shape_env.create_shapes_for_args(args)
flat_args, spec = pytree.tree_flatten(args)
args = pytree.tree_unflatten(list(map(lambda a: wrap_fake_symbolic(a[0], a[1]), zip(flat_args, flat_shapes))), spec)
else:
args = pytree.tree_map(wrap_fn_map[tracing_mode], args)
if not hasattr(f, '__code__') or inspect.unwrap(f).__code__.co_flags & inspect.CO_VARARGS:
# FX doesn't support varargs, so we gotta fake up a wrapper
# TODO: Would be nice to fix this at the source...
func = fake_signature(f, len(phs))
else:
func = f
with decompose(decomposition_table), fake_tensor_mode, sym_mode, proxy_mode: # type: ignore[attr-defined]
t = dispatch_trace(wrap_key(func, args, proxy_mode, fx_tracer), tracer=fx_tracer, concrete_args=tuple(phs))
# TODO: kind of a bad way to do it, should maybe figure out a better way
t.shape_env = shape_env # type: ignore[assignment]
return t
return wrapped
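# A minimal usage sketch of make_fx; the traced function and input shape are
# illustrative:
#
#   def f(x):
#       return torch.sin(x) + x
#
#   gm = make_fx(f)(torch.randn(3))
#   # gm.graph records aten-level calls such as torch.ops.aten.sin.default and
#   # torch.ops.aten.add.Tensor; tracing_mode="fake" and "symbolic" trace with
#   # FakeTensors instead of doing real compute on the inputs.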
def get_torch_dispatch_modes():
modes = [torch._C._get_torch_dispatch_mode()]
if modes[-1] is None:
return list()
while modes[-1].inner is not None:
modes.append(modes[-1].inner)
return modes
@contextlib.contextmanager
def disable_proxy_modes_tracing():
modes = get_torch_dispatch_modes()
proxy_tensor_modes = [m for m in modes if isinstance(m, ProxyTorchDispatchMode)]
olds = [m.enable_tracing for m in proxy_tensor_modes]
for proxy_mode in proxy_tensor_modes:
proxy_mode.enable_tracing = False
try:
yield
finally:
for proxy_mode, old in zip(proxy_tensor_modes, olds):
proxy_mode.enable_tracing = old
def get_isolated_graphmodule(func, args, kwargs):
"""A helper function used to get the GraphModule for the given func.
It's expected to be used in the ProxyTensor tracing context.
It detaches the args and kwargs from the current tracer so that the trace of
the current graph module can be created without any side-effects.
"""
wrapped, all_args = wrapper_and_args_for_make_fx(func, args, kwargs)
unwrapped_all_args = [unwrap_elem(a) for a in all_args]
# Current implementation doesn't support the case when ProxyTensor is
# wrapped with another Tensor subclass
# See: https://github.com/pytorch/pytorch/pull/81764#issuecomment-1200472068
# TODO: Once https://github.com/pytorch/pytorch/pull/82549 is merged, we can
# remove this
assert all(
getattr(a, "elem", None) is None
for a in unwrapped_all_args
if isinstance(a, torch.Tensor)
), "ProxyTensor is wrapped with another Tensor subclass"
with disable_proxy_modes_tracing():
gm = make_fx(wrapped)(unwrapped_all_args)
return gm
| pytorch-master | torch/fx/experimental/proxy_tensor.py |
from functools import partial
from .multipledispatch import dispatch # type: ignore[import]
namespace = dict() # type: ignore[var-annotated]
dispatch = partial(dispatch, namespace=namespace)
| pytorch-master | torch/fx/experimental/unification/dispatch.py |
# type: ignore[attr-defined]
from .core import unify, reify # noqa: F403
from .more import unifiable # noqa: F403
from .variable import var, isvar, vars, variables, Var # noqa: F403
| pytorch-master | torch/fx/experimental/unification/__init__.py |
from collections.abc import Iterator # type: ignore[import]
from functools import partial
from .unification_tools import assoc # type: ignore[import]
from .utils import transitive_get as walk
from .variable import isvar
from .dispatch import dispatch
################
# Reification  #
################
@dispatch(Iterator, dict)
def _reify(t, s):
return map(partial(reify, s=s), t)
# return (reify(arg, s) for arg in t)
_reify
@dispatch(tuple, dict) # type: ignore[no-redef]
def _reify(t, s):
return tuple(reify(iter(t), s))
_reify
@dispatch(list, dict) # type: ignore[no-redef]
def _reify(t, s):
return list(reify(iter(t), s))
_reify
@dispatch(dict, dict) # type: ignore[no-redef]
def _reify(d, s):
return dict((k, reify(v, s)) for k, v in d.items())
_reify
@dispatch(object, dict) # type: ignore[no-redef]
def _reify(o, s):
return o # catch all, just return the object
def reify(e, s):
""" Replace variables of expression with substitution
>>> # xdoctest: +SKIP
>>> x, y = var(), var()
>>> e = (1, x, (3, y))
>>> s = {x: 2, y: 4}
>>> reify(e, s)
(1, 2, (3, 4))
>>> e = {1: x, 3: (y, 5)}
>>> reify(e, s)
{1: 2, 3: (4, 5)}
"""
if isvar(e):
return reify(s[e], s) if e in s else e
return _reify(e, s)
###############
# Unification #
###############
seq = tuple, list, Iterator
@dispatch(seq, seq, dict)
def _unify(u, v, s):
if len(u) != len(v):
return False
for uu, vv in zip(u, v): # avoiding recursion
s = unify(uu, vv, s)
if s is False:
return False
return s
#
# @dispatch((set, frozenset), (set, frozenset), dict)
# def _unify(u, v, s):
# i = u & v
# u = u - i
# v = v - i
# return _unify(sorted(u), sorted(v), s)
#
#
# @dispatch(dict, dict, dict)
# def _unify(u, v, s):
# if len(u) != len(v):
# return False
# for key, uval in iteritems(u):
# if key not in v:
# return False
# s = unify(uval, v[key], s)
# if s is False:
# return False
# return s
#
#
# @dispatch(object, object, dict)
# def _unify(u, v, s):
# return False # catch all
@dispatch(object, object, dict)
def unify(u, v, s): # no check at the moment
""" Find substitution so that u == v while satisfying s
>>> x = var('x')
>>> unify((1, x), (1, 2), {})
{~x: 2}
"""
u = walk(u, s)
v = walk(v, s)
if u == v:
return s
if isvar(u):
return assoc(s, u, v)
if isvar(v):
return assoc(s, v, u)
return _unify(u, v, s)
unify
@dispatch(object, object) # type: ignore[no-redef]
def unify(u, v):
return unify(u, v, {})
| pytorch-master | torch/fx/experimental/unification/core.py |
import collections
import operator
from functools import reduce
from collections.abc import Mapping
__all__ = ('merge', 'merge_with', 'valmap', 'keymap', 'itemmap',
'valfilter', 'keyfilter', 'itemfilter',
'assoc', 'dissoc', 'assoc_in', 'update_in', 'get_in')
def _get_factory(f, kwargs):
factory = kwargs.pop('factory', dict)
if kwargs:
raise TypeError("{}() got an unexpected keyword argument "
"'{}'".format(f.__name__, kwargs.popitem()[0]))
return factory
def merge(*dicts, **kwargs):
""" Merge a collection of dictionaries
>>> merge({1: 'one'}, {2: 'two'})
{1: 'one', 2: 'two'}
Later dictionaries have precedence
>>> merge({1: 2, 3: 4}, {3: 3, 4: 4})
{1: 2, 3: 3, 4: 4}
See Also:
merge_with
"""
if len(dicts) == 1 and not isinstance(dicts[0], Mapping):
dicts = dicts[0]
factory = _get_factory(merge, kwargs)
rv = factory()
for d in dicts:
rv.update(d)
return rv
def merge_with(func, *dicts, **kwargs):
""" Merge dictionaries and apply function to combined values
A key may occur in more than one dict, and all values mapped from the key
will be passed to the function as a list, such as func([val1, val2, ...]).
>>> merge_with(sum, {1: 1, 2: 2}, {1: 10, 2: 20})
{1: 11, 2: 22}
>>> merge_with(first, {1: 1, 2: 2}, {2: 20, 3: 30}) # doctest: +SKIP
{1: 1, 2: 2, 3: 30}
See Also:
merge
"""
if len(dicts) == 1 and not isinstance(dicts[0], Mapping):
dicts = dicts[0]
factory = _get_factory(merge_with, kwargs)
result = factory()
for d in dicts:
for k, v in d.items():
if k not in result:
result[k] = [v]
else:
result[k].append(v)
return valmap(func, result, factory)
def valmap(func, d, factory=dict):
""" Apply function to values of dictionary
>>> bills = {"Alice": [20, 15, 30], "Bob": [10, 35]}
>>> valmap(sum, bills) # doctest: +SKIP
{'Alice': 65, 'Bob': 45}
See Also:
keymap
itemmap
"""
rv = factory()
rv.update(zip(d.keys(), map(func, d.values())))
return rv
def keymap(func, d, factory=dict):
""" Apply function to keys of dictionary
>>> bills = {"Alice": [20, 15, 30], "Bob": [10, 35]}
>>> keymap(str.lower, bills) # doctest: +SKIP
{'alice': [20, 15, 30], 'bob': [10, 35]}
See Also:
valmap
itemmap
"""
rv = factory()
rv.update(zip(map(func, d.keys()), d.values()))
return rv
def itemmap(func, d, factory=dict):
""" Apply function to items of dictionary
>>> accountids = {"Alice": 10, "Bob": 20}
>>> itemmap(reversed, accountids) # doctest: +SKIP
{10: "Alice", 20: "Bob"}
See Also:
keymap
valmap
"""
rv = factory()
rv.update(map(func, d.items()))
return rv
def valfilter(predicate, d, factory=dict):
""" Filter items in dictionary by value
>>> iseven = lambda x: x % 2 == 0
>>> d = {1: 2, 2: 3, 3: 4, 4: 5}
>>> valfilter(iseven, d)
{1: 2, 3: 4}
See Also:
keyfilter
itemfilter
valmap
"""
rv = factory()
for k, v in d.items():
if predicate(v):
rv[k] = v
return rv
def keyfilter(predicate, d, factory=dict):
""" Filter items in dictionary by key
>>> iseven = lambda x: x % 2 == 0
>>> d = {1: 2, 2: 3, 3: 4, 4: 5}
>>> keyfilter(iseven, d)
{2: 3, 4: 5}
See Also:
valfilter
itemfilter
keymap
"""
rv = factory()
for k, v in d.items():
if predicate(k):
rv[k] = v
return rv
def itemfilter(predicate, d, factory=dict):
""" Filter items in dictionary by item
>>> def isvalid(item):
... k, v = item
... return k % 2 == 0 and v < 4
>>> d = {1: 2, 2: 3, 3: 4, 4: 5}
>>> itemfilter(isvalid, d)
{2: 3}
See Also:
keyfilter
valfilter
itemmap
"""
rv = factory()
for item in d.items():
if predicate(item):
k, v = item
rv[k] = v
return rv
def assoc(d, key, value, factory=dict):
""" Return a new dict with new key value pair
New dict has d[key] set to value. Does not modify the initial dictionary.
>>> assoc({'x': 1}, 'x', 2)
{'x': 2}
>>> assoc({'x': 1}, 'y', 3) # doctest: +SKIP
{'x': 1, 'y': 3}
"""
d2 = factory()
d2.update(d)
d2[key] = value
return d2
def dissoc(d, *keys, **kwargs):
""" Return a new dict with the given key(s) removed.
New dict has d[key] deleted for each supplied key.
Does not modify the initial dictionary.
>>> dissoc({'x': 1, 'y': 2}, 'y')
{'x': 1}
>>> dissoc({'x': 1, 'y': 2}, 'y', 'x')
{}
>>> dissoc({'x': 1}, 'y') # Ignores missing keys
{'x': 1}
"""
factory = _get_factory(dissoc, kwargs)
d2 = factory()
if len(keys) < len(d) * .6:
d2.update(d)
for key in keys:
if key in d2:
del d2[key]
else:
remaining = set(d)
remaining.difference_update(keys)
for k in remaining:
d2[k] = d[k]
return d2
def assoc_in(d, keys, value, factory=dict):
""" Return a new dict with new, potentially nested, key value pair
>>> purchase = {'name': 'Alice',
... 'order': {'items': ['Apple', 'Orange'],
... 'costs': [0.50, 1.25]},
... 'credit card': '5555-1234-1234-1234'}
>>> assoc_in(purchase, ['order', 'costs'], [0.25, 1.00]) # doctest: +SKIP
{'credit card': '5555-1234-1234-1234',
'name': 'Alice',
'order': {'costs': [0.25, 1.00], 'items': ['Apple', 'Orange']}}
"""
return update_in(d, keys, lambda x: value, value, factory)
def update_in(d, keys, func, default=None, factory=dict):
""" Update value in a (potentially) nested dictionary
inputs:
d - dictionary on which to operate
keys - list or tuple giving the location of the value to be changed in d
func - function to operate on that value
If keys == [k0,..,kX] and d[k0]..[kX] == v, update_in returns a copy of the
original dictionary with v replaced by func(v), but does not mutate the
original dictionary.
If k0 is not a key in d, update_in creates nested dictionaries to the depth
specified by the keys, with the innermost value set to func(default).
>>> inc = lambda x: x + 1
>>> update_in({'a': 0}, ['a'], inc)
{'a': 1}
>>> transaction = {'name': 'Alice',
... 'purchase': {'items': ['Apple', 'Orange'],
... 'costs': [0.50, 1.25]},
... 'credit card': '5555-1234-1234-1234'}
>>> update_in(transaction, ['purchase', 'costs'], sum) # doctest: +SKIP
{'credit card': '5555-1234-1234-1234',
'name': 'Alice',
'purchase': {'costs': 1.75, 'items': ['Apple', 'Orange']}}
>>> # updating a value when k0 is not in d
>>> update_in({}, [1, 2, 3], str, default="bar")
{1: {2: {3: 'bar'}}}
>>> update_in({1: 'foo'}, [2, 3, 4], inc, 0)
{1: 'foo', 2: {3: {4: 1}}}
"""
ks = iter(keys)
k = next(ks)
rv = inner = factory()
rv.update(d)
for key in ks:
if k in d:
d = d[k]
dtemp = factory()
dtemp.update(d)
else:
d = dtemp = factory()
inner[k] = inner = dtemp
k = key
if k in d:
inner[k] = func(d[k])
else:
inner[k] = func(default)
return rv
def get_in(keys, coll, default=None, no_default=False):
""" Returns coll[i0][i1]...[iX] where [i0, i1, ..., iX]==keys.
If coll[i0][i1]...[iX] cannot be found, returns ``default``, unless
``no_default`` is specified, then it raises KeyError or IndexError.
``get_in`` is a generalization of ``operator.getitem`` for nested data
structures such as dictionaries and lists.
>>> transaction = {'name': 'Alice',
... 'purchase': {'items': ['Apple', 'Orange'],
... 'costs': [0.50, 1.25]},
... 'credit card': '5555-1234-1234-1234'}
>>> get_in(['purchase', 'items', 0], transaction)
'Apple'
>>> get_in(['name'], transaction)
'Alice'
>>> get_in(['purchase', 'total'], transaction)
>>> get_in(['purchase', 'items', 'apple'], transaction)
>>> get_in(['purchase', 'items', 10], transaction)
>>> get_in(['purchase', 'total'], transaction, 0)
0
>>> get_in(['y'], {}, no_default=True)
Traceback (most recent call last):
...
KeyError: 'y'
See Also:
itertoolz.get
operator.getitem
"""
try:
return reduce(operator.getitem, keys, coll)
except (KeyError, IndexError, TypeError):
if no_default:
raise
return default
def getter(index):
if isinstance(index, list):
if len(index) == 1:
index = index[0]
return lambda x: (x[index],)
elif index:
return operator.itemgetter(*index)
else:
return lambda x: ()
else:
return operator.itemgetter(index)
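# Illustrative behavior of ``getter``, which ``groupby`` below uses for
# non-callable keys:
#   getter('gender')({'gender': 'F'})  -> 'F'
#   getter([0, 2])(('a', 'b', 'c'))    -> ('a', 'c')
#   getter([0])(('a', 'b'))            -> ('a',)
#   getter([])('anything')             -> ()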
def groupby(key, seq):
""" Group a collection by a key function
>>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
>>> groupby(len, names) # doctest: +SKIP
{3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
>>> iseven = lambda x: x % 2 == 0
>>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP
{False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
Non-callable keys imply grouping on a member.
>>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},
... {'name': 'Bob', 'gender': 'M'},
... {'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIP
{'F': [{'gender': 'F', 'name': 'Alice'}],
'M': [{'gender': 'M', 'name': 'Bob'},
{'gender': 'M', 'name': 'Charlie'}]}
Not to be confused with ``itertools.groupby``
See Also:
countby
"""
if not callable(key):
key = getter(key)
d = collections.defaultdict(lambda: [].append) # type: ignore[var-annotated]
for item in seq:
d[key(item)](item)
rv = {}
for k, v in d.items():
rv[k] = v.__self__ # type: ignore[var-annotated, attr-defined]
return rv
def first(seq):
""" The first element in a sequence
>>> first('ABC')
'A'
"""
return next(iter(seq))
| pytorch-master | torch/fx/experimental/unification/unification_tools.py |
from contextlib import contextmanager
from .utils import hashable
from .dispatch import dispatch
_global_logic_variables = set() # type: ignore[var-annotated]
_glv = _global_logic_variables
class Var(object):
""" Logic Variable """
_id = 1
def __new__(cls, *token):
if len(token) == 0:
token = "_%s" % Var._id # type: ignore[assignment]
Var._id += 1
elif len(token) == 1:
token = token[0]
obj = object.__new__(cls)
obj.token = token # type: ignore[attr-defined]
return obj
def __str__(self):
return "~" + str(self.token) # type: ignore[attr-defined]
__repr__ = __str__
def __eq__(self, other):
return type(self) == type(other) and self.token == other.token # type: ignore[attr-defined]
def __hash__(self):
return hash((type(self), self.token)) # type: ignore[attr-defined]
def var(*args):
    return Var(*args)
def vars(n):
    return [var() for i in range(n)]
@dispatch(Var)
def isvar(v):
return True
isvar
@dispatch(object) # type: ignore[no-redef]
def isvar(o):
return not not _glv and hashable(o) and o in _glv
@contextmanager
def variables(*variables):
""" Context manager for logic variables
>>> from __future__ import with_statement
>>> with variables(1):
... print(isvar(1))
True
>>> print(isvar(1))
False
>>> # xdoctest: +SKIP("undefined vars")
>>> # Normal approach
>>> from unification import unify
>>> x = var('x')
>>> unify(x, 1)
{~x: 1}
>>> # Context Manager approach
>>> with variables('x'):
... print(unify('x', 1))
{'x': 1}
"""
old_global_logic_variables = _global_logic_variables.copy()
_global_logic_variables.update(set(variables))
try:
yield
finally:
_global_logic_variables.clear()
_global_logic_variables.update(old_global_logic_variables)
| pytorch-master | torch/fx/experimental/unification/variable.py |
from .core import unify, reify # type: ignore[attr-defined]
from .dispatch import dispatch
def unifiable(cls):
""" Register standard unify and reify operations on class
This uses the type and __dict__ or __slots__ attributes to define the
nature of the term
See Also:
>>> class A(object):
... def __init__(self, a, b):
... self.a = a
... self.b = b
>>> # xdoctest: +SKIP
>>> unifiable(A)
<class 'unification.more.A'>
>>> x = var('x')
>>> a = A(1, 2)
>>> b = A(1, x)
>>> unify(a, b, {})
{~x: 2}
"""
_unify.add((cls, cls, dict), unify_object)
_reify.add((cls, dict), reify_object)
return cls
#########
# Reify #
#########
def reify_object(o, s):
""" Reify a Python object with a substitution
>>> class Foo(object):
... def __init__(self, a, b):
... self.a = a
... self.b = b
... def __str__(self):
... return "Foo(%s, %s)"%(str(self.a), str(self.b))
>>> # xdoctest: +SKIP
>>> x = var('x')
>>> f = Foo(1, x)
>>> print(f)
Foo(1, ~x)
>>> print(reify_object(f, {x: 2}))
Foo(1, 2)
"""
if hasattr(o, '__slots__'):
return _reify_object_slots(o, s)
else:
return _reify_object_dict(o, s)
def _reify_object_dict(o, s):
obj = object.__new__(type(o))
d = reify(o.__dict__, s)
if d == o.__dict__:
return o
obj.__dict__.update(d)
return obj
def _reify_object_slots(o, s):
attrs = [getattr(o, attr) for attr in o.__slots__]
new_attrs = reify(attrs, s)
if attrs == new_attrs:
return o
else:
newobj = object.__new__(type(o))
for slot, attr in zip(o.__slots__, new_attrs):
setattr(newobj, slot, attr)
return newobj
@dispatch(slice, dict)
def _reify(o, s):
""" Reify a Python ``slice`` object """
return slice(*reify((o.start, o.stop, o.step), s))
#########
# Unify #
#########
def unify_object(u, v, s):
""" Unify two Python objects
Unifies their type and ``__dict__`` attributes
>>> class Foo(object):
... def __init__(self, a, b):
... self.a = a
... self.b = b
... def __str__(self):
... return "Foo(%s, %s)"%(str(self.a), str(self.b))
>>> # xdoctest: +SKIP
>>> x = var('x')
>>> f = Foo(1, x)
>>> g = Foo(1, 2)
>>> unify_object(f, g, {})
{~x: 2}
"""
if type(u) != type(v):
return False
if hasattr(u, '__slots__'):
return unify([getattr(u, slot) for slot in u.__slots__],
[getattr(v, slot) for slot in v.__slots__],
s)
else:
return unify(u.__dict__, v.__dict__, s)
@dispatch(slice, slice, dict)
def _unify(u, v, s):
""" Unify a Python ``slice`` object """
return unify((u.start, u.stop, u.step), (v.start, v.stop, v.step), s)
| pytorch-master | torch/fx/experimental/unification/more.py |
def hashable(x):
try:
hash(x)
return True
except TypeError:
return False
def transitive_get(key, d):
""" Transitive dict.get
>>> d = {1: 2, 2: 3, 3: 4}
>>> d.get(1)
2
>>> transitive_get(1, d)
4
"""
while hashable(key) and key in d:
key = d[key]
return key
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
# Taken from theano/theano/gof/sched.py
# Avoids licensing issues because this was written by Matthew Rocklin
def _toposort(edges):
""" Topological sort algorithm by Kahn [1] - O(nodes + vertices)
inputs:
edges - a dict of the form {a: {b, c}} where b and c depend on a
outputs:
L - an ordered list of nodes that satisfy the dependencies of edges
>>> _toposort({1: (2, 3), 2: (3, )})
>>> # xdoctest: +SKIP
[1, 2, 3]
Closely follows the wikipedia page [2]
[1] Kahn, Arthur B. (1962), "Topological sorting of large networks",
Communications of the ACM
[2] http://en.wikipedia.org/wiki/Toposort#Algorithms
"""
incoming_edges = reverse_dict(edges)
incoming_edges = dict((k, set(val)) for k, val in incoming_edges.items())
S = set((v for v in edges if v not in incoming_edges))
L = []
while S:
n = S.pop()
L.append(n)
for m in edges.get(n, ()):
assert n in incoming_edges[m]
incoming_edges[m].remove(n)
if not incoming_edges[m]:
S.add(m)
if any(incoming_edges.get(v, None) for v in edges):
raise ValueError("Input has cycles")
return L
def reverse_dict(d):
"""Reverses direction of dependence dict
>>> d = {'a': (1, 2), 'b': (2, 3), 'c':()}
>>> reverse_dict(d) # doctest: +SKIP
{1: ('a',), 2: ('a', 'b'), 3: ('b',)}
    :note: dict order is not deterministic. As we iterate over the
    input dict, the output of this function depends on the dict
    order, so the output order of this function should be
    considered non-deterministic.
"""
result = {} # type: ignore[var-annotated]
for key in d:
for val in d[key]:
result[val] = result.get(val, tuple()) + (key, )
return result
def xfail(func):
try:
func()
raise Exception("XFailed test passed") # pragma:nocover
except Exception:
pass
def freeze(d):
""" Freeze container to hashable form
>>> freeze(1)
1
>>> freeze([1, 2])
(1, 2)
>>> freeze({1: 2}) # doctest: +SKIP
frozenset([(1, 2)])
"""
if isinstance(d, dict):
return frozenset(map(freeze, d.items()))
if isinstance(d, set):
return frozenset(map(freeze, d))
if isinstance(d, (tuple, list)):
return tuple(map(freeze, d))
return d
| pytorch-master | torch/fx/experimental/unification/utils.py |
from .core import unify, reify # type: ignore[attr-defined]
from .variable import isvar
from .utils import _toposort, freeze
from .unification_tools import groupby, first # type: ignore[import]
class Dispatcher(object):
def __init__(self, name):
self.name = name
self.funcs = dict()
self.ordering = []
def add(self, signature, func):
self.funcs[freeze(signature)] = func
self.ordering = ordering(self.funcs)
def __call__(self, *args, **kwargs):
func, s = self.resolve(args)
return func(*args, **kwargs)
def resolve(self, args):
n = len(args)
for signature in self.ordering:
if len(signature) != n:
continue
s = unify(freeze(args), signature)
if s is not False:
result = self.funcs[signature]
return result, s
raise NotImplementedError("No match found. \nKnown matches: "
+ str(self.ordering) + "\nInput: " + str(args))
def register(self, *signature):
def _(func):
self.add(signature, func)
return self
return _
class VarDispatcher(Dispatcher):
""" A dispatcher that calls functions with variable names
>>> d = VarDispatcher('d')
>>> # xdoctest: +SKIP
>>> x = var('x')
>>> @d.register('inc', x)
... def f(x):
... return x + 1
>>> @d.register('double', x)
... def f(x):
... return x * 2
>>> d('inc', 10)
11
>>> d('double', 10)
20
"""
def __call__(self, *args, **kwargs):
func, s = self.resolve(args)
d = dict((k.token, v) for k, v in s.items())
return func(**d)
global_namespace = dict() # type: ignore[var-annotated]
def match(*signature, **kwargs):
namespace = kwargs.get('namespace', global_namespace)
dispatcher = kwargs.get('Dispatcher', Dispatcher)
def _(func):
name = func.__name__
if name not in namespace:
namespace[name] = dispatcher(name)
d = namespace[name]
d.add(signature, func)
return d
return _
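# A small sketch of the ``match`` decorator, in the spirit of the VarDispatcher
# docstring above (it assumes ``var('x')`` yields a logic variable):
#
#   >>> x = var('x')
#   >>> @match('inc', x)
#   ... def f(tag, n):
#   ...     return n + 1
#   >>> f('inc', 10)
#   11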
def supercedes(a, b):
""" ``a`` is a more specific match than ``b`` """
if isvar(b) and not isvar(a):
return True
s = unify(a, b)
if s is False:
return False
s = dict((k, v) for k, v in s.items() if not isvar(k) or not isvar(v))
if reify(a, s) == a:
return True
if reify(b, s) == b:
return False
# Taken from multipledispatch
def edge(a, b, tie_breaker=hash):
""" A should be checked before B
Tie broken by tie_breaker, defaults to ``hash``
"""
if supercedes(a, b):
if supercedes(b, a):
return tie_breaker(a) > tie_breaker(b)
else:
return True
return False
# Taken from multipledispatch
def ordering(signatures):
""" A sane ordering of signatures to check, first to last
    Topological sort of edges as given by ``edge`` and ``supercedes``
"""
signatures = list(map(tuple, signatures))
edges = [(a, b) for a in signatures for b in signatures if edge(a, b)]
edges = groupby(first, edges)
for s in signatures:
if s not in edges:
edges[s] = []
edges = dict((k, [b for a, b in v]) for k, v in edges.items()) # type: ignore[attr-defined, assignment]
return _toposort(edges)
| pytorch-master | torch/fx/experimental/unification/match.py |
from warnings import warn
import inspect
from .conflict import ordering, ambiguities, super_signature, AmbiguityWarning
from .utils import expand_tuples
from .variadic import Variadic, isvariadic
import itertools as itl
class MDNotImplementedError(NotImplementedError):
""" A NotImplementedError for multiple dispatch """
def ambiguity_warn(dispatcher, ambiguities):
""" Raise warning when ambiguity is detected
Parameters
----------
dispatcher : Dispatcher
The dispatcher on which the ambiguity was detected
ambiguities : set
Set of type signature pairs that are ambiguous within this dispatcher
See Also:
Dispatcher.add
warning_text
"""
warn(warning_text(dispatcher.name, ambiguities), AmbiguityWarning)
def halt_ordering():
"""Deprecated interface to temporarily disable ordering.
"""
warn(
'halt_ordering is deprecated, you can safely remove this call.',
DeprecationWarning,
)
def restart_ordering(on_ambiguity=ambiguity_warn):
"""Deprecated interface to temporarily resume ordering.
"""
warn(
        'restart_ordering is deprecated, if you would like to eagerly order '
'the dispatchers, you should call the ``reorder()`` method on each'
' dispatcher.',
DeprecationWarning,
)
def variadic_signature_matches_iter(types, full_signature):
"""Check if a set of input types matches a variadic signature.
Notes
-----
The algorithm is as follows:
Initialize the current signature to the first in the sequence
For each type in `types`:
If the current signature is variadic
If the type matches the signature
yield True
Else
Try to get the next signature
If no signatures are left we can't possibly have a match
so yield False
Else
yield True if the type matches the current signature
Get the next signature
"""
sigiter = iter(full_signature)
sig = next(sigiter)
for typ in types:
matches = issubclass(typ, sig)
yield matches
if not isvariadic(sig):
# we're not matching a variadic argument, so move to the next
# element in the signature
sig = next(sigiter)
else:
try:
sig = next(sigiter)
except StopIteration:
assert isvariadic(sig)
yield True
else:
                # We have signature items left over, so not all of our
                # arguments have matched
yield False
def variadic_signature_matches(types, full_signature):
# No arguments always matches a variadic signature
assert full_signature
return all(variadic_signature_matches_iter(types, full_signature))
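# Illustrative checks, building on the ``Variadic`` examples in variadic.py:
#   variadic_signature_matches((int, int), (Variadic[int],))                 -> True
#   variadic_signature_matches((int, str), (Variadic[int],))                 -> False
#   variadic_signature_matches((int, float, float), (int, Variadic[float]))  -> True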
class Dispatcher(object):
""" Dispatch methods based on type signature
Use ``dispatch`` to add implementations
Examples
--------
>>> # xdoctest: +SKIP("bad import name")
>>> from multipledispatch import dispatch
>>> @dispatch(int)
... def f(x):
... return x + 1
>>> @dispatch(float)
... def f(x):
... return x - 1
>>> f(3)
4
>>> f(3.0)
2.0
"""
__slots__ = '__name__', 'name', 'funcs', '_ordering', '_cache', 'doc'
def __init__(self, name, doc=None):
self.name = self.__name__ = name
self.funcs = {}
self.doc = doc
self._cache = {}
def register(self, *types, **kwargs):
""" register dispatcher with new implementation
>>> f = Dispatcher('f')
>>> @f.register(int)
... def inc(x):
... return x + 1
>>> @f.register(float)
... def dec(x):
... return x - 1
>>> @f.register(list)
... @f.register(tuple)
... def reverse(x):
... return x[::-1]
>>> f(1)
2
>>> f(1.0)
0.0
>>> f([1, 2, 3])
[3, 2, 1]
"""
def _df(func):
self.add(types, func, **kwargs) # type: ignore[call-arg]
return func
return _df
@classmethod
def get_func_params(cls, func):
if hasattr(inspect, "signature"):
sig = inspect.signature(func)
return sig.parameters.values()
@classmethod
def get_func_annotations(cls, func):
""" get annotations of function positional parameters
"""
params = cls.get_func_params(func)
if params:
Parameter = inspect.Parameter
params = (param for param in params
if param.kind in
(Parameter.POSITIONAL_ONLY,
Parameter.POSITIONAL_OR_KEYWORD))
annotations = tuple(
param.annotation
for param in params)
if all(ann is not Parameter.empty for ann in annotations):
return annotations
def add(self, signature, func):
""" Add new types/method pair to dispatcher
>>> D = Dispatcher('add')
>>> D.add((int, int), lambda x, y: x + y)
>>> D.add((float, float), lambda x, y: x + y)
>>> D(1, 2)
3
>>> D(1, 2.0)
Traceback (most recent call last):
...
NotImplementedError: Could not find signature for add: <int, float>
>>> # When ``add`` detects a warning it calls the ``on_ambiguity`` callback
>>> # with a dispatcher/itself, and a set of ambiguous type signature pairs
>>> # as inputs. See ``ambiguity_warn`` for an example.
"""
# Handle annotations
if not signature:
annotations = self.get_func_annotations(func)
if annotations:
signature = annotations
# Handle union types
if any(isinstance(typ, tuple) for typ in signature):
for typs in expand_tuples(signature):
self.add(typs, func)
return
new_signature = []
for index, typ in enumerate(signature, start=1):
if not isinstance(typ, (type, list)):
str_sig = ', '.join(c.__name__ if isinstance(c, type)
else str(c) for c in signature)
raise TypeError("Tried to dispatch on non-type: %s\n"
"In signature: <%s>\n"
"In function: %s" %
(typ, str_sig, self.name))
# handle variadic signatures
if isinstance(typ, list):
if index != len(signature):
raise TypeError(
'Variadic signature must be the last element'
)
if len(typ) != 1:
raise TypeError(
'Variadic signature must contain exactly one element. '
'To use a variadic union type place the desired types '
'inside of a tuple, e.g., [(int, str)]'
)
new_signature.append(Variadic[typ[0]])
else:
new_signature.append(typ)
self.funcs[tuple(new_signature)] = func
self._cache.clear()
try:
del self._ordering
except AttributeError:
pass
@property
def ordering(self):
try:
return self._ordering
except AttributeError:
return self.reorder()
def reorder(self, on_ambiguity=ambiguity_warn):
self._ordering = od = ordering(self.funcs)
amb = ambiguities(self.funcs)
if amb:
on_ambiguity(self, amb)
return od
def __call__(self, *args, **kwargs):
types = tuple([type(arg) for arg in args])
try:
func = self._cache[types]
except KeyError:
func = self.dispatch(*types)
if not func:
raise NotImplementedError(
'Could not find signature for %s: <%s>' %
(self.name, str_signature(types)))
self._cache[types] = func
try:
return func(*args, **kwargs)
except MDNotImplementedError:
funcs = self.dispatch_iter(*types)
next(funcs) # burn first
for func in funcs:
try:
return func(*args, **kwargs)
except MDNotImplementedError:
pass
raise NotImplementedError(
"Matching functions for "
"%s: <%s> found, but none completed successfully" % (
self.name, str_signature(types),),)
def __str__(self):
return "<dispatched %s>" % self.name
__repr__ = __str__
def dispatch(self, *types):
"""Deterimine appropriate implementation for this type signature
This method is internal. Users should call this object as a function.
Implementation resolution occurs within the ``__call__`` method.
>>> # xdoctest: +SKIP
>>> from multipledispatch import dispatch
>>> @dispatch(int)
... def inc(x):
... return x + 1
>>> implementation = inc.dispatch(int)
>>> implementation(3)
4
>>> print(inc.dispatch(float))
None
See Also:
``multipledispatch.conflict`` - module to determine resolution order
"""
if types in self.funcs:
return self.funcs[types]
try:
return next(self.dispatch_iter(*types))
except StopIteration:
return None
def dispatch_iter(self, *types):
n = len(types)
for signature in self.ordering:
if len(signature) == n and all(map(issubclass, types, signature)):
result = self.funcs[signature]
yield result
elif len(signature) and isvariadic(signature[-1]):
if variadic_signature_matches(types, signature):
result = self.funcs[signature]
yield result
def resolve(self, types):
""" Deterimine appropriate implementation for this type signature
.. deprecated:: 0.4.4
Use ``dispatch(*types)`` instead
"""
warn("resolve() is deprecated, use dispatch(*types)",
DeprecationWarning)
return self.dispatch(*types)
def __getstate__(self):
return {'name': self.name,
'funcs': self.funcs}
def __setstate__(self, d):
self.name = d['name']
self.funcs = d['funcs']
self._ordering = ordering(self.funcs)
self._cache = dict()
@property
def __doc__(self):
docs = ["Multiply dispatched method: %s" % self.name]
if self.doc:
docs.append(self.doc)
other = []
for sig in self.ordering[::-1]:
func = self.funcs[sig]
if func.__doc__:
s = 'Inputs: <%s>\n' % str_signature(sig)
s += '-' * len(s) + '\n'
s += func.__doc__.strip()
docs.append(s)
else:
other.append(str_signature(sig))
if other:
docs.append('Other signatures:\n ' + '\n '.join(other))
return '\n\n'.join(docs)
def _help(self, *args):
return self.dispatch(*map(type, args)).__doc__
def help(self, *args, **kwargs):
""" Print docstring for the function corresponding to inputs """
print(self._help(*args))
def _source(self, *args):
func = self.dispatch(*map(type, args))
if not func:
raise TypeError("No function found")
return source(func)
def source(self, *args, **kwargs):
""" Print source code for the function corresponding to inputs """
print(self._source(*args))
def source(func):
s = 'File: %s\n\n' % inspect.getsourcefile(func)
s = s + inspect.getsource(func)
return s
class MethodDispatcher(Dispatcher):
""" Dispatch methods based on type signature
See Also:
Dispatcher
"""
__slots__ = ('obj', 'cls')
@classmethod
def get_func_params(cls, func):
if hasattr(inspect, "signature"):
sig = inspect.signature(func)
return itl.islice(sig.parameters.values(), 1, None)
def __get__(self, instance, owner):
self.obj = instance
self.cls = owner
return self
def __call__(self, *args, **kwargs):
types = tuple([type(arg) for arg in args])
func = self.dispatch(*types)
if not func:
raise NotImplementedError('Could not find signature for %s: <%s>' %
(self.name, str_signature(types)))
return func(self.obj, *args, **kwargs)
def str_signature(sig):
""" String representation of type signature
>>> str_signature((int, float))
'int, float'
"""
return ', '.join(cls.__name__ for cls in sig)
def warning_text(name, amb):
""" The text for ambiguity warnings """
text = "\nAmbiguities exist in dispatched function %s\n\n" % (name)
text += "The following signatures may result in ambiguous behavior:\n"
for pair in amb:
text += "\t" + \
', '.join('[' + str_signature(s) + ']' for s in pair) + "\n"
text += "\n\nConsider making the following additions:\n\n"
text += '\n\n'.join(['@dispatch(' + str_signature(super_signature(s))
+ ')\ndef %s(...)' % name for s in amb])
return text
| pytorch-master | torch/fx/experimental/unification/multipledispatch/dispatcher.py |
import six
from .utils import typename
class VariadicSignatureType(type):
# checking if subclass is a subclass of self
def __subclasscheck__(cls, subclass):
other_type = (subclass.variadic_type if isvariadic(subclass)
else (subclass,))
return subclass is cls or all(
issubclass(other, cls.variadic_type) for other in other_type # type: ignore[attr-defined]
)
def __eq__(cls, other):
"""
Return True if other has the same variadic type
Parameters
----------
other : object (type)
The object (type) to check
Returns
-------
bool
Whether or not `other` is equal to `self`
"""
return (isvariadic(other) and
set(cls.variadic_type) == set(other.variadic_type)) # type: ignore[attr-defined]
def __hash__(cls):
return hash((type(cls), frozenset(cls.variadic_type))) # type: ignore[attr-defined]
def isvariadic(obj):
"""Check whether the type `obj` is variadic.
Parameters
----------
obj : type
The type to check
Returns
-------
bool
Whether or not `obj` is variadic
Examples
--------
>>> isvariadic(int)
False
>>> isvariadic(Variadic[int])
True
"""
return isinstance(obj, VariadicSignatureType)
class VariadicSignatureMeta(type):
"""A metaclass that overrides ``__getitem__`` on the class. This is used to
generate a new type for Variadic signatures. See the Variadic class for
examples of how this behaves.
"""
def __getitem__(cls, variadic_type):
if not (isinstance(variadic_type, (type, tuple)) or type(variadic_type)):
raise ValueError("Variadic types must be type or tuple of types"
" (Variadic[int] or Variadic[(int, float)]")
if not isinstance(variadic_type, tuple):
variadic_type = variadic_type,
return VariadicSignatureType(
'Variadic[%s]' % typename(variadic_type),
(),
dict(variadic_type=variadic_type, __slots__=())
)
class Variadic(six.with_metaclass(VariadicSignatureMeta)):
"""A class whose getitem method can be used to generate a new type
representing a specific variadic signature.
Examples
--------
>>> Variadic[int] # any number of int arguments
>>> # xdoctest: +SKIP
<class 'multipledispatch.variadic.Variadic[int]'>
>>> Variadic[(int, str)] # any number of one of int or str arguments
<class 'multipledispatch.variadic.Variadic[(int, str)]'>
>>> issubclass(int, Variadic[int])
True
>>> issubclass(int, Variadic[(int, str)])
True
>>> issubclass(str, Variadic[(int, str)])
True
>>> issubclass(float, Variadic[(int, str)])
False
"""
| pytorch-master | torch/fx/experimental/unification/multipledispatch/variadic.py |
from .core import dispatch
from .dispatcher import (Dispatcher, halt_ordering, restart_ordering,
MDNotImplementedError)
| pytorch-master | torch/fx/experimental/unification/multipledispatch/__init__.py |
import inspect
import sys
from .dispatcher import Dispatcher, MethodDispatcher
global_namespace = dict() # type: ignore[var-annotated]
def dispatch(*types, **kwargs):
""" Dispatch function on the types of the inputs
Supports dispatch on all non-keyword arguments.
Collects implementations based on the function name. Ignores namespaces.
If ambiguous type signatures occur a warning is raised when the function is
defined suggesting the additional method to break the ambiguity.
Examples
--------
>>> @dispatch(int)
... def f(x):
... return x + 1
>>> @dispatch(float)
... def f(x):
... return x - 1
>>> f(3)
4
>>> f(3.0)
2.0
>>> # Specify an isolated namespace with the namespace keyword argument
>>> my_namespace = dict()
>>> @dispatch(int, namespace=my_namespace)
... def foo(x):
... return x + 1
>>> # Dispatch on instance methods within classes
>>> class MyClass(object):
... @dispatch(list)
... def __init__(self, data):
... self.data = data
... @dispatch(int)
... def __init__(self, datum):
... self.data = [datum]
>>> MyClass([1, 2, 3]).data
[1, 2, 3]
>>> MyClass(3).data
[3]
"""
namespace = kwargs.get('namespace', global_namespace)
types = tuple(types)
def _df(func):
name = func.__name__
if ismethod(func):
dispatcher = inspect.currentframe().f_back.f_locals.get( # type: ignore[union-attr]
name, # type: ignore[union-attr]
MethodDispatcher(name),
)
else:
if name not in namespace:
namespace[name] = Dispatcher(name)
dispatcher = namespace[name]
dispatcher.add(types, func)
return dispatcher
return _df
def ismethod(func):
""" Is func a method?
Note that this has to work as the method is defined but before the class is
defined. At this stage methods look like functions.
"""
if hasattr(inspect, "signature"):
signature = inspect.signature(func)
return signature.parameters.get('self', None) is not None
else:
if sys.version_info.major < 3:
spec = inspect.getargspec(func)
else:
spec = inspect.getfullargspec(func) # type: ignore[union-attr, assignment]
return spec and spec.args and spec.args[0] == 'self'
| pytorch-master | torch/fx/experimental/unification/multipledispatch/core.py |
from .utils import _toposort, groupby
from .variadic import isvariadic
class AmbiguityWarning(Warning):
pass
def supercedes(a, b):
""" A is consistent and strictly more specific than B """
if len(a) < len(b):
# only case is if a is empty and b is variadic
return not a and len(b) == 1 and isvariadic(b[-1])
elif len(a) == len(b):
return all(map(issubclass, a, b))
else:
# len(a) > len(b)
p1 = 0
p2 = 0
while p1 < len(a) and p2 < len(b):
cur_a = a[p1]
cur_b = b[p2]
if not (isvariadic(cur_a) or isvariadic(cur_b)):
if not issubclass(cur_a, cur_b):
return False
p1 += 1
p2 += 1
elif isvariadic(cur_a):
assert p1 == len(a) - 1
return p2 == len(b) - 1 and issubclass(cur_a, cur_b)
elif isvariadic(cur_b):
assert p2 == len(b) - 1
if not issubclass(cur_a, cur_b):
return False
p1 += 1
return p2 == len(b) - 1 and p1 == len(a)
def consistent(a, b):
""" It is possible for an argument list to satisfy both A and B """
# Need to check for empty args
if not a:
return not b or isvariadic(b[0])
if not b:
return not a or isvariadic(a[0])
# Non-empty args check for mutual subclasses
if len(a) == len(b):
return all(issubclass(aa, bb) or issubclass(bb, aa)
for aa, bb in zip(a, b))
else:
p1 = 0
p2 = 0
while p1 < len(a) and p2 < len(b):
cur_a = a[p1]
cur_b = b[p2]
if not issubclass(cur_b, cur_a) and not issubclass(cur_a, cur_b):
return False
if not (isvariadic(cur_a) or isvariadic(cur_b)):
p1 += 1
p2 += 1
elif isvariadic(cur_a):
p2 += 1
elif isvariadic(cur_b):
p1 += 1
# We only need to check for variadic ends
# Variadic types are guaranteed to be the last element
return (isvariadic(cur_a) and p2 == len(b) or
isvariadic(cur_b) and p1 == len(a))
def ambiguous(a, b):
""" A is consistent with B but neither is strictly more specific """
return consistent(a, b) and not (supercedes(a, b) or supercedes(b, a))
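# Illustrative examples:
#   supercedes((int,), (object,))            -> True   (strictly more specific)
#   supercedes((object,), (int,))            -> False
#   ambiguous((int, object), (object, int))  -> True   (neither side dominates)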
def ambiguities(signatures):
""" All signature pairs such that A is ambiguous with B """
signatures = list(map(tuple, signatures))
return set((a, b) for a in signatures for b in signatures
if hash(a) < hash(b)
and ambiguous(a, b)
and not any(supercedes(c, a) and supercedes(c, b)
for c in signatures))
def super_signature(signatures):
""" A signature that would break ambiguities """
n = len(signatures[0])
assert all(len(s) == n for s in signatures)
return [max([type.mro(sig[i]) for sig in signatures], key=len)[0]
for i in range(n)]
def edge(a, b, tie_breaker=hash):
""" A should be checked before B
Tie broken by tie_breaker, defaults to ``hash``
"""
# A either supercedes B and B does not supercede A or if B does then call
# tie_breaker
return supercedes(a, b) and (not supercedes(b, a) or tie_breaker(a) > tie_breaker(b))
def ordering(signatures):
""" A sane ordering of signatures to check, first to last
    Topological sort of edges as given by ``edge`` and ``supercedes``
"""
signatures = list(map(tuple, signatures))
edges = [(a, b) for a in signatures for b in signatures if edge(a, b)]
edges = groupby(lambda x: x[0], edges)
for s in signatures:
if s not in edges:
edges[s] = []
edges = dict((k, [b for a, b in v]) for k, v in edges.items()) # type: ignore[assignment, attr-defined]
return _toposort(edges)
| pytorch-master | torch/fx/experimental/unification/multipledispatch/conflict.py |
from collections import OrderedDict
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def expand_tuples(L):
"""
>>> expand_tuples([1, (2, 3)])
[(1, 2), (1, 3)]
>>> expand_tuples([1, 2])
[(1, 2)]
"""
if not L:
return [()]
elif not isinstance(L[0], tuple):
rest = expand_tuples(L[1:])
return [(L[0],) + t for t in rest]
else:
rest = expand_tuples(L[1:])
return [(item,) + t for t in rest for item in L[0]]
# Taken from theano/theano/gof/sched.py
# Avoids licensing issues because this was written by Matthew Rocklin
def _toposort(edges):
""" Topological sort algorithm by Kahn [1] - O(nodes + vertices)
inputs:
edges - a dict of the form {a: {b, c}} where b and c depend on a
outputs:
L - an ordered list of nodes that satisfy the dependencies of edges
>>> _toposort({1: (2, 3), 2: (3, )})
[1, 2, 3]
>>> # Closely follows the wikipedia page [2]
>>> # [1] Kahn, Arthur B. (1962), "Topological sorting of large networks",
>>> # Communications of the ACM
>>> # [2] http://en.wikipedia.org/wiki/Toposort#Algorithms
"""
incoming_edges = reverse_dict(edges)
incoming_edges = OrderedDict((k, set(val))
for k, val in incoming_edges.items())
S = OrderedDict.fromkeys(v for v in edges if v not in incoming_edges)
L = []
while S:
n, _ = S.popitem()
L.append(n)
for m in edges.get(n, ()):
assert n in incoming_edges[m]
incoming_edges[m].remove(n)
if not incoming_edges[m]:
S[m] = None
if any(incoming_edges.get(v, None) for v in edges):
raise ValueError("Input has cycles")
return L
def reverse_dict(d):
"""Reverses direction of dependence dict
>>> d = {'a': (1, 2), 'b': (2, 3), 'c':()}
>>> reverse_dict(d) # doctest: +SKIP
{1: ('a',), 2: ('a', 'b'), 3: ('b',)}
    :note: dict order is not deterministic. As we iterate over the
    input dict, the output of this function depends on the dict
    order, so the output order of this function should be
    considered non-deterministic.
"""
result = OrderedDict() # type: ignore[var-annotated]
for key in d:
for val in d[key]:
result[val] = result.get(val, tuple()) + (key, )
return result
# Taken from toolz
# Avoids licensing issues because this version was authored by Matthew Rocklin
def groupby(func, seq):
""" Group a collection by a key function
>>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
>>> groupby(len, names) # doctest: +SKIP
{3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
>>> iseven = lambda x: x % 2 == 0
>>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP
{False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
See Also:
``countby``
"""
d = OrderedDict() # type: ignore[var-annotated]
for item in seq:
key = func(item)
if key not in d:
d[key] = list()
d[key].append(item)
return d
def typename(type):
"""Get the name of `type`.
Parameters
----------
type : Union[Type, Tuple[Type]]
Returns
-------
str
The name of `type` or a tuple of the names of the types in `type`.
Examples
--------
>>> typename(int)
'int'
>>> typename((int, float))
'(int, float)'
"""
try:
return type.__name__
except AttributeError:
if len(type) == 1:
return typename(*type)
return '(%s)' % ', '.join(map(typename, type))
| pytorch-master | torch/fx/experimental/unification/multipledispatch/utils.py |
# -*- coding: utf-8 -*-
from torch.fx.experimental.migrate_gradual_types.operation import op_add, op_sub, op_mul, op_div, \
op_mod, op_gt, op_lt, op_neq, op_eq
from torch.fx.tensor_type import TensorType, Dyn
class Constraint:
pass
class Conj(Constraint):
def __init__(self, conjuncts):
"""
        :param conjuncts: Conjunction of constraints
"""
self.conjucts = conjuncts
def __eq__(self, other):
if isinstance(other, Conj):
            return self.conjucts == other.conjucts
else:
return False
def __repr__(self):
return f'And({self.conjucts})'
class Disj(Constraint):
def __init__(self, disjuncts):
"""
:param disjuncts: Disjunction of constraints
"""
self.disjuncts = disjuncts
def __eq__(self, other):
if isinstance(other, Disj):
            return self.disjuncts == other.disjuncts
else:
return False
def __repr__(self):
return f'Or({self.disjuncts})'
class Prod(Constraint):
def __init__(self, products):
"""
:param products: lists of dimensions to multiply
"""
self.products = products
def __eq__(self, other):
if isinstance(other, Prod):
            return self.products == other.products
else:
return False
def __repr__(self):
return f'Product({self.products})'
class T(Constraint):
"""
True
"""
def __init__(self):
pass
def __eq__(self, other):
return isinstance(other, T)
def __repr__(self):
return 'True'
class F(Constraint):
"""
False
"""
def __init__(self):
pass
def __eq__(self, other):
return isinstance(other, F)
def __repr__(self):
return 'False'
class BinaryConstraint(Constraint):
"""
Represents all binary operations
"""
def __init__(self, lhs, rhs, op):
"""
:param lhs: lhs of the constraint
:param rhs: rhs of the constraint
        :param op: string representing the operation
"""
self.lhs = lhs
self.rhs = rhs
self.op = op
def __eq__(self, other):
if isinstance(other, BinaryConstraint):
return self.lhs == other.lhs and self.rhs == other.rhs and self.op == other.op
else:
return False
def __repr__(self):
return f'({self.lhs} {self.op} {self.rhs})'
class BinConstraintT(BinaryConstraint):
"""
Binary constraints about tensors
"""
def __init__(self, lhs, rhs, op):
assert (isinstance(lhs, TVar) or isinstance(lhs, TensorType) or isinstance(lhs, int) or lhs == Dyn) and \
(isinstance(rhs, TVar) or isinstance(rhs, TensorType) or isinstance(rhs, int) or rhs == Dyn)
super().__init__(lhs, rhs, op)
def __eq__(self, other):
return super().__eq__(other)
class BinConstraintD(BinaryConstraint):
"""
Binary constraints about dimensions
"""
def __init__(self, lhs, rhs, op):
assert is_algebraic_expression(lhs) or is_dim(lhs) or is_bool_expr(lhs)
assert is_algebraic_expression(rhs) or is_dim(rhs) or is_bool_expr(rhs)
super().__init__(lhs, rhs, op)
def __eq__(self, other):
return super().__eq__(other)
class TGreatestUpperBound(Constraint):
"""
Greatest Upper bound for tensors with dynamic type
"""
def __init__(self, res, rhs1, rhs2):
"""
        :param res: tensor variable that stores the result of the operation
        :param rhs1: tensor or tensor variable
        :param rhs2: tensor or tensor variable
"""
self.res = res
self.rhs1 = rhs1
self.rhs2 = rhs2
def __repr__(self):
return f'{self.res} = {self.rhs1}⊔*{self.rhs2}'
def __eq__(self, other):
if isinstance(other, TGreatestUpperBound):
return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2
else:
return False
class DGreatestUpperBound(Constraint):
"""
Greatest Upper bound for dimensions
"""
def __init__(self, res, rhs1, rhs2):
"""
:param res: Dimension variable to store the result
:param rhs1: dimension variable 1
:param rhs2: dimension variable 2
"""
assert is_dim(res)
assert is_dim(rhs1)
assert is_dim(rhs2)
self.res = res
self.rhs1 = rhs1
self.rhs2 = rhs2
def __repr__(self):
return f'{self.res} = {self.rhs1}⊔{self.rhs2}'
def __eq__(self, other):
if isinstance(other, DGreatestUpperBound):
return self.res == other.res and self.rhs1 == other.rhs1 and self.rhs2 == other.rhs2
else:
return False
class CanReshape(Constraint):
"""
can_reshape constraint
"""
def __init__(self, src, target):
"""
:param src: tensor variable
:param target: tensor
"""
self.src = src
self.target = target
def __repr__(self):
return f'can-reshape({self.src}, {self.target})'
def __eq__(self, other):
if isinstance(other, CanReshape):
return self.src == other.src and self.target == other.target
else:
return False
class IndexSelect(Constraint):
def __init__(self, tensor_size, input_var, dim_replace, index, output):
"""
Args:
input_var: input to index_select
tensor_size: tensor size we are considering
            dim_replace: the dimension that will replace the input dimension at "index"
            index: location of the dimension to replace in the input
            output: variable to store the result
"""
assert isinstance(input_var, TVar)
assert isinstance(output, TVar)
assert isinstance(dim_replace, DVar) or dim_replace == Dyn
assert isinstance(index, int)
self.input_var = input_var
self.tensor_size = tensor_size
self.dim_replace = dim_replace
self.index = index
self.output = output
def __repr__(self):
return f' {self.output} = ' \
f'IndexSelect({self.input_var}, ' \
f'tensor_size: {self.tensor_size}, ' \
f'{self.dim_replace}, ' \
f'{self.index})'
def __eq__(self, other):
if isinstance(other, IndexSelect):
return self.tensor_size == other.tensor_size and \
self.dim_replace == other.dim_replace and \
self.index == other.index and \
self.output == other.output and \
self.input_var == other.input_var
else:
return False
class Transpose(Constraint):
def __init__(self, tensor_size, input_var, index1, index2, output):
"""
Args:
tensor_size: current tensor size
input_var: variable to hold input
index1: dimension 1
index2: dimension 2
output: output that stores result
"""
assert isinstance(input_var, TVar)
assert isinstance(output, TVar)
assert isinstance(index1, int)
assert isinstance(index2, int)
self.input_var = input_var
self.tensor_size = tensor_size
self.index1 = index1
self.index2 = index2
self.output = output
def __repr__(self):
return f' {self.output} = ' \
f'Transpose({self.input_var}, ' \
f'tensor_size: {self.tensor_size}, ' \
f'{self.index1}, ' \
f'{self.index2})'
def __eq__(self, other):
if isinstance(other, Transpose):
return self.tensor_size == other.tensor_size and \
self.index1 == other.index1 and \
self.index2 == other.index2 and \
self.output == other.output and \
self.input_var == other.input_var
else:
return False
class GetItem(Constraint):
def __init__(self, tensor_size, index, res, input_var):
"""
Constraint for getting item given a tensor size
        :param tensor_size: actual number representing the rank of the input tensor
:param index: actual number representing the index
:param res: dimension variable to carry the item we get
:param input_var: a tensor variable from which we will get item
"""
assert isinstance(res, DVar)
self.res = res
self.tensor_size = tensor_size
self.index = index
self.input_var = input_var
def __repr__(self):
return f' {self.res} = GetItem({self.input_var}, tensor_size: {self.tensor_size}, {self.index})'
def __eq__(self, other):
if isinstance(other, GetItem):
return self.res == other.res and \
self.tensor_size == other.tensor_size and \
self.index == other.index and \
self.input_var == other.input_var
else:
return False
class GetItemTensor(Constraint):
def __init__(self, tensor_size, index_tuple, res, input_var):
"""
Constraint for getting item given a tensor size
However, when the argument is a tuple, we will
expect a tensor
:param tensor_size: actual number representing the rank
:param index_tuple: tuple for indexing
:param res: tensor variable to carry the item we get
:param input_var: a tensor variable from which we will get item
"""
assert isinstance(res, TVar)
self.res = res
self.tensor_size = tensor_size
self.index_tuple = index_tuple
self.input_var = input_var
def __repr__(self):
return f' {self.res} = GetItemT({self.input_var}, tensor_size: {self.tensor_size}, {self.index_tuple})'
def __eq__(self, other):
if isinstance(other, GetItemTensor):
return self.res == other.res and \
self.tensor_size == other.tensor_size and \
self.index_tuple == other.index_tuple and \
self.input_var == other.input_var
else:
return False
class CalcConv(Constraint):
def __init__(self, conv_result, input_var, c_out, kernel, padding, stride, dilation, matching_constraint_vars):
"""
:param conv_result: the convolution result
:param input_var: input to convolution
        :param c_out: output channel type
:param kernel: kernel tuple
"""
self.conv_result = conv_result
self.input_var = input_var
self.c_out = c_out
self.kernel = kernel
self.padding = padding
self.stride = stride
self.dilation = dilation
self.matching_constraint = matching_constraint_vars
def __repr__(self):
return f'{self.conv_result} =' \
f' calc-conv({self.input_var},' \
f' {self.c_out}, {self.kernel}, ' \
f'{self.padding}, {self.stride},' \
f' {self.dilation})'
def __eq__(self, other):
if isinstance(other, CalcConv):
return self.conv_result == other.conv_result and self.input_var == other.input_var and \
self.c_out == other.c_out and self.kernel == other.kernel and self.padding == other.padding \
and self.stride == other.stride and self.dilation == other.dilation \
and self.matching_constraint == other.matching_constraint
else:
return False
class CalcMaxPool(Constraint):
def __init__(self, maxpool_result, input_var, kernel, padding, stride, dilation, matching_constraint_vars):
"""
:param maxpool_result: the result of maxpool
:param input_var: input to convolution
:param kernel: kernel tuple
"""
self.maxpool_result = maxpool_result
self.input_var = input_var
self.kernel = kernel
self.padding = padding
self.stride = stride
self.dilation = dilation
self.matching_constraint = matching_constraint_vars
def __repr__(self):
return f'{self.maxpool_result} =' \
f' calc-maxpool({self.input_var},' \
f' {self.kernel}, ' \
f'{self.padding}, {self.stride},' \
f' {self.dilation})'
def __eq__(self, other):
if isinstance(other, CalcMaxPool):
return self.maxpool_result == other.maxpool_result and self.input_var == other.input_var \
and self.kernel == other.kernel and self.padding == other.padding \
and self.stride == other.stride and self.dilation == other.dilation \
and self.matching_constraint == other.matching_constraint
else:
return False
class ApplyBroadcasting(Constraint):
def __init__(self, res1, res2, input1, input2):
"""
:param res1: resulting tensor 1
:param res2: resulting tensor 2
:param input1: tensor variable 1
:param input2: tensor variable 2
"""
self.res1 = res1
self.res2 = res2
self.input1 = input1
self.input2 = input2
def __eq__(self, other):
if isinstance(other, ApplyBroadcasting):
return self.res1 == other.res1 \
and self.res2 == other.res2 \
and self.input1 == other.input1 \
and self.input2 == other.input2
else:
return False
def __repr__(self):
return f'{self.res1}, {self.res2} ='f' apply-broadcasting({self.input1},' f' {self.input2})'
class CalcProduct(Constraint):
"""
Given correct dimensions, calculate the product for flatten accounting for Dyn
"""
def __init__(self, start, end, flattened, dims_to_flatten):
"""
:param start: start index
:param end: end index
        :param flattened: variable to store the product
:param dims_to_flatten: the type which we will flatten
"""
assert isinstance(dims_to_flatten, list)
assert isinstance(flattened, TVar)
assert isinstance(start, int)
assert isinstance(end, int)
self.start = start
self.end = end
self.dims_to_flatten = dims_to_flatten
self.flattened = flattened
def __eq__(self, other):
if isinstance(other, CalcProduct):
return self.start == other.start and self.end == other.end and \
self.dims_to_flatten == other.dims_to_flatten and self.flattened == other.flattened
else:
return False
def __repr__(self):
return f'{self.flattened} = CalcProduct({self.start}, {self.end}, {self.dims_to_flatten})'
class TVar:
"""
Tensor variable with no tensor constructor
"""
def __init__(self, tvar):
"""
:param tvar: tensor variable
"""
self.tvar = tvar
def __repr__(self):
return f'TV({self.tvar})'
def __eq__(self, other):
if isinstance(other, TVar):
return self.tvar == other.tvar
else:
return False
class DVar:
"""
Dimension variable
"""
def __init__(self, c):
"""
:param c: character or number
"""
self.c = c
def __repr__(self):
return f'DV({self.c})'
def __eq__(self, other):
if isinstance(other, DVar):
return self.c == other.c
else:
return False
class BVar:
"""
Boolean variable
"""
def __init__(self, c):
"""
:param c: character or number
"""
self.c = c
def __repr__(self):
return f'BV({self.c})'
def __eq__(self, other):
if isinstance(other, BVar):
return self.c == other.c
else:
return False
def is_algebraic_expression(constraint):
if isinstance(constraint, BinConstraintD):
return constraint.op in [op_add, op_sub, op_div, op_mul, op_mod]
else:
return isinstance(constraint, Prod)
def is_bool_expr(constraint):
if isinstance(constraint, BinConstraintD):
return constraint.op in [op_gt, op_lt, op_neq, op_eq]
else:
return isinstance(constraint, BVar) or isinstance(constraint, Conj) or isinstance(constraint, Disj)
def is_dim(d):
return isinstance(d, DVar) or isinstance(d, int) or d == Dyn
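# Usage sketch (illustrative, not part of the original module): constraints are
# plain data that compose by nesting. For example, "d1 equals 3 or d1 is Dyn"
# can be expressed as
#   d1 = DVar('d1')
#   c = Disj([BinConstraintD(d1, 3, op_eq), BinConstraintD(d1, Dyn, op_eq)])
# Later passes rewrite such constraint trees into solver-friendly (z3) formulas.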
| pytorch-master | torch/fx/experimental/migrate_gradual_types/constraint.py |
# mypy: ignore-errors
import copy
import itertools
from torch.fx.experimental.migrate_gradual_types.constraint_generator import BinConstraintT, MAX_TENSOR_RANK
from torch.fx.experimental.migrate_gradual_types.constraint import T, BinConstraintD, Conj, Constraint, DVar, TVar, \
Transpose
from torch.fx.experimental.migrate_gradual_types.constraint import Disj, TGreatestUpperBound
from torch.fx.experimental.migrate_gradual_types.constraint import DGreatestUpperBound
from torch.fx.experimental.migrate_gradual_types.constraint import CalcConv, CalcMaxPool
from torch.fx.experimental.migrate_gradual_types.constraint import CalcProduct, CanReshape
from torch.fx.experimental.migrate_gradual_types.constraint import ApplyBroadcasting, Prod, F, GetItem, GetItemTensor, IndexSelect
from torch.fx.experimental.migrate_gradual_types.operation import op_eq, op_precision, op_leq, op_matching
from torch.fx.experimental.migrate_gradual_types.operation import op_consistency, op_neq
from torch.fx.experimental.migrate_gradual_types.operation import op_mul, op_add, op_sub, op_div, op_mod
from torch.fx.experimental.migrate_gradual_types.util import gen_tensor_dims, gen_nat_constraints, gen_dvar
from torch.fx.tensor_type import TensorType, Dyn
from typing import Callable, Dict, List
_TRANSFORMATION_RULES: Dict[Constraint, Callable] = {}
def register_transformation_rule(call_target):
def register(fn):
if call_target in _TRANSFORMATION_RULES:
raise RuntimeError(f'Transformation rule already registered for {call_target}!')
_TRANSFORMATION_RULES[call_target] = fn
return fn
return register
def valid_index(index, dims):
"""
Given a list of dimensions, checks if an index is valid in the list
"""
try:
dims[index]
return T()
except IndexError:
return F()
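# For example (illustrative): with dims = [d1, d2], valid_index(1, dims) returns
# T(), while valid_index(5, dims) returns F() because the lookup raises IndexError.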
@register_transformation_rule(Transpose)
def transform_transpose(constraint, counter):
"""
Similar to a sequence of two index-selects
"""
dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
is_valid_index1 = valid_index(constraint.index1, dims)
is_valid_index2 = valid_index(constraint.index2, dims)
new_dims = copy.deepcopy(dims)
nat_constraints = gen_nat_constraints(dims)
if is_valid_index1 == T() and is_valid_index2 == T():
new_dims[constraint.index1] = dims[constraint.index2]
new_dims[constraint.index2] = dims[constraint.index1]
transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
*nat_constraints,
is_valid_index1, is_valid_index2,
BinConstraintT(constraint.output, TensorType(new_dims), op_eq)])
return transformed_constraint, counter
@register_transformation_rule(IndexSelect)
def transform_index_select(constraint, counter):
"""
    The constraint considers the given tensor size, checks whether the index is valid
    and, if so, generates a constraint for replacing the input dimension
    with the required dimension
"""
dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
is_valid_index = valid_index(constraint.index, dims)
nat_constraints = gen_nat_constraints(dims)
# if the index is valid then replace the input dimension with the new dimension
# otherwise the dimension will not be replaced and the clause will contain False
if is_valid_index == T():
new_dims = copy.deepcopy((dims))
new_dims[constraint.index] = constraint.dim_replace
transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
*nat_constraints,
is_valid_index,
BinConstraintT(constraint.output, TensorType(new_dims), op_eq)])
# print(constraints)
return transformed_constraint, counter
@register_transformation_rule(GetItem)
def transform_get_item(constraint, counter):
"""
generate an equality of the form:
t = [a1, ..., an]
then generate constraints that check if the given index is valid
given this particular tensor size.
If the index is valid, generate a constraint to get the item
Note that we already handled the Dyn input case in the previous
step.
Args:
constraint: GetItem which assumes we are getting an item from a tensor (not Dyn)
counter: variable tracking
Returns: simplified constraints for GetItem
"""
dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
nat_constraints = gen_nat_constraints(dims)
is_valid_index = valid_index(constraint.index, dims)
all_constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
*nat_constraints,
is_valid_index]
# if the index is valid, we generate a constraint for getting an item
# otherwise this clause will have been UNSAT due to the wrong index
if is_valid_index == T():
all_constraints.append(BinConstraintD(constraint.res, dims[constraint.index], op_eq))
return Conj(all_constraints), counter
def valid_index_tensor(index, dims):
"""
if the slice instances exceed the length of the dimensions
then this is a type error so we return False
"""
slice_count = 0
for s in index:
if isinstance(s, slice):
slice_count += 1
if slice_count > len(dims):
return F()
else:
return T()
@register_transformation_rule(GetItemTensor)
def transform_get_item_tensor(constraint, counter):
"""
When the index is a tuple, then the output will be a tensor
TODO: we have to check if this is the case for all HF models
    The cases we are covering here are a tuple with one of:
- slice with default argument
- None
None appends 1 to the input tensor dimensions
so each occurrence of 'None' increases the rank by 1
slice with default arguments does not change the rank
"""
assert isinstance(constraint.index_tuple, tuple)
# generate a result tensor of the expected size
dims, counter = gen_tensor_dims(constraint.tensor_size, counter)
nat_constraints = gen_nat_constraints(dims)
# generate a place-holder list of the right rank
# where "slice" does not contribute to the rank and "None" does
none_c = constraint.index_tuple.count(None)
resulting_tensor_dims = (none_c + len(dims)) * [None]
dim_index = 0
for i in range(len(constraint.index_tuple)):
# append 1 to the right location of the resulting tensor
if constraint.index_tuple[i] is None:
resulting_tensor_dims[i] = 1
elif constraint.index_tuple[i] == slice(None, None, None):
pass
else:
raise NotImplementedError('Method not yet implemented')
# append the remaining dimensions to the right location
dim_index = 0
for i in range(len(resulting_tensor_dims)):
if resulting_tensor_dims[i] is None:
resulting_tensor_dims[i] = dims[dim_index]
dim_index += 1
# check if the index is valid
is_valid_index = valid_index_tensor(constraint.index_tuple, dims)
# check if the resulting tensor is within bounds
if len(resulting_tensor_dims) > 4:
return F(), counter
else:
constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq),
BinConstraintT(constraint.res, TensorType(resulting_tensor_dims), op_eq),
*nat_constraints,
is_valid_index]
return Conj(constraints), counter
@register_transformation_rule(BinConstraintT)
def generate_binconstraint_t(constraint, counter):
"""
Transform binary constraints for tensors
"""
# precision constraints
if constraint.op == op_precision:
if constraint.lhs == Dyn:
return T(), counter
elif isinstance(constraint.lhs, TensorType):
is_fully_static = all([d != Dyn for d in constraint.lhs.__args__])
if is_fully_static:
return BinConstraintT(constraint.lhs, constraint.rhs, op_eq), counter
else:
new_dims = []
for _ in range(len(constraint.lhs.__args__)):
dim, counter = gen_dvar(counter)
new_dims.append(dim)
new_dim_constraints = [BinConstraintD(old_dim, new_dim, op_precision) for
new_dim, old_dim in zip(new_dims, constraint.lhs.__args__)] + \
[BinConstraintT(constraint.rhs, TensorType(new_dims), op_eq)] + \
[BinConstraintD(1, new_dim, op_leq) for
new_dim in new_dims]
return Conj(new_dim_constraints), counter
# matching
elif constraint.op == op_matching:
assert isinstance(constraint.rhs, TensorType)
d1 = constraint.rhs.__args__[0]
d2 = constraint.rhs.__args__[1]
d3 = constraint.rhs.__args__[2]
d4 = constraint.rhs.__args__[3]
conj = [BinConstraintT(constraint.lhs, Dyn, op_eq),
BinConstraintD(d1, Dyn, op_eq),
BinConstraintD(d2, Dyn, op_eq),
BinConstraintD(d3, Dyn, op_eq),
BinConstraintD(d4, Dyn, op_eq)]
return Disj([Conj(conj),
BinConstraintT(constraint.lhs, TensorType([d1, d2, d3, d4]), op_eq)]), counter
elif constraint.op == op_consistency:
c_dyn = Disj([BinConstraintT(constraint.lhs, Dyn, op_eq), BinConstraintT(constraint.rhs, Dyn, op_eq)])
[c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4], counter = gen_consistency_constraints(constraint, counter)
return Disj([c_dyn, c_tensor_1, c_tensor_2, c_tensor_3, c_tensor_4]), counter
elif constraint.op == op_leq:
assert isinstance(constraint.rhs, int)
disj = [BinConstraintT(constraint.lhs, Dyn, op_eq)]
for i in range(1, constraint.rhs + 1):
dims = []
for j in range(1, i + 1):
dim_var, counter = gen_dvar(counter)
dims.append(dim_var)
disj.append(BinConstraintT(constraint.lhs, TensorType(dims), op_eq))
return Disj(disj), counter
else:
return constraint, counter
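# Worked example (illustrative): a rank bound BinConstraintT(t, 2, op_leq) is
# rewritten above into Disj([t = Dyn, t = TensorType([d1]), t = TensorType([d1, d2])])
# with fresh dimension variables drawn from the counter.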
@register_transformation_rule(BinConstraintD)
def generate_binconstraint_d(constraint, counter):
"""
Transform binary constraints for dimensions
"""
if constraint.op == op_precision:
if isinstance(constraint.lhs, int):
return BinConstraintD(constraint.lhs, constraint.rhs, op_eq), counter
elif constraint.lhs == Dyn:
return T(), counter
elif constraint.op == op_consistency:
return Disj([BinConstraintD(constraint.lhs, constraint.rhs, op_eq),
BinConstraintD(constraint.rhs, Dyn, op_eq), BinConstraintD(constraint.lhs, Dyn, op_eq)]), counter
else:
return constraint, counter
@register_transformation_rule(Conj)
def generate_conj(constraint, counter):
"""
Transform conjunctions
"""
new = []
for c in constraint.conjucts:
new_c, counter = transform_constraint(c, counter)
new.append(new_c)
return Conj(new), counter
@register_transformation_rule(Disj)
def generate_disj(constraint, counter):
"""
Transform disjunctions
"""
new = []
for c in constraint.disjuncts:
new_c, counter = transform_constraint(c, counter)
new.append(new_c)
return Disj(new), counter
@register_transformation_rule(TGreatestUpperBound)
def generate_gub(constraint, counter):
"""
Transform greatest upper bound for tensors. Results in equality and Greatest Upper Bound
on dimensions
"""
c1 = Conj([Disj([BinConstraintT(constraint.rhs1, Dyn, op_eq),
BinConstraintT(constraint.rhs2, Dyn, op_eq)]), BinConstraintT(constraint.res, Dyn, op_eq)])
[c2, c3, c4, c5], counter = gen_greatest_upper_bound(constraint, counter)
return Disj([c1, c2, c3, c4, c5]), counter
@register_transformation_rule(DGreatestUpperBound)
def generate_d_gub(constraint, counter):
"""
Transform greatest upper bound for dimensions into equality constraints
"""
c1 = Conj([BinConstraintD(constraint.rhs1, Dyn, op_eq), BinConstraintD(constraint.res, constraint.rhs2, op_eq)])
c2 = Conj([BinConstraintD(constraint.rhs2, Dyn, op_eq), BinConstraintD(constraint.res, constraint.rhs1, op_eq)])
c3 = Conj([BinConstraintD(constraint.rhs2, constraint.rhs1, op_eq), BinConstraintD(constraint.res, constraint.rhs1, op_eq)])
return Disj([c1, c2, c3]), counter
@register_transformation_rule(CalcConv)
def generate_calc_conv(constraint, counter):
d, counter = gen_tensor_dims(4, counter)
conv_result = TensorType([d[0], d[1], d[2], d[3]])
# the convolution result is a tensor of size 4
c1 = BinConstraintT(constraint.conv_result, conv_result, op_eq)
# the second dimension of the output is equal to the output channels
c2 = Conj([BinConstraintD(d[1], constraint.c_out, op_eq), BinConstraintD(d[1], Dyn, op_neq)])
# the input corresponds to the output in the first dimension of the convolution
c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq)
c4, c5 = calc_last_two_dims(constraint, d)
leq_constraints = Conj([BinConstraintD(0, d[0], op_leq),
BinConstraintD(0, d[1], op_leq),
BinConstraintD(0, d[2], op_leq),
BinConstraintD(0, d[3], op_leq)])
return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter
@register_transformation_rule(CalcMaxPool)
def generate_calc_maxpool(constraint, counter):
"""
Transform maxpool constraints
"""
d, counter = gen_tensor_dims(4, counter)
maxpool_result = TensorType([d[0], d[1], d[2], d[3]])
# the maxpool result is a tensor of size 4
c1 = BinConstraintT(constraint.maxpool_result, maxpool_result, op_eq)
# the input corresponds to the output in the first and second dimension of maxpool
c2 = BinConstraintD(constraint.matching_constraint[1], d[1], op_eq)
c3 = BinConstraintD(constraint.matching_constraint[0], d[0], op_eq)
c4, c5 = calc_last_two_dims(constraint, d)
leq_constraints = Conj([BinConstraintD(0, d[0], op_leq),
BinConstraintD(0, d[1], op_leq),
BinConstraintD(0, d[2], op_leq),
BinConstraintD(0, d[3], op_leq)])
return Conj([c1, c2, c3, c4, c5, leq_constraints]), counter
@register_transformation_rule(CalcProduct)
def generate_calc_product(constraint, counter):
"""
Transform flatten constraints
"""
start = constraint.start
end = constraint.end
dims = constraint.dims_to_flatten
flattened = constraint.flattened
n = len(constraint.dims_to_flatten)
# this will be evaluated right here
boundary_check = (0 <= start and start < end and end <= n)
c_boundary = T() if boundary_check else F()
lhs = dims[0:start]
rhs = dims[end:]
mid = dims[start:end]
all_possibilities = generate_all_int_dyn_dim_possibilities(mid)
all_constraints = []
for p in all_possibilities:
p = list(p)
# this tells us there is a dynamic variable
contains_dyn = not(all([constraint.op == op_neq for constraint in p]))
if contains_dyn:
mid_var = [Dyn]
total_constraints = lhs + mid_var + rhs
if len(total_constraints) > 4:
all_constraints.append(F())
else:
all_constraints.append(Conj([BinConstraintT(flattened, TensorType(lhs + mid_var + rhs), op_eq)] + p))
else:
new_var, counter = gen_dvar(counter)
mid_eq_prod = Conj([BinConstraintD(new_var, Prod(mid), op_eq), BinConstraintD(new_var, Dyn, op_neq)])
mid_var = [new_var]
total_constraints = lhs + mid_var + rhs
if len(total_constraints) > 4:
all_constraints.append(F())
else:
all_constraints.append(Conj([BinConstraintT(flattened, TensorType(lhs + mid_var + rhs), op_eq), mid_eq_prod] + p))
return Conj([Disj(all_constraints), c_boundary]), counter
@register_transformation_rule(CanReshape)
def generate_reshape(constraint, counter):
"""
Transform reshape constraints
"""
d, counter = gen_tensor_dims(4, counter)
d1 = d[0]
d2 = d[1]
d3 = d[2]
d4 = d[3]
target = constraint.target.__args__
is_fully_static = all([d != Dyn for d in target])
# dynamic tensor
c1_dyn = BinConstraintT(constraint.src, Dyn, op_eq)
c2_tensor1 = BinConstraintT(constraint.src, TensorType([d1]), op_eq)
c2_tensor2 = BinConstraintT(constraint.src, TensorType([d1, d2]), op_eq)
c2_tensor3 = BinConstraintT(constraint.src, TensorType([d1, d2, d3]), op_eq)
c2_tensor4 = BinConstraintT(constraint.src, TensorType([d1, d2, d3, d4]), op_eq)
d1_eq_dyn = BinConstraintD(d1, Dyn, op_eq)
d1_neq_dyn = BinConstraintD(d1, Dyn, op_neq)
d2_eq_dyn = BinConstraintD(d2, Dyn, op_eq)
d2_neq_dyn = BinConstraintD(d2, Dyn, op_neq)
d3_eq_dyn = BinConstraintD(d3, Dyn, op_eq)
d3_neq_dyn = BinConstraintD(d3, Dyn, op_neq)
    d4_eq_dyn = BinConstraintD(d4, Dyn, op_eq)
    d4_neq_dyn = BinConstraintD(d4, Dyn, op_neq)
nat_d1 = BinConstraintD(0, d1, op_leq)
nat_d2 = BinConstraintD(0, d2, op_leq)
nat_d3 = BinConstraintD(0, d3, op_leq)
nat_d4 = BinConstraintD(0, d4, op_leq)
if is_fully_static:
# size 1 tensor
c3_tensor1 = Disj([d1_eq_dyn,
(Conj([d1_neq_dyn,
BinConstraintD(d1, Prod(target), op_eq)]))])
all_tensor_1 = Conj([c2_tensor1, c3_tensor1])
# size 2 tensor
all_tensor_2 = Conj([c2_tensor2, gen_all_reshape_possibilities([d1, d2], target)])
# size 3 tensor
all_tensor_3 = Conj([c2_tensor3, gen_all_reshape_possibilities([d1, d2, d3], target)])
# size 4 tensor
all_tensor_4 = Conj([c2_tensor4, gen_all_reshape_possibilities([d1, d2, d3, d4], target)])
return Conj([Disj([c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]),
nat_d1, nat_d2, nat_d3, nat_d4]), counter
# then there must be exactly one occurrence of dyn
else:
new_target = []
for n in target:
if n != Dyn:
new_target.append(n)
# tensor 1
c3_tensor1 = Disj([d1_eq_dyn,
(Conj([d1_neq_dyn,
is_dim_div_by_target(new_target, d1)]))])
all_tensor_1 = Conj([c2_tensor1, c3_tensor1])
# tensor 2
c21 = Disj([d1_eq_dyn, d2_eq_dyn])
c22 = Conj([d1_neq_dyn, d2_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2]))])
all_tensor_2 = Conj([c2_tensor2, Disj([c21, c22])])
# tensor 3
c31 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn])
c32 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3]))])
all_tensor_3 = Conj([c2_tensor3, Disj([c31, c32])])
# tensor 4
c41 = Disj([d1_eq_dyn, d2_eq_dyn, d3_eq_dyn, d4_eq_dyn])
c42 = Conj([d1_neq_dyn, d2_neq_dyn, d3_neq_dyn, d4_neq_dyn, is_dim_div_by_target(new_target, Prod([d1, d2, d3, d4]))])
all_tensor_4 = Conj([c2_tensor4, Disj([c41, c42])])
return Conj([Disj([c1_dyn, all_tensor_1, all_tensor_2, all_tensor_3, all_tensor_4]),
nat_d1, nat_d2, nat_d3, nat_d4]), counter
@register_transformation_rule(ApplyBroadcasting)
def generate_broadcasting(constraint, counter):
"""
Transform broadcasting constraints
"""
e11, e12 = constraint.res1, constraint.res2
e1, e2 = constraint.input1, constraint.input2
e1_dyn = BinConstraintT(e1, Dyn, op_eq)
e2_dyn = BinConstraintT(e2, Dyn, op_eq)
# Introduce dimensions
e1_equal_e11 = BinConstraintT(e1, e11, op_eq)
e2_equal_e12 = BinConstraintT(e2, e12, op_eq)
# dyn possibility
e1_dyn_constraint = Conj([e1_dyn, e1_equal_e11, e2_equal_e12])
e2_dyn_constraint = Conj([e2_dyn, e1_equal_e11, e2_equal_e12])
# tensor possibility
# generate dimensions to create tensors of size 1
final_tensor_1_constraint, _, _, nat_dims_1, counter = \
gen_broadcasting_constraints(e1, e2, e11, e12, 1, counter)
# generate dimensions to create tensors of size 2
final_tensor_2_constraint_no_padding, final_tensor_2_constraint_padding_arg1, \
final_tensor_2_constraint_padding_arg2, nat_dims_2, counter = \
gen_broadcasting_constraints(e1, e2, e11, e12, 2, counter)
# generate dimensions to create tensors of size 3
final_tensor_3_constraint_no_padding, final_tensor_3_constraint_padding_arg1, \
final_tensor_3_constraint_padding_arg2, nat_dims_3, counter = \
gen_broadcasting_constraints(e1, e2, e11, e12, 3, counter)
# generate dimensions to create tensors of size 4
final_tensor_4_constraint_no_padding, final_tensor_4_constraint_padding_arg1, \
final_tensor_4_constraint_padding_arg2, nat_dims_4, counter = \
gen_broadcasting_constraints(e1, e2, e11, e12, 4, counter)
final_result = Disj([
e1_dyn_constraint,
e2_dyn_constraint,
final_tensor_1_constraint,
final_tensor_2_constraint_no_padding,
final_tensor_2_constraint_padding_arg1,
final_tensor_2_constraint_padding_arg2,
final_tensor_3_constraint_no_padding,
final_tensor_3_constraint_padding_arg1,
final_tensor_3_constraint_padding_arg2,
final_tensor_4_constraint_no_padding,
final_tensor_4_constraint_padding_arg1,
final_tensor_4_constraint_padding_arg2
])
return Conj([final_result, *nat_dims_1, *nat_dims_2, *nat_dims_3, *nat_dims_4]), counter
def transform_constraint(constraint: Constraint, counter: int):
"""
Transforms a constraint into a simpler constraint.
Ex: precision and consistency are transformed to equality
Args:
constraint: constraint to be transformed
counter: for variable tracking
Returns: Constraint
"""
if type(constraint) in _TRANSFORMATION_RULES:
return _TRANSFORMATION_RULES[type(constraint)](constraint, counter)
else:
return constraint, counter
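# Usage sketch (illustrative): constraint types without a registered rule, such as
# T() or F(), pass through unchanged together with the counter; all other constraint
# types are dispatched to their @register_transformation_rule handler above.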
def calc_last_two_dims(constraint, d: List[DVar]):
"""
Generates constraints for the last two dimensions of a convolution or a maxpool output
Args:
constraint: CalcConv or CalcMaxPool
d: The list of output dimensions
Returns: Constraints for calculating the last two dimensions of the output
"""
assert isinstance(constraint, CalcConv) or isinstance(constraint, CalcMaxPool)
b3 = constraint.matching_constraint[2]
b4 = constraint.matching_constraint[3]
b3_dyn = Conj([BinConstraintD(d[2], Dyn, op_eq), BinConstraintD(b3, Dyn, op_eq)])
b4_dyn = Conj([BinConstraintD(d[3], Dyn, op_eq), BinConstraintD(b4, Dyn, op_eq)])
d3_not_dyn = Conj([BinConstraintD(d[2], Dyn, op_neq), BinConstraintD(b3, Dyn, op_neq)])
d4_not_dyn = Conj([BinConstraintD(d[3], Dyn, op_neq), BinConstraintD(b4, Dyn, op_neq)])
    # transform parameters into tuples in case they are not already
padding = (constraint.padding, constraint.padding) \
if isinstance(constraint.padding, int) else constraint.padding
kernel = (constraint.kernel, constraint.kernel) \
if isinstance(constraint.kernel, int) else constraint.kernel
stride = (constraint.stride, constraint.stride) \
if isinstance(constraint.stride, int) else constraint.stride
dilation = (constraint.dilation, constraint.dilation) \
if isinstance(constraint.dilation, int) else constraint.dilation
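    # The arithmetic below encodes the usual convolution/maxpool output-size
    # formula, out = (in + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1,
    # once per spatial dimension, with a Dyn escape hatch for unknown sizes.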
f1 = BinConstraintD(b3, BinConstraintD(2, padding[0], op_mul), op_add)
f2 = BinConstraintD(dilation[0], BinConstraintD(kernel[0], 1, op_sub), op_mul)
f3 = BinConstraintD(BinConstraintD(BinConstraintD(f1, f2, op_sub), 1, op_sub), stride[0], op_div)
f4 = BinConstraintD(f3, 1, op_add)
c4 = Disj([b3_dyn, Conj([d3_not_dyn, BinConstraintD(d[2], f4, op_eq)])])
f11 = BinConstraintD(b4, BinConstraintD(2, padding[1], op_mul), op_add)
f22 = BinConstraintD(dilation[1], BinConstraintD(kernel[1], 1, op_sub), op_mul)
f33 = BinConstraintD(BinConstraintD(BinConstraintD(f11, f22, op_sub), 1, op_sub), stride[1], op_div)
f44 = BinConstraintD(f33, 1, op_add)
c5 = Disj([b4_dyn, Conj([d4_not_dyn, BinConstraintD(d[3], f44, op_eq)])])
return c4, c5
def generate_all_int_dyn_dim_possibilities(my_list: List[DVar]):
"""
Generate all possibilities of being equal or not equal to dyn for my_list
Args:
my_list: List of tensor dimensions
Returns: A list of a list of constraints. Each list of constraints corresponds to
one possibility about the values of the dimension variables
"""
# generate all possibilities of being equal or not equal to dyn for my_list
eq_possibilities = [BinConstraintD(my_list[i], Dyn, op_eq) for i in range(len(my_list))]
neq_possibilities = [BinConstraintD(my_list[i], Dyn, op_neq) for i in range(len(my_list))]
d_possibilities = []
for i in zip(eq_possibilities, neq_possibilities):
d_possibilities.append(list(i))
all_possibilities = list(itertools.product(*d_possibilities))
return all_possibilities
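# For example (illustrative): for my_list = [d1, d2] this returns the four tuples
# (d1 = Dyn, d2 = Dyn), (d1 = Dyn, d2 != Dyn), (d1 != Dyn, d2 = Dyn) and
# (d1 != Dyn, d2 != Dyn), i.e. 2 ** len(my_list) possibilities in total.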
def is_target_div_by_dim(target: List[int], dim: List[DVar]):
"""
Generate constraints to check if the target dimensions are divisible by the input dimensions
Args:
target: Target dimensions
dim: Input dimensions
Returns: Constraints to check divisibility
"""
return BinConstraintD(BinConstraintD(Prod(target), dim, op_mod), 0, op_eq)
def is_dim_div_by_target(target: List[int], dim: List[DVar]):
"""
Generate constraints to check if the input dimensions is divisible by the target dimensions
Args:
target: Target dimensions
dim: Input dimensions
Returns: Constraints to check divisibility
"""
return BinConstraintD(BinConstraintD(dim, Prod(target), op_mod), 0, op_eq)
def gen_all_reshape_possibilities(list_of_dims, target):
"""
    Consider all possibilities for what the input dimensions could be (number or dynamic)
Then generate the appropriate constraints using multiplication or mod depending on the possibility
The possibilities we consider here are the cross product of being equal to dyn or not equal to dyn
for the input. Target is fixed because at most one dimension could be dyn.
We have different cases for this.
Args:
list_of_dims: The input list of dimensions
target: The tensor we want to reshape to
    Returns: A disjunction of transformed reshape constraints
"""
all_possibilities = generate_all_int_dyn_dim_possibilities(list_of_dims)
all_constraints = []
for p in all_possibilities:
to_multiply = []
p = list(p)
for constraint in p:
assert isinstance(constraint, BinConstraintD)
if constraint.op == op_neq:
to_multiply.append(constraint.lhs)
if not to_multiply:
all_constraints.append(Conj(p))
elif len(to_multiply) < len(list_of_dims):
all_constraints.append(Conj(p + [is_target_div_by_dim(target, Prod(to_multiply))]))
else:
all_constraints.append(Conj(p + [BinConstraintD(Prod(list_of_dims),
Prod(target), op_eq)]))
return Disj(all_constraints)
def broadcast_dim(tensor_input1, tensor_input2, res1, res2, index, padding=False):
"""
Apply broadcasting to the 'index' dimension of tensor_input1.
Args:
tensor_input1: should represent [d1, ..., d_index, ...] where d_index = 1
tensor_input2: represents the second input
res1: broadcasted result 1
res2: broadcasted result 2
index: the index to broadcast
padding: If padding was used, then tensor_input1[index] does not exist
Returns:
"""
if tensor_input1[index] is None:
assert padding
if not padding:
# then the inputs are the same length so they all have dimensions at "index"
return Conj([BinConstraintD(tensor_input1[index], 1, op_eq),
BinConstraintD(res1[index], res2[index], op_eq),
BinConstraintD(res2[index], tensor_input2[index], op_eq)])
else:
# we don't set the input dimension to 1, since it doesn't exist.
return Conj([BinConstraintD(res1[index], res2[index], op_eq),
BinConstraintD(res2[index], tensor_input2[index], op_eq)])
def apply_padding(e1_var: TVar,
e11: BinConstraintT,
e2: BinConstraintT,
e12: BinConstraintT,
d2: List[DVar],
d11: List[DVar],
d12: List[DVar],
counter: int):
"""
    We are considering the possibility where one input has fewer dimensions than
another input, so we apply padding to the broadcasted results
Args:
e1_var: Variable representing the first input where padding will be
e11: constraint of the form e11 = Tensortype[d1, ..., dn]
e2: constraint of the form e2 = Tensortype[d1, ..., dn]
        e12: constraint of the form e12 = Tensortype[d1, ..., dn]
d2: Tensor variables for the second input
d11: Tensor variables for the broadcasted first input
d12: Tensor variables for the broadcasted second input
counter: variable tracking
Returns: A new constraint whose goal is to apply padding to the broadcasted result
"""
res = []
# pad the shorter input with None so we can pass it to the broadcasting helper function
for i in range(1, len(d2)):
d1, counter = gen_tensor_dims(i, counter)
nat_constraints = gen_nat_constraints(d1 + d2 + d11 + d12)
e1 = BinConstraintT(e1_var, TensorType(d1), op_eq)
simulate_padding = [None] * (len(d2) - i)
assert len(simulate_padding + d1) == len(d2)
broadcast_padding = []
# for every padding size, we also consider broadcasting
for j in range((len(d2) - i)):
broadcast_padding.append(broadcast_dim(simulate_padding, d2, d11, d12, j, True))
# we consider the possibilities for broadcasting for every dimension. Since we already
# padded d1, we do not consider it while broadcasting
all_broadcasting_possibilities = generate_all_broadcasting_possibilities_no_padding(d1,
d2[(len(d2) - i):],
d11[(len(d2) - i):],
d12[(len(d2) - i):])
# combine all constraints into a conjunction
c = Conj([e1, e11, e2, e12,
*broadcast_padding,
all_broadcasting_possibilities,
*nat_constraints
])
res.append(c)
return Disj(res), counter
def no_broadcast_dim_with_index(d1: List[DVar],
d2: List[DVar],
d3: List[DVar],
d4: List[DVar],
i: int):
"""
Args:
        d1: input 1
        d2: input 2
        d3: simulated broadcasting for input 1
        d4: simulated broadcasting for input 2
        i: the dimension index currently being constrained
Returns: Constraints for when no broadcasting occurs
"""
return Conj([
Disj([
Conj([BinConstraintD(d1[i], 1, op_eq),
BinConstraintD(d2[i], 1, op_eq)]),
Conj([BinConstraintD(d1[i], 1, op_neq),
BinConstraintD(d2[i], 1, op_neq)])]),
BinConstraintD(d1[i], d3[i], op_eq),
BinConstraintD(d2[i], d4[i], op_eq)])
def gen_lists_of_dims(num_tensors: int, dim_size: int, counter: int):
"""
Generate lists of DVar to represent tensor dimensions
Args:
num_tensors: the required number of tensors
dim_size: the number of dimensions for each tensor
counter: variable tracking
Returns: A list of a list of tensor dimensions
"""
res = []
for _ in range(num_tensors):
dims, counter = gen_tensor_dims(dim_size, counter)
res.append(dims)
return res, counter
def create_equality_constraints_for_broadcasting(e1: TVar,
e2: TVar,
e11: TVar,
e12: TVar,
d1: List[DVar],
d2: List[DVar],
d11: List[DVar],
d12: List[DVar]):
"""
Create equality constraints for when no broadcasting occurs
Args:
e1: Input 1
e2: Input 2
e11: Broadcasted input 1
e12: Broadcasted input 2
d1: Variables that store dimensions for e1
d2: Variables that store dimensions for e2
d11: Variables that store dimensions for e11
        d12: Variables that store dimensions for e12
Returns: Four equality constraints
"""
e1_tensor = BinConstraintT(e1, TensorType(d1), op_eq)
e11_tensor = BinConstraintT(e11, TensorType(d11), op_eq)
e2_tensor = BinConstraintT(e2, TensorType(d2), op_eq)
e12_tensor = BinConstraintT(e12, TensorType(d12), op_eq)
return [e1_tensor, e11_tensor, e2_tensor, e12_tensor]
def gen_consistency_constraints(constraint: Constraint, counter: int):
"""
Args:
constraint: Consistency constraint on tensors
counter: for variable tracking
Returns: Equality and consistency constraints on dimensions
"""
all_constraints = []
for i in range(1, MAX_TENSOR_RANK + 1):
new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
new_dims_rhs_2, counter = gen_tensor_dims(i, counter)
nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2)
c_tensor_i = Conj([BinConstraintT(constraint.lhs, TensorType(new_dims_rhs_1), op_eq),
BinConstraintT(constraint.rhs, TensorType(new_dims_rhs_2), op_eq)] +
[BinConstraintD(d1, d2, op_consistency) for
d1, d2 in zip(new_dims_rhs_1, new_dims_rhs_2)] + nat_constraints)
all_constraints.append(c_tensor_i)
return all_constraints, counter
def gen_greatest_upper_bound(constraint: TGreatestUpperBound, counter: int):
"""
Args:
constraint: Greatest upper bound on tensors
counter: variable tracking
Returns: A set of equality constraints and DGreatestUpperBound constraints
"""
all_constraints = []
for i in range(1, MAX_TENSOR_RANK + 1):
c = []
dims1, counter = gen_tensor_dims(i, counter)
c1tensor = TensorType(dims1)
dims2, counter = gen_tensor_dims(i, counter)
c2tensor = TensorType(dims2)
dims3, counter = gen_tensor_dims(i, counter)
c3tensor = TensorType(dims3)
c += [BinConstraintT(constraint.rhs1, c1tensor, op_eq),
BinConstraintT(constraint.rhs2, c2tensor, op_eq),
BinConstraintT(constraint.res, c3tensor, op_eq)] + \
gen_nat_constraints(dims1 + dims2 + dims3)
assert len(c3tensor.__args__) == len(c1tensor.__args__) == len(c2tensor.__args__)
for i in range(len(c3tensor.__args__)):
c.append(DGreatestUpperBound(c3tensor.__args__[i],
c1tensor.__args__[i],
c2tensor.__args__[i]))
all_constraints.append(Conj(c))
return all_constraints, counter
def generate_all_broadcasting_possibilities_no_padding(d1: List[DVar], d2: List[DVar], d11: List[DVar], d12: List[DVar]):
"""
Generate broadcasting constraints assuming no padding. Broadcasting can happen at any dimension.
    We look at all combinations for all dimensions in d1 and d2
Args:
d1: input1 dimensions
d2: input2 dimensions
d11: broadcasted input1 dimensions
d12: broadcasted input2 dimensions
Returns: broadcasting constraints relating the input dimensions to the broadcasted dimensions
"""
size = len(d1)
res2 = []
for i in range(size):
t1 = broadcast_dim(d1, d2, d11, d12, i)
t2 = broadcast_dim(d2, d1, d12, d11, i)
t3 = no_broadcast_dim_with_index(d1, d2, d11, d12, i)
res2.append(Disj([t1, t2, t3]))
return Conj(res2)
def gen_broadcasting_constraints(e1: TVar, e2: TVar, e11: TVar, e12: TVar, i: int, counter: int):
"""
Simulates broadcasting on e1 and e2 and returns the results
respectively in e11 and e12. Because of gradual types,
e1 and e2 may not be equal. Similarly, e11 and e12 may not
be equal. e11 and e12 should be guaranteed to be consistent
as they represent the shapes of the tensors to be added after
broadcasting.
Args:
e1: TVar representing the type of input 1
e2: TVar representing the type of input 2
        e11: TVar representing the broadcasted input 1
        e12: TVar representing the broadcasted input 2
i: The rank of the resulting type of addition
counter: for variable tracking
Returns: Simplified broadcasting constraints
"""
dims, counter = gen_lists_of_dims(4, i, counter)
[d1, d2, d3, d4] = dims
nat_dims_i = gen_nat_constraints(list(itertools.chain(*dims)))
initialize_tensors_constraints = create_equality_constraints_for_broadcasting(e1, e2, e11, e12,
d1, d2, d3, d4)
[e1_tensor, e11_tensor, e2_tensor, e12_tensor] = initialize_tensors_constraints
# without padding, broadcast all possibilities for tensors of size i
final_tensor_constraint_no_padding = Conj([*initialize_tensors_constraints,
generate_all_broadcasting_possibilities_no_padding(d1, d2, d3, d4)])
# with padding, broadcast all possibilities for tensors of size i
final_tensor_constraint_padding_arg1, counter = \
apply_padding(e1, e11_tensor, e2_tensor, e12_tensor, d2, d3, d4, counter)
final_tensor_constraint_padding_arg2, counter = \
apply_padding(e2, e12_tensor, e1_tensor, e11_tensor, d1, d4, d3, counter)
return final_tensor_constraint_no_padding, \
final_tensor_constraint_padding_arg1, \
final_tensor_constraint_padding_arg2, nat_dims_i, counter
| pytorch-master | torch/fx/experimental/migrate_gradual_types/constraint_transformation.py |
from torch.fx.experimental.migrate_gradual_types.constraint import TVar, DVar, BinConstraintD, \
BVar
from torch.fx.experimental.migrate_gradual_types.operation import op_leq
def gen_tvar(curr):
"""
Generate a tensor variable
:param curr: The current counter
:return: a tensor variable and the updated counter
"""
curr += 1
return TVar(curr), curr
def gen_dvar(curr):
"""
Generate a dimension variable
:param curr: the current counter
:return: a dimension variable and an updated counter
"""
curr += 1
return DVar(curr), curr
def gen_bvar(curr):
"""
Generate a boolean variable
:param curr: the current counter
:return: a boolean variable and an updated counter
"""
curr += 1
return BVar(curr), curr
def gen_tensor_dims(n, curr):
"""
Generate a list of tensor dimensions
:param n: the number of dimensions
:param curr: the current counter
:return: a list of dimension variables and an updated counter
"""
dims = []
for _ in range(n):
dvar, curr = gen_dvar(curr)
dims.append(dvar)
return dims, curr
def gen_nat_constraints(list_of_dims):
"""
Generate natural number constraints for dimensions
"""
return [BinConstraintD(0, d, op_leq) for d in list_of_dims]
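# Usage sketch (illustrative): the counter is threaded explicitly, e.g.
# gen_tvar(0) returns (TVar(1), 1) and gen_tensor_dims(3, 0) returns
# ([DVar(1), DVar(2), DVar(3)], 3).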
| pytorch-master | torch/fx/experimental/migrate_gradual_types/util.py |
| pytorch-master | torch/fx/experimental/migrate_gradual_types/__init__.py |
# -*- coding: utf-8 -*-
op_add = '+'
op_sub = '-'
op_mul = '*'
op_div = '/'
op_eq = '='
op_neq = '!='
op_imp = '=>'
op_matching = '⊳'
op_consistency = '~'
op_precision = '⊑'
op_leq = '≤'
op_lt = '<'
op_gt = '>'
op_mod = '%'
| pytorch-master | torch/fx/experimental/migrate_gradual_types/operation.py |
from torch.fx.experimental.migrate_gradual_types.constraint import Conj, Disj, T, F, BinConstraintT, BVar, is_bool_expr
from torch.fx.experimental.migrate_gradual_types.constraint import BinConstraintD, TVar, DVar
from torch.fx.experimental.migrate_gradual_types.constraint import Prod, is_algebraic_expression, is_dim
from torch.fx.experimental.migrate_gradual_types.constraint_generator import ConstraintGenerator
from torch.fx.experimental.migrate_gradual_types.constraint_transformation import transform_constraint
from torch.fx.experimental.migrate_gradual_types.operation import op_add, op_eq, op_neq, op_gt, op_lt
from torch.fx.experimental.migrate_gradual_types.operation import op_leq, op_sub, op_div, op_mul, op_mod
from torch.fx.tensor_type import TensorType, Dyn
try:
import z3 # type: ignore[import]
from torch.fx.experimental.migrate_gradual_types.z3_types import tensor_type, z3_dyn, D
HAS_Z3 = True
def transform_to_z3(constraint, counter, dimension_dict):
if isinstance(constraint, Conj):
conjuncts = []
for c in constraint.conjucts:
new_c, counter = transform_to_z3(c, counter, dimension_dict)
conjuncts.append(new_c)
return z3.And(conjuncts), counter
elif isinstance(constraint, Disj):
disjuncts = []
for c in constraint.disjuncts:
new_c, counter = transform_to_z3(c, counter, dimension_dict)
disjuncts.append(new_c)
return z3.Or(disjuncts), counter
elif isinstance(constraint, T):
return True, counter
elif isinstance(constraint, F):
return False, counter
elif isinstance(constraint, BinConstraintT):
if constraint.op == op_eq:
lhs, counter = transform_var(constraint.lhs, counter, dimension_dict)
rhs, counter = transform_var(constraint.rhs, counter, dimension_dict)
return (lhs == rhs), counter
else:
raise NotImplementedError('Method not yet implemented')
elif isinstance(constraint, BinConstraintD):
if constraint.op == op_eq:
if isinstance(constraint.lhs, BVar) and is_bool_expr(constraint.rhs):
transformed_rhs, counter = transform_to_z3(constraint.rhs, counter, dimension_dict)
transformed_lhs = z3.Bool(constraint.lhs.c)
return transformed_lhs == transformed_rhs, counter
elif is_dim(constraint.lhs) and is_dim(constraint.rhs):
                    # with dimension transformations we consider the encoding
lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict)
rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict)
return lhs == rhs, counter
else:
# then we have an algebraic expression which means that we disregard the
# first element of the encoding
lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
return lhs == rhs, counter
# The assumption here is that the LHS and RHS must be dimensions
elif constraint.op == op_neq:
assert is_dim(constraint.lhs)
assert is_dim(constraint.rhs)
lhs, counter = transform_dimension(constraint.lhs, counter, dimension_dict)
rhs, counter = transform_dimension(constraint.rhs, counter, dimension_dict)
if constraint.rhs == Dyn or constraint.lhs == Dyn:
if constraint.rhs == Dyn:
return lhs.arg(0) == 1, counter
elif constraint.lhs == Dyn:
return rhs.arg(0) == 1, counter
# if one of the instances is a number
elif isinstance(constraint.lhs, int) or isinstance(constraint.rhs, int):
if isinstance(constraint.lhs, int):
return z3.Or([rhs.arg(0) == 0, z3.And([rhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter
elif isinstance(constraint.rhs, int):
return z3.Or([lhs.arg(0) == 0, z3.And([lhs.arg(0) == 1, lhs.arg(1) != rhs.arg(1)])]), counter
else:
return z3.Or([z3.And([lhs.arg(0) == 0, rhs.arg(0) != 0]),
z3.And([lhs.arg(0) != 0, rhs.arg(0) == 0]),
z3.And([lhs.arg(0) != 0, rhs.arg(0) != 0, lhs.arg(1) != rhs.arg(1)])]), counter
elif constraint.op == op_leq:
# if the dimensions are not dyn, this will come into effect
# there would have been another constraint specifying if a given dimension
# is dyn or not
assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
return lhs <= rhs, counter
elif constraint.op == op_gt:
assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
return lhs > rhs, counter
elif constraint.op == op_lt:
assert is_dim(constraint.lhs) and is_dim(constraint.rhs)
lhs, counter = transform_algebraic_expression(constraint.lhs, counter, dimension_dict)
rhs, counter = transform_algebraic_expression(constraint.rhs, counter, dimension_dict)
return lhs < rhs, counter
else:
raise NotImplementedError('operation not yet implemented')
else:
raise NotImplementedError('Operation not yet implemented')
def transform_var(tensor, counter, dimension_dict):
"""
Transforms tensor variables to a format understood by z3
Args:
tensor: Tensor variable or a tensor type potentially with variable dimensions
Returns: Transformed variable to a z3 format
"""
if isinstance(tensor, TensorType):
res = []
for t in tensor.__args__:
transformed, counter = transform_dimension(t, counter, dimension_dict)
res.append(transformed)
assert len(res) <= 4
if len(tensor.__args__) == 1:
return tensor_type.tensor1(res[0]), counter
elif len(tensor.__args__) == 2:
return tensor_type.tensor2(res[0], res[1]), counter
elif len(tensor.__args__) == 3:
return tensor_type.tensor3(res[0], res[1], res[2]), counter
elif len(tensor.__args__) == 4:
return tensor_type.tensor4(res[0], res[1], res[2], res[3]), counter
elif tensor == Dyn:
return z3_dyn, counter
elif isinstance(tensor, TVar):
return z3.Const(tensor.tvar, tensor_type), counter
def transform_dimension(dimension, counter, dimension_dict):
"""
Takes a dimension variable or a number and transforms it to a tuple
according to our scheme
Args:
dimension: The dimension to be transformed
counter: variable tracking
Returns: tuple and the current counter
"""
if dimension == Dyn:
counter += 1
return D(0, z3.Int(counter)), counter
elif isinstance(dimension, int):
return D(1, dimension), counter
elif isinstance(dimension, DVar):
if dimension.c in dimension_dict:
return D(z3.Int(dimension_dict[dimension.c]), z3.Int(dimension.c)), counter
else:
counter += 1
dimension_dict[dimension.c] = counter
return D(z3.Int(counter), z3.Int(dimension.c)), counter
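    # Encoding sketch (illustrative): transform_dimension(3, c, {}) yields (D(1, 3), c)
    # for a static size, while transform_dimension(Dyn, c, {}) allocates a fresh
    # z3 integer and yields (D(0, z3.Int(c + 1)), c + 1).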
def transform_algebraic_expression(expr, counter, dimension_dict):
"""
Transforms an algebraic expression to z3 format
Args:
expr: An expression is either a dimension variable or an algebraic-expression
Returns: the transformed expression
"""
assert is_algebraic_expression(expr) or is_dim(expr)
if is_dim(expr):
transformed, counter = transform_dimension(expr, counter, dimension_dict)
return transformed.arg(1), counter
elif isinstance(expr, Prod):
dims = []
for dim in expr.products:
assert is_dim(dim)
d, counter = transform_dimension(dim, counter, dimension_dict)
dims.append(d.arg(1))
return z3.Product(dims), counter
elif is_algebraic_expression(expr):
lhs, counter = transform_algebraic_expression(expr.lhs, counter, dimension_dict)
rhs, counter = transform_algebraic_expression(expr.rhs, counter, dimension_dict)
if expr.op == op_sub:
c = lhs - rhs
elif expr.op == op_add:
c = lhs + rhs
elif expr.op == op_div:
c = lhs / rhs
elif expr.op == op_mul:
c = lhs * rhs
elif expr.op == op_mod:
c = lhs % rhs
else:
raise NotImplementedError('operation not yet implemented')
return c, counter
else:
raise RuntimeError
def transform_all_constraints(traced, counter=0):
"""
Given a trace, generates constraints and transforms them to z3 format
"""
dimension_dict = {} # type: ignore[var-annotated]
generator = ConstraintGenerator(traced)
new_constraints, counter = generator.generate_constraints(counter)
# print(new_constraints.conjucts[0])
# print(*new_constraints.conjucts, sep='\n')
# transform precision, matching, consistency till obtaining a fixed point
new_constraints, counter = iterate_till_fixed_point(new_constraints, counter)
# print(new_constraints)
# print(new_constraints.conjucts)
# new_constraints.conjucts = new_constraints.conjucts[:-1]
# print(*new_constraints.conjucts, sep='\n')
transformed, counter = transform_to_z3(new_constraints, counter, dimension_dict)
# print(transformed)
return transformed
def iterate_till_fixed_point(constraints, counter):
"""
Transform constraints till reaching a fixed point
"""
old_c = None
while old_c != constraints:
old_c = constraints
constraints, counter = transform_constraint(constraints, counter)
return constraints, counter
def transform_all_constraints_trace_time(tracer_root, graph, node, counter=0):
"""
Takes a node and a graph and generates two sets of constraints.
One set constraints the node's constraints and another set
constraints the negation of the node's constraints
Args:
tracer_root: the root for getting the module instances
graph: the graph so far in the tracing process
node: node that represents a conditional
counter: variable tracking
        Returns: Two sets of constraints. One with a conjunction with the
        conditional constraint and the other with a conjunction with
        its negation.
"""
dimension_dict = {} # type: ignore[var-annotated]
generator = ConstraintGenerator(tracer_root, graph)
new_constraints, counter = generator.generate_constraints(counter)
condition_constraint = new_constraints.conjucts[-1]
# we know the constraint is a conjunction where the last constraint is about the conditional
# so remove the last constraint
new_constraints.conjucts = new_constraints.conjucts[:-1]
# transform precision, matching, consistency till obtaining a fixed point
new_constraints, counter = iterate_till_fixed_point(new_constraints, counter)
# since the function returns a list of one element, we get the first element
# we are only interested in the RHS in this case because the LHS just stores
# the result
# we make sure the constraint is of the form:
# c = b where b is a boolean expression
# and we consider b (constraint.rhs) for transformation
assert isinstance(condition_constraint.lhs, BVar)
assert is_bool_expr(condition_constraint.rhs)
condition_constraint_rhs = condition_constraint.rhs
# transform the condition constraint
condition_constraint_rhs, counter = iterate_till_fixed_point(condition_constraint_rhs, counter)
transformed, counter = transform_to_z3(new_constraints, counter, dimension_dict)
transformed_condition_constraint, counter = transform_to_z3(condition_constraint_rhs, counter, dimension_dict)
negation_transformed_condition_constraint = z3.Not(transformed_condition_constraint)
return z3.And([transformed, transformed_condition_constraint]),\
z3.And([transformed, negation_transformed_condition_constraint])
def evaluate_conditional_with_constraints(tracer_root, graph, node, counter=0, user_constraints=None):
"""
Given an IR and a node representing a conditional, evaluate the conditional
and its negation
Args:
tracer_root: Tracer root for module instances
node: The node to be evaluated
Returns: the results of evaluating the condition and the negation with
the rest of the constraints
"""
transformed_positive, transformed_negative = \
transform_all_constraints_trace_time(tracer_root, graph, node, counter)
s = z3.Solver()
s.add(transformed_positive)
if user_constraints is not None:
s.add(user_constraints)
condition = s.check()
s = z3.Solver()
s.add(transformed_negative)
if user_constraints is not None:
s.add(user_constraints)
negation = s.check()
return condition, negation
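    # Illustrative note (assumption, not from the original source): the two values
    # returned above are z3 check results, so a caller might interpret them as:
    #
    #     cond, neg = evaluate_conditional_with_constraints(tracer.root, graph, node)
    #     if cond == z3.sat and neg == z3.unsat:
    #         pass  # the condition always holds under the generated constraints
    #
    # where `tracer`, `graph` and `node` are hypothetical tracing-time objects.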
except ImportError:
HAS_Z3 = False
| pytorch-master | torch/fx/experimental/migrate_gradual_types/transform_to_z3.py |
try:
import z3 # type: ignore[import]
HAS_Z3 = True
# dynamic type
dyn = z3.DeclareSort('Dyn')
dyn_type = z3.Const('dyn', dyn)
# dimension
dim = z3.Datatype('dim')
dim.declare('dim', ('0', z3.IntSort()), ('1', z3.IntSort()))
dim = dim.create()
# tensors
tensor_type = z3.Datatype('TensorType')
tensor_type.declare('Dyn', ('dyn', dyn))
tensor_type.declare('tensor1', ('0', dim))
tensor_type.declare('tensor2', ('0', dim), ('1', dim))
tensor_type.declare('tensor3', ('0', dim), ('1', dim), ('2', dim))
tensor_type.declare('tensor4', ('0', dim), ('1', dim), ('2', dim), ('3', dim))
tensor_type = tensor_type.create()
# create dimension
D = dim.dim
z3_dyn = tensor_type.Dyn(dyn_type)
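    # Illustrative sketch (assumption, not part of the original module): a rank-2
    # type such as TensorType([2, Dyn]) can be written with the terms above, where
    # D(1, k) marks a known dimension k and D(0, v) a fresh unknown dimension v:
    #
    #     t = tensor_type.tensor2(D(1, 2), D(0, z3.Int('d1')))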
except ImportError:
HAS_Z3 = False
| pytorch-master | torch/fx/experimental/migrate_gradual_types/z3_types.py |
import torch
import operator
from typing import Callable, Dict, Iterable
from torch.fx._symbolic_trace import _assert_is_none
from torch.fx.experimental.migrate_gradual_types.constraint import ApplyBroadcasting, CalcProduct, \
Disj, TGreatestUpperBound, CalcMaxPool, CalcConv, Conj, BinConstraintT, CanReshape, BinConstraintD, GetItem, T, F, \
TVar, DVar, GetItemTensor, IndexSelect, Transpose, DGreatestUpperBound
from torch.fx.experimental.migrate_gradual_types.operation import \
op_eq, op_matching, op_consistency, op_leq, op_precision, op_gt, op_div, op_sub, op_neq, op_lt, op_add, op_mul
from torch.fx.node import Target, Node
from torch.fx.experimental.migrate_gradual_types.util import gen_tensor_dims, gen_nat_constraints, gen_dvar, gen_tvar, \
gen_bvar
from torch.fx.tensor_type import Dyn, TensorType
from torch.nn.modules.conv import Conv2d
from torch.nn.modules.batchnorm import BatchNorm2d
_INFERENCE_RULES: Dict[Target, Callable] = {}
MAX_TENSOR_RANK = 4
def register_inference_rule(call_target):
def register(fn):
if call_target in _INFERENCE_RULES:
raise RuntimeError(f'Inference rule already registered for {call_target}!')
_INFERENCE_RULES[call_target] = fn
return fn
return register
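# Illustrative sketch (hypothetical, not part of the original module): a custom
# rule registered for a made-up call_method target that simply equates the input
# and output tensor types.
#
#     @register_inference_rule("my_identity_op")
#     def my_identity_inference_rule(n: Node, symbols, constraints, counter):
#         out, counter = gen_tvar(counter)
#         symbols[n] = out
#         return [BinConstraintT(symbols[n.args[0]], out, op_eq)], counter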
def generate_flatten_constraints(start_dim, end_dim, input, flattened, n, counter):
d, counter = gen_tensor_dims(n, counter)
c1 = BinConstraintT(input, TensorType(d), op_eq)
start_dim = n if start_dim == -1 else abs(start_dim)
end_dim = n + end_dim + 1 if end_dim < 0 else end_dim + 1
c2 = CalcProduct(start_dim, end_dim, flattened, d)
nat_constraints = gen_nat_constraints(d)
return Conj([c1, c2, *nat_constraints]), counter
@register_inference_rule(getattr)
def get_attr_inference_rule(n: Node, symbols, constraints, counter):
"""
If the attribute is "device" then the tensor shape is preserved
"""
assert isinstance(n.args[0], Node)
assert isinstance(n.args[1], str)
output, counter = gen_tvar(counter)
symbols[n] = output
input = symbols[n.args[0]]
attr = n.args[1]
if attr == 'device':
return [BinConstraintT(input, output, op_eq)], counter
else:
raise NotImplementedError('Not yet implemented')
@register_inference_rule(torch.bmm)
def bmm_inference_rule(n: Node, symbols, constraints, counter):
"""
    Constraints that match the inputs to size-3 tensors
    and combine their dimensions according to the rules
    of batch matrix multiplication
"""
assert isinstance(n.args[0], Node)
assert isinstance(n.args[1], Node)
bmm_output, counter = gen_tvar(counter)
symbols[n] = bmm_output
bmm_input1 = symbols[n.args[0]]
bmm_input2 = symbols[n.args[1]]
dims_input1, counter = gen_tensor_dims(3, counter)
dims_input2, counter = gen_tensor_dims(3, counter)
inputs_dyn = Conj([BinConstraintT(bmm_input1, Dyn, op_eq),
BinConstraintT(bmm_input2, Dyn, op_eq),
BinConstraintT(bmm_output, Dyn, op_eq)])
input1_dyn = Conj([BinConstraintT(bmm_input1, Dyn, op_eq),
BinConstraintT(bmm_input2, TensorType(dims_input2), op_eq),
BinConstraintT(bmm_output, TensorType([dims_input2[0], Dyn, dims_input2[2]]), op_eq)])
input2_dyn = Conj([BinConstraintT(bmm_input2, Dyn, op_eq),
BinConstraintT(bmm_input1, TensorType(dims_input1), op_eq),
BinConstraintT(bmm_output, TensorType([dims_input1[0], dims_input1[1], Dyn]), op_eq)])
consistency_constraints = [BinConstraintD(dims_input1[0], dims_input2[0], op_consistency)]
batch_size, counter = gen_dvar(counter)
inputs_are_tensors = Conj([BinConstraintT(bmm_input1, TensorType(dims_input1), op_eq),
BinConstraintT(bmm_input2, TensorType(dims_input2), op_eq),
BinConstraintT(bmm_output, TensorType([batch_size, dims_input1[1], dims_input2[2]]), op_eq),
*consistency_constraints, DGreatestUpperBound(batch_size, dims_input1[0], dims_input2[0])])
return [Disj([inputs_dyn, input1_dyn, input2_dyn, inputs_are_tensors])], counter
@register_inference_rule("index_select")
def index_select_inference_rule(n: Node, symbols, constraints, counter):
"""
    We constrain the second argument (the index) to be a vector or Dyn.
    The output has the input's shape, except that the dimension at the
    position given by the first argument is replaced by the vector's length.
"""
# print(n.args)
assert isinstance(n.args[0], Node)
assert isinstance(n.args[1], int)
assert isinstance(n.args[2], Node)
index_select, counter = gen_tvar(counter)
symbols[n] = index_select
dims, counter = gen_tensor_dims(1, counter)
# equality constraint
is_size_1 = BinConstraintT(symbols[n.args[2]], TensorType(dims), op_eq)
is_dyn = BinConstraintT(symbols[n.args[2]], Dyn, op_eq)
c2 = Conj([is_size_1, Disj([IndexSelect(i + 1, symbols[n.args[0]], dims[0], n.args[1], index_select)
for i in range(MAX_TENSOR_RANK)])])
c3 = Conj([is_dyn, Disj([IndexSelect(i + 1, symbols[n.args[0]], Dyn, n.args[1], index_select)
for i in range(MAX_TENSOR_RANK)])])
return [Disj([c2, c3])], counter
@register_inference_rule("expand")
def expand_inference_rule(n: Node, symbols, constraints, counter):
"""
We generate the exact constraints as we do for tensor additions but we constraint
the rank of this expression to be equal to len(n.args[1:]) so that only
those cases get considered for the output
"""
assert isinstance(n.args[0], Node)
# define the output for expand
expand, counter = gen_tvar(counter)
symbols[n] = expand
# since we do not have two nodes here, we will construct an argument variable
e1 = symbols[n.args[0]]
e2, counter = gen_tvar(counter)
e2_nat_constraints = []
for arg in n.args[1:]:
assert isinstance(arg, Node) or isinstance(arg, int)
if isinstance(arg, Node):
assert isinstance(symbols[arg], DVar)
e2_nat_constraints.append(BinConstraintD(0, symbols[arg], op_leq))
e2_constraint = BinConstraintT(e2, TensorType([arg if isinstance(arg, int) else symbols[arg] for arg in n.args[1:]]), op_eq)
constraints, counter = gen_broadcasting_constraints(e1, e2, symbols, counter, expand)
    # constrain the output size
dims, counter = gen_tensor_dims(len(n.args[1:]), counter)
nat_constraints = gen_nat_constraints(dims)
c = [BinConstraintT(expand, TensorType(dims), op_eq), *nat_constraints, e2_constraint, *e2_nat_constraints]
constraints += c
return constraints, counter
@register_inference_rule(torch.nn.functional.gelu)
@register_inference_rule(torch.nn.functional.dropout)
@register_inference_rule(torch.nn.functional.softmax)
@register_inference_rule("detach")
@register_inference_rule("to")
@register_inference_rule("int")
@register_inference_rule("long")
@register_inference_rule("contiguous")
@register_inference_rule(torch.ones)
def equality_inference_rule(n: Node, symbols, constraints, counter):
"""
We generate the constraint: input = output
"""
assert isinstance(n.args[0], Node)
output, counter = gen_tvar(counter)
symbols[n] = output
input = symbols[n.args[0]]
assert isinstance(input, TVar)
return [BinConstraintT(input, output, op_eq)], counter
@register_inference_rule("transpose")
def transpose_inference_rule(n: Node, symbols, constraints, counter):
"""
Can be considered as a sequence of two index selects, so we generate constraints accordingly
"""
assert isinstance(n.args[0], Node)
assert isinstance(n.args[1], int)
assert isinstance(n.args[2], int)
output, counter = gen_tvar(counter)
symbols[n] = output
from_arg = symbols[n.args[0]]
assert isinstance(from_arg, TVar)
# input and output are dyn
is_dyn = Conj([BinConstraintT(from_arg, Dyn, op_eq), BinConstraintT(output, Dyn, op_eq)])
# or input is a tensor and we actually do the replacement
c3 = Disj([Transpose(i + 1, from_arg, n.args[1], n.args[2], output) for i in range(MAX_TENSOR_RANK)])
return [Disj([is_dyn, c3])], counter
@register_inference_rule("type_as")
def type_inference_rule(n: Node, symbols, constraints, counter):
"""
We generate the constraint: input = output
"""
assert isinstance(n.args[0], Node)
assert isinstance(n.args[1], Node)
output, counter = gen_tvar(counter)
symbols[n] = output
from_arg = symbols[n.args[0]]
to_arg = symbols[n.args[1]]
assert isinstance(from_arg, TVar)
assert isinstance(to_arg, TVar)
return [BinConstraintT(from_arg, to_arg, op_consistency),
BinConstraintT(output, to_arg, op_eq)], counter
@register_inference_rule("masked_fill_")
def masked_fill_inference_rule(n: Node, symbols, constraints, counter):
"""
    Similar to addition. For now we implement the constraints when
    the argument is a boolean tensor. There is also a case for when
    it is a condition. We will leave this out for now.
"""
assert isinstance(n.args[0], Node)
assert isinstance(n.args[1], Node)
# We will retrieve the type variables from the symbol table
# and confirm they are tensor variables
e1 = symbols[n.args[0]]
e2 = symbols[n.args[1]]
if isinstance(e1, TVar) and isinstance(e2, TVar):
masked_fill_tensor, counter = gen_tvar(counter)
symbols[n] = masked_fill_tensor
return gen_broadcasting_constraints(e1, e2, symbols, counter, masked_fill_tensor)
else:
raise NotImplementedError('Not yet implemented')
@register_inference_rule(torch.nn.functional.embedding)
def embedding_inference_rule_functional(n: Node, symbols, constraints, counter):
assert isinstance(n.args[0], Node)
embedding_dim_weights = symbols[n.args[1]]
# will treat this as a static shape. So we will not use matching.
weight_dims, counter = gen_tensor_dims(2, counter)
equality_constraint = BinConstraintT(embedding_dim_weights, TensorType(weight_dims), op_eq)
embedding_dim = weight_dims[1]
constraints, counter = gen_embedding_rules(n, symbols, embedding_dim, counter)
return [equality_constraint] + constraints, counter
@register_inference_rule(torch.nn.modules.sparse.Embedding)
def embedding_inference_rule(n: Node, module_instance, symbols, constraints, counter):
"""
The output shape differs from the input shape in the last dimension
"""
assert isinstance(n.args[0], Node)
return gen_embedding_rules(n, symbols, module_instance.embedding_dim, counter)
def gen_embedding_rules(n: Node, symbols, embedding_dim, counter):
embedding_output, counter = gen_tvar(counter)
symbols[n] = embedding_output
embedding_input = symbols[n.args[0]]
input_dyn = BinConstraintT(embedding_input, Dyn, op_eq)
output_dyn = BinConstraintT(embedding_output, Dyn, op_eq)
c1 = Conj([input_dyn, output_dyn])
c2 = []
for i in range(1, MAX_TENSOR_RANK):
new_dims, counter = gen_tensor_dims(i, counter)
nat_constraints = gen_nat_constraints(new_dims)
# we consider all tensor sizes and append embedding_dim to the end of the output dimension in all cases
c_tensor_i = Conj([BinConstraintT(embedding_input, TensorType(new_dims), op_eq),
BinConstraintT(embedding_output, TensorType(new_dims + [embedding_dim]), op_eq)] +
nat_constraints)
c2.append(c_tensor_i)
return [Disj([c1, Disj(c2)])], counter
@register_inference_rule(torch.tensor)
def tensor_inference_rule(n: Node, symbols, constraints, counter):
"""
If the tensor is a scalar, we will skip it since we
do not support scalars yet. We will add support in the future
if it's needed. For our examples so far, scalars are not needed.
"""
return [], counter
@register_inference_rule("reshape")
@register_inference_rule("view")
def view_inference_rule(n: Node, symbols, constraints, counter):
"""
Similar to reshape but with an extra condition on the strides
"""
assert isinstance(n.args[0], Node)
# generate the new variable
my_view, counter = gen_tvar(counter)
symbols[n] = my_view
src_var = symbols[n.args[0]]
t2 = [symbols[elem] if isinstance(elem, Node) else elem for elem in n.args[1:]] # target shape
t2_type = []
num_constraints = []
for t in t2:
if t == -1:
var, counter = gen_dvar(counter)
t2_type.append(var)
num_constraints.append(BinConstraintD(var, Dyn, op_neq))
else:
num_constraints.append(BinConstraintD(t, Dyn, op_neq))
t2_type.append(t)
t2_type = TensorType(t2_type) # type: ignore[assignment]
c1 = BinConstraintT(my_view, t2_type, op_eq)
c2 = CanReshape(src_var, t2_type)
# TODO: add the extra check mentioned here:
# https://pytorch.org/docs/stable/generated/torch.Tensor.view.html#torch.Tensor.view
return [c1, c2] + num_constraints, counter # type: ignore[operator]
@register_inference_rule("size")
def size_inference_rule(n: Node, symbols, constraints, counter):
"""
The constraint is just lhs = rhs.
Ex: size = input_ids.size()
"""
if len(n.args) == 1:
# generate the new variable
size, counter = gen_tvar(counter)
symbols[n] = size
input = symbols[n.args[0]]
c = BinConstraintT(input, size, op_eq)
return [c], counter
elif len(n.args) == 2:
# TODO: review this rule; should input = dyn; output = dyn be included here?
if isinstance(n.args[1], int):
# generate the new variable
size_index, counter = gen_dvar(counter)
symbols[n] = size_index
input = symbols[n.args[0]]
c2 = [GetItem(i + 1, n.args[1], size_index, input) for i in range(MAX_TENSOR_RANK)]
c3 = BinConstraintD(0, size_index, op_leq)
input_dyn = BinConstraintT(input, Dyn, op_eq)
output_dyn = BinConstraintD(size_index, Dyn, op_eq)
c1 = Conj([input_dyn, output_dyn])
return [Disj([c1, Conj([Disj(c2), c3])])], counter
else:
raise NotImplementedError
else:
raise NotImplementedError
def range_check(i, n):
"""
Checks if an index i is within range of a size n list
Args:
i: index
n: list size
Returns: Boolean
"""
if i >= 0:
return T() if i < n else F()
else:
        return T() if i >= -n else F()
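# For example (illustrative): range_check(2, 4) and range_check(-1, 4) evaluate
# to T(), while range_check(5, 4) and range_check(-5, 4) evaluate to F().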
@register_inference_rule(torch.cumsum)
def cumsum_inference_rule(n: Node, symbols, constraints, counter):
"""
Input and output shapes should be equal
We should verify that the index is valid
"""
assert isinstance(n.args[0], Node)
arg_1 = n.args[1] if len(n.args) > 1 else n.kwargs["dim"]
assert isinstance(arg_1, int)
output, counter = gen_tvar(counter)
symbols[n] = output
input = symbols[n.args[0]]
input_dyn = BinConstraintT(input, Dyn, op_eq)
output_dyn = BinConstraintT(output, Dyn, op_eq)
c1 = Conj([input_dyn, output_dyn])
c2 = []
for i in range(1, MAX_TENSOR_RANK + 1):
new_dims, counter = gen_tensor_dims(i, counter)
nat_constraints = gen_nat_constraints(new_dims)
c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims), op_eq),
BinConstraintT(output, TensorType(new_dims), op_eq)] +
[range_check(arg_1, i)] + nat_constraints)
c2.append(c_tensor_i)
dyn_or_tensor = Disj([c1, Disj(c2)])
return [dyn_or_tensor], counter
@register_inference_rule(_assert_is_none)
def assert_inference_rule(n: Node, symbols, constraints, counter):
assert len(n.users) == 0
return [], counter
@register_inference_rule(operator.getitem)
def getitem_inference_rule(n: Node, symbols, constraints, counter):
assert isinstance(n.args[0], Node)
# dimension output case
if isinstance(n.args[1], int):
# create and store the new dimension variable
get_item_output, counter = gen_dvar(counter)
symbols[n] = get_item_output
        # retrieve arg variables
get_item_arg = symbols[n.args[0]]
assert isinstance(get_item_arg, TVar)
# if the input is dynamic, we accept any index and return
# a dynamic dimension as output
input_dyn = BinConstraintT(get_item_arg, Dyn, op_eq)
output_dyn = BinConstraintD(get_item_output, Dyn, op_eq)
c1 = Conj([input_dyn, output_dyn])
# if the input is a tensor,
# generate a getItem constraint which will be expanded based on the
# tensor dimension.
c2 = [GetItem(i + 1, n.args[1], get_item_output, get_item_arg) for i in range(MAX_TENSOR_RANK)]
# since the output is a dimension, we make sure it's a natural number
        # added as a conjunction to the disjunction of c2
c3 = BinConstraintD(0, get_item_output, op_leq)
return [Disj([c1, Conj([Disj(c2), c3])])], counter
# tensor output case
elif isinstance(n.args[1], tuple):
# create and store the new tensor variable
get_item_output, counter = gen_tvar(counter)
symbols[n] = get_item_output
        # retrieve arg variables
if n.args[0] in symbols:
get_item_arg = symbols[n.args[0]]
assert isinstance(get_item_arg, TVar)
input_dyn = BinConstraintT(get_item_arg, Dyn, op_eq)
output_dyn = BinConstraintT(get_item_output, Dyn, op_eq) # type: ignore[assignment]
c1 = Conj([input_dyn, output_dyn])
c2 = [GetItemTensor(i + 1, n.args[1], get_item_output, get_item_arg) # type: ignore[misc]
for i in range(MAX_TENSOR_RANK)]
else:
# TODO: we should figure out why there is a key-error here.
return [], counter
return [Disj([c1, *c2])], counter
else:
raise RuntimeError('Method not yet implemented')
@register_inference_rule(operator.gt)
def gt_inference_rule(n: Node, symbols, constraints, counter):
assert isinstance(n.args[0], Node) or isinstance(n.args[0], int)
assert isinstance(n.args[1], Node) or isinstance(n.args[1], int)
# We make sure this node will not be used again. We do not
# generate a constraint about that node. Only about the operands.
e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]
if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
if isinstance(e1, TVar) and isinstance(e2, TVar):
gt_tensor, counter = gen_tvar(counter)
symbols[n] = gt_tensor
return gen_broadcasting_constraints(e1, e2, symbols, counter, gt_tensor)
elif isinstance(e1, DVar) and isinstance(e2, DVar):
# This is meant to be used for flow analysis only
gt_constraint = BinConstraintD(e1, e2, op_gt)
my_gt, counter = gen_bvar(counter)
equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
return [equality_constraint], counter
else:
raise RuntimeError('Sort Mismatch')
elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
if isinstance(e1, DVar):
# This is meant to be used for flow analysis only
gt_constraint = BinConstraintD(e1, e2, op_gt)
my_gt, counter = gen_bvar(counter)
equality_constraint = BinConstraintD(my_gt, gt_constraint, op_eq)
return [equality_constraint], counter
else:
raise NotImplementedError('Method not yet implemented')
else:
raise NotImplementedError('Method not yet implemented')
@register_inference_rule(operator.eq)
def eq_inference_rule(n: Node, symbols, constraints, counter):
assert isinstance(n.args[0], Node) or isinstance(n.args[0], int)
assert isinstance(n.args[1], Node) or isinstance(n.args[1], int)
e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]
if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
if isinstance(e1, TVar) and isinstance(e2, TVar):
eq_tensor, counter = gen_tvar(counter)
symbols[n] = eq_tensor
return gen_broadcasting_constraints(e1, e2, symbols, counter, eq_tensor)
elif isinstance(e1, DVar) and isinstance(e2, DVar):
# This is meant to be used for flow analysis only
eq_constraint = BinConstraintD(e1, e2, op_eq)
my_eq, counter = gen_bvar(counter)
equality_constraint = BinConstraintD(my_eq, eq_constraint, op_eq)
return [equality_constraint], counter
else:
raise RuntimeError('Sort Mismatch')
elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
if isinstance(e1, DVar):
# This is meant to be used for flow analysis only
eq_constraint = BinConstraintD(e1, e2, op_eq)
my_eq, counter = gen_bvar(counter)
equality_constraint = BinConstraintD(my_eq, eq_constraint, op_eq)
return [equality_constraint], counter
else:
raise NotImplementedError('Method not yet implemented')
else:
raise NotImplementedError('Method not yet implemented')
@register_inference_rule(operator.ne)
def neq_inference_rule(n: Node, symbols, constraints, counter):
"""
    Translates to inconsistency in gradual types.
    To prove inequality, we should prove that
    the tensors either have different sizes or
    disagree on at least one dimension.
    This is a WIP (it works when the condition
    is false; we are working on making this operation work
    when the condition is true as well).
"""
assert isinstance(n.args[0], Node)
assert isinstance(n.args[1], tuple)
# implementing for size 3 and 4
if len(n.args[1]) == 3:
assert isinstance(n.args[1][0], Node) or isinstance(n.args[1][0], int)
assert isinstance(n.args[1][1], Node) or isinstance(n.args[1][1], int)
assert isinstance(n.args[1][2], Node) or isinstance(n.args[1][2], int)
lhs = symbols[n.args[0]]
b, counter = gen_tensor_dims(4, counter)
input_is_size3 = BinConstraintT(lhs, TensorType([b[0], b[1], b[2]]), op_eq)
d1 = n.args[1][0] if isinstance(n.args[1][0], int) else symbols[n.args[1][0]]
d2 = n.args[1][1] if isinstance(n.args[1][1], int) else symbols[n.args[1][1]]
d3 = n.args[1][2] if isinstance(n.args[1][2], int) else symbols[n.args[1][2]]
# dimensions not equal
my_ne, counter = gen_bvar(counter)
neq_1 = BinConstraintD(d1, b[0], op_neq)
neq_2 = BinConstraintD(d2, b[1], op_neq)
neq_3 = BinConstraintD(d3, b[2], op_neq)
# dimensions inconsistent
dims_inconsistent1 = Conj([BinConstraintD(d1, Dyn, op_neq), BinConstraintD(b[0], Dyn, op_neq), neq_1])
dims_inconsistent2 = Conj([BinConstraintD(d2, Dyn, op_neq), BinConstraintD(b[1], Dyn, op_neq), neq_2])
dims_inconsistent3 = Conj([BinConstraintD(d3, Dyn, op_neq), BinConstraintD(b[2], Dyn, op_neq), neq_3])
dims_inconsistent = Disj([dims_inconsistent1, dims_inconsistent2, dims_inconsistent3])
# we are covering size 3 and 4 only for now
ne_constraint = Conj([input_is_size3, dims_inconsistent])
my_ne, counter = gen_bvar(counter)
equality_constraint = BinConstraintD(my_ne, ne_constraint, op_eq)
elif len(n.args[1]) == 4:
assert isinstance(n.args[1][0], Node) or isinstance(n.args[1][0], int)
assert isinstance(n.args[1][1], Node) or isinstance(n.args[1][1], int)
assert isinstance(n.args[1][2], Node) or isinstance(n.args[1][2], int)
assert isinstance(n.args[1][3], Node) or isinstance(n.args[1][3], int)
lhs = symbols[n.args[0]]
b1, counter = gen_dvar(counter)
b2, counter = gen_dvar(counter)
b3, counter = gen_dvar(counter)
b4, counter = gen_dvar(counter)
input_is_size4 = BinConstraintT(lhs, TensorType([b1, b2, b3, b4]), op_eq)
d1 = n.args[1][0] if isinstance(n.args[1][0], int) else symbols[n.args[1][0]]
d2 = n.args[1][1] if isinstance(n.args[1][1], int) else symbols[n.args[1][1]]
d3 = n.args[1][2] if isinstance(n.args[1][2], int) else symbols[n.args[1][2]]
d4 = n.args[1][3] if isinstance(n.args[1][3], int) else symbols[n.args[1][3]]
# dimensions not equal
my_ne, counter = gen_bvar(counter)
neq_1 = BinConstraintD(d1, b1, op_neq)
neq_2 = BinConstraintD(d2, b2, op_neq)
neq_3 = BinConstraintD(d3, b3, op_neq)
neq_4 = BinConstraintD(d4, b4, op_neq)
        # dimensions inconsistent
dims_inconsistent1 = Conj([BinConstraintD(d1, Dyn, op_neq), BinConstraintD(b1, Dyn, op_neq), neq_1])
dims_inconsistent2 = Conj([BinConstraintD(d2, Dyn, op_neq), BinConstraintD(b2, Dyn, op_neq), neq_2])
dims_inconsistent3 = Conj([BinConstraintD(d3, Dyn, op_neq), BinConstraintD(b3, Dyn, op_neq), neq_3])
        dims_inconsistent4 = Conj([BinConstraintD(d4, Dyn, op_neq), BinConstraintD(b4, Dyn, op_neq), neq_4])
dims_inconsistent = Disj([dims_inconsistent1, dims_inconsistent2, dims_inconsistent3, dims_inconsistent4])
ne_constraint = Conj([input_is_size4, dims_inconsistent])
my_ne, counter = gen_bvar(counter)
equality_constraint = BinConstraintD(my_ne, ne_constraint, op_eq)
else:
raise NotImplementedError('Method not yet implemented')
return [equality_constraint], counter
@register_inference_rule(operator.lt)
def lt_inference_rule(n: Node, symbols, constraints, counter):
assert isinstance(n.args[0], Node) or isinstance(n.args[0], int)
assert isinstance(n.args[1], Node) or isinstance(n.args[1], int)
# We make sure this node will not be used again. We do not
# generate a constraint about that node. Only about the operands.
e1 = symbols[n.args[0]] if isinstance(n.args[0], Node) else n.args[0]
e2 = symbols[n.args[1]] if isinstance(n.args[1], Node) else n.args[1]
if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
if isinstance(e1, TVar) and isinstance(e2, TVar):
lt_tensor, counter = gen_tvar(counter)
symbols[n] = lt_tensor
return gen_broadcasting_constraints(e1, e2, symbols, counter, lt_tensor)
elif isinstance(e1, DVar) and isinstance(e2, DVar):
# This is meant to be used for flow analysis only
lt_constraint = BinConstraintD(e1, e2, op_lt)
my_lt, counter = gen_bvar(counter)
equality_constraint = BinConstraintD(my_lt, lt_constraint, op_eq)
return [equality_constraint], counter
else:
raise RuntimeError('Sort Mismatch')
elif isinstance(n.args[0], Node) and not isinstance(n.args[1], Node):
if isinstance(e1, DVar):
# This is meant to be used for flow analysis only
lt_constraint = BinConstraintD(e1, e2, op_lt)
my_lt, counter = gen_bvar(counter)
equality_constraint = BinConstraintD(my_lt, lt_constraint, op_eq)
return [equality_constraint], counter
else:
raise NotImplementedError('Method not yet implemented')
else:
raise NotImplementedError('Method not yet implemented')
@register_inference_rule(torch.full)
def full_inference_rule(n: Node, symbols, constraints, counter):
full, counter = gen_tvar(counter)
symbols[n] = full
res = []
assert isinstance(n.args[0], Iterable)
for arg in n.args[0]:
res.append(symbols[arg])
c = BinConstraintT(full, TensorType(list(res)), op_eq) # type: ignore[arg-type]
return [c], counter
# TODO normalize index
@register_inference_rule(torch.arange)
def arange_inference_rule(n: Node, symbols, constraints, counter):
start = 0
step = 1
if len(n.args) == 1:
end = symbols[n.args[0]]
else:
raise NotImplementedError('Not yet implemented')
# int((end - start) / step)
d1, counter = gen_dvar(counter)
size_constraint = BinConstraintD(d1, BinConstraintD(BinConstraintD(end, start, op_sub), step, op_div), op_eq)
arange, counter = gen_tvar(counter)
symbols[n] = arange
    # either one of the parameters is Dyn (then the result size is Dyn), or all are numbers
c1 = Disj([BinConstraintD(end, Dyn, op_eq),
BinConstraintD(start, Dyn, op_eq),
BinConstraintD(step, Dyn, op_eq)])
c2 = BinConstraintD(d1, Dyn, op_eq)
both_dyn = Conj([c1, c2])
c11 = Conj([BinConstraintD(end, Dyn, op_neq),
BinConstraintD(start, Dyn, op_neq),
BinConstraintD(step, Dyn, op_neq)])
c22 = BinConstraintD(d1, Dyn, op_neq)
both_numbers = Conj([c11, c22, size_constraint])
return [BinConstraintT(arange, TensorType([d1]), op_eq), Disj([both_dyn, both_numbers])], counter
def gen_broadcasting_constraints(e1, e2, symbols, counter, output_var):
# additional vars that don't correspond to expressions
e11, counter = gen_tvar(counter)
e22, counter = gen_tvar(counter)
# generate constraints
c1 = TGreatestUpperBound(output_var, e11, e22)
c2 = ApplyBroadcasting(e11, e22, e1, e2)
c3 = BinConstraintT(e11, e22, op_consistency)
return [c1, c2, c3], counter
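# Illustrative note (assumption, not from the original source): for two tensor
# variables e1 and e2, the constraints above introduce fresh variables e11/e22 as
# the broadcast-expanded versions of e1/e2, require e11 and e22 to be consistent,
# and make output_var their greatest upper bound. A rule would typically call:
#
#     out, counter = gen_tvar(counter)
#     constraints, counter = gen_broadcasting_constraints(e1, e2, symbols, counter, out)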
@register_inference_rule(operator.mul)
@register_inference_rule(torch.ne)
@register_inference_rule("ne")
@register_inference_rule(torch.add)
@register_inference_rule(operator.add)
def broadcasting_inference_rule(n: Node, symbols, constraints, counter):
op_code = None
if n.target == operator.add or n.target == torch.add:
op_code = op_add
elif n.target == operator.mul:
op_code = op_mul
if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):
if isinstance(symbols[n.args[0]], TVar) and isinstance(symbols[n.args[1]], TVar):
my_output, counter = gen_tvar(counter)
symbols[n] = my_output
e1 = symbols[n.args[0]]
e2 = symbols[n.args[1]]
return gen_broadcasting_constraints(e1, e2, symbols, counter, my_output)
else:
raise NotImplementedError('Method not yet implemented')
elif isinstance(n.args[0], Node) and (isinstance(n.args[1], int) or isinstance(n.args[1], float)):
if isinstance(symbols[n.args[0]], TVar):
my_output, counter = gen_tvar(counter)
symbols[n] = my_output
e1 = symbols[n.args[0]]
return [BinConstraintT(my_output, e1, op_eq)], counter
elif isinstance(symbols[n.args[0]], DVar):
my_output, counter = gen_dvar(counter)
symbols[n] = my_output
e1 = symbols[n.args[0]]
            # we will propagate the runtime value here since this is ordinary arithmetic on a dimension
c = Conj([BinConstraintD(my_output, BinConstraintD(e1, n.args[1], op_code), op_eq),
BinConstraintD(0, my_output, op_leq)])
return [c], counter
    elif isinstance(n.args[1], Node) and (isinstance(n.args[0], int) or isinstance(n.args[0], float)):
if isinstance(symbols[n.args[1]], TVar):
my_output, counter = gen_tvar(counter)
symbols[n] = my_output
e2 = symbols[n.args[1]]
return [BinConstraintT(my_output, e2, op_eq)], counter
elif isinstance(symbols[n.args[1]], DVar):
my_output, counter = gen_dvar(counter)
symbols[n] = my_output
e2 = symbols[n.args[1]]
            # we will propagate the runtime value here since this is ordinary arithmetic on a dimension
c = Conj([BinConstraintD(my_output, BinConstraintD(e2, n.args[0], op_code), op_eq),
BinConstraintD(0, my_output, op_leq)])
return [c], counter
else:
raise NotImplementedError('Method not yet implemented')
else:
# TODO generate add constraints for scalar addition
raise NotImplementedError('Addition not yet implemented')
@register_inference_rule(torch.flatten)
def flatten_inference_rule(n: Node, symbols, constraints, counter):
assert isinstance(n.args[0], Node)
# generate the new variable
flattened, counter = gen_tvar(counter)
symbols[n] = flattened
input = symbols[n.args[0]]
# set the default start and end dims
start_dim = 1
end_dim = -1
if len(n.args) > 1:
assert isinstance(n.args[1], int)
start_dim = n.args[1]
if len(n.args) > 2:
assert isinstance(n.args[2], int)
end_dim = n.args[2]
c1 = BinConstraintT(input, Dyn, op_eq)
c2 = BinConstraintT(flattened, Dyn, op_eq)
both_dyn = Conj([c1, c2])
const = []
for i in range(1, MAX_TENSOR_RANK + 1):
c, counter = generate_flatten_constraints(start_dim, end_dim, input, flattened, i, counter)
const.append(c)
return [Disj([both_dyn, *const])], counter
@register_inference_rule(torch.nn.functional.layer_norm)
def layer_norm_functional(n: Node, symbols, constraints, counter):
"""
    Input and output shapes should be equal, and the input should be
    consistent with the given normalized_shape
"""
assert isinstance(n.args[0], Node)
return gen_layer_norm_constraints(n, n.args[1], symbols, counter)
@register_inference_rule(torch.nn.LayerNorm)
def layer_norm_inference_rule(n: Node, module_instance, symbols, constraints, counter):
"""
Input and output shapes should be equal.
Input should be consistent with the normalized_shape
"""
assert isinstance(n.args[0], Node)
return gen_layer_norm_constraints(n, module_instance.normalized_shape, symbols, counter)
def gen_layer_norm_constraints(n: Node, normalized_shape, symbols, counter):
output, counter = gen_tvar(counter)
symbols[n] = output
input = symbols[n.args[0]]
input_dyn = BinConstraintT(input, Dyn, op_eq)
output_dyn = BinConstraintT(output, Dyn, op_eq)
c1 = Conj([input_dyn, output_dyn])
c2 = []
for i in range(1, MAX_TENSOR_RANK + 1):
new_dims_rhs, counter = gen_tensor_dims(i, counter)
nat_constraints = gen_nat_constraints(new_dims_rhs)
c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims_rhs), op_eq),
BinConstraintT(output, TensorType(new_dims_rhs), op_eq)] +
add_layer_norm_constraints(new_dims_rhs, list(normalized_shape)) +
nat_constraints)
c2.append(c_tensor_i)
return [Disj([c1, Disj(c2)])], counter
@register_inference_rule(torch.nn.Dropout)
@register_inference_rule(torch.nn.ReLU)
def relu_inference_rule(n: Node, module_instance, symbols, constraints, counter):
"""
Input and output shapes should be equal.
"""
assert isinstance(n.args[0], Node)
output, counter = gen_tvar(counter)
symbols[n] = output
input = symbols[n.args[0]]
assert isinstance(input, TVar)
return [BinConstraintT(input, output, op_eq)], counter
@register_inference_rule(torch.nn.Linear)
def linear_inference_rule(n: Node, module_instance, symbols, constraints, counter):
"""
    Input and output sizes should be the same except for the last dimension.
    If the input is Dyn, then so is the output
"""
assert isinstance(n.args[0], Node)
return linear_constraints(n, module_instance.in_features, module_instance.out_features, symbols, counter)
@register_inference_rule("dim") # type: ignore[attr-defined]
def torch_dim_inference_rule(n: Node, symbols, constraints, counter):
assert isinstance(n.args[0], Node)
my_dim, counter = gen_dvar(counter)
symbols[n] = my_dim
input = symbols[n.args[0]]
input_dyn = BinConstraintT(input, Dyn, op_eq)
output_dyn = BinConstraintD(my_dim, Dyn, op_eq)
c1 = []
for i in range(1, MAX_TENSOR_RANK + 1):
new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
c_tensor_i = Conj([BinConstraintT(input, TensorType(new_dims_rhs_1), op_eq),
BinConstraintD(my_dim, i, op_eq)])
c1.append(c_tensor_i)
return [Disj([Conj([input_dyn, output_dyn]), Disj(c1)])], counter
@register_inference_rule(torch._C._nn.linear) # type: ignore[attr-defined]
def torch_linear_inference_rule(n: Node, symbols, constraints, counter):
assert isinstance(n.args[0], Node)
weight_dims, counter = gen_tensor_dims(2, counter)
equality_constraint = BinConstraintT(n.args[1], TensorType(weight_dims), op_eq)
constraints, counter = linear_constraints(n, weight_dims[0], weight_dims[1], symbols, counter)
return [equality_constraint] + constraints, counter
def linear_constraints(n: Node, in_features, out_features, symbols, counter):
linear_output, counter = gen_tvar(counter)
symbols[n] = linear_output
linear_input = symbols[n.args[0]]
input_dyn = BinConstraintT(linear_input, Dyn, op_eq)
output_dyn = BinConstraintT(linear_output, Dyn, op_eq)
c1 = Conj([input_dyn, output_dyn])
c2 = []
for i in range(1, MAX_TENSOR_RANK + 1):
new_dims_rhs_1, counter = gen_tensor_dims(i, counter)
new_dims_rhs_2, counter = gen_tensor_dims(i, counter)
nat_constraints = gen_nat_constraints(new_dims_rhs_1 + new_dims_rhs_2)
c_tensor_i = Conj([BinConstraintT(linear_input, TensorType(new_dims_rhs_1), op_eq),
BinConstraintT(linear_output, TensorType(new_dims_rhs_2), op_eq)] +
add_linear_constraints(new_dims_rhs_1, new_dims_rhs_2, in_features, out_features) +
nat_constraints)
c2.append(c_tensor_i)
return [Disj([c1, Disj(c2)])], counter
def add_layer_norm_constraints(input_dim, normalized_dim):
"""
    The constraints say that the type has the form: [*, 1024, 1024]
    while the normalized_dim has the form [1024, 1024]
Args:
input_dim: Input shape of layer norm
normalized_dim: normalized_dim parameter of the module instance
"""
# in this case we return false since there's a pattern mismatch
if len(normalized_dim) > len(input_dim):
return [F()]
else:
constraints = []
for i, n in zip(reversed(input_dim), reversed(normalized_dim)):
constraints.append(BinConstraintD(i, n, op_consistency))
return constraints
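# Worked example (illustrative): for input_dim = [d1, d2, d3] and
# normalized_dim = [1024, 1024], the constraints returned are
# [BinConstraintD(d3, 1024, op_consistency), BinConstraintD(d2, 1024, op_consistency)],
# leaving the leading dimension d1 unconstrained.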
def add_linear_constraints(dims1, dims2, in_features, out_features):
assert len(dims1) == len(dims2)
constraints = []
for i in range(len(dims1)):
if i == len(dims1) - 1:
constraints.append(BinConstraintD(dims1[i], in_features, op_consistency))
constraints.append(BinConstraintD(dims2[i], out_features, op_eq))
else:
constraints.append(BinConstraintD(dims1[i], dims2[i], op_eq))
return constraints
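# Worked example (illustrative): for dims1 = [a1, a2], dims2 = [b1, b2],
# in_features = 4 and out_features = 8, the constraints returned are
# [a1 == b1, a2 consistent-with 4, b2 == 8] (as BinConstraintD objects).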
@register_inference_rule(torch.reshape)
def reshape_inference_rule(n: Node, symbols, constraints, counter):
assert isinstance(n.args[0], Node)
# generate the new variable
my_reshape, counter = gen_tvar(counter)
symbols[n] = my_reshape
src_var = symbols[n.args[0]]
t2 = n.args[1]
t2_type = TensorType([Dyn if elem == -1 else elem for elem in t2]) # type: ignore[union-attr]
c1 = BinConstraintT(my_reshape, t2_type, op_eq) # type: ignore[union-attr]
c2 = CanReshape(src_var, t2_type)
return [c1, c2], counter
@register_inference_rule(BatchNorm2d)
def batchnorm_inference_rule(n: Node, module_instance, symbols, constraints, counter):
assert isinstance(n.args[0], Node)
# generate the new variable
batchnorm_output, counter = gen_tvar(counter)
symbols[n] = batchnorm_output
batchnorm_input = symbols[n.args[0]]
# dim vars
d1, counter = gen_dvar(counter)
d2, counter = gen_dvar(counter)
d3, counter = gen_dvar(counter)
d4, counter = gen_dvar(counter)
nat_constraints = gen_nat_constraints([d1, d2, d3, d4])
c1 = BinConstraintT(batchnorm_input, TensorType([d1, d2, d3, d4]), op_matching)
c2 = BinConstraintT(batchnorm_input, batchnorm_output, op_eq)
return [c1, c2, *nat_constraints], counter
@register_inference_rule(torch.nn.AdaptiveAvgPool2d)
def adaptive_inference_rule(n: Node, module_instance, symbols, constraints, counter):
assert isinstance(n.args[0], Node)
avg_pool, counter = gen_tvar(counter)
symbols[n] = avg_pool
input_var = symbols[n.args[0]]
# dim vars
d1, counter = gen_dvar(counter)
d2, counter = gen_dvar(counter)
d3, counter = gen_dvar(counter)
d4, counter = gen_dvar(counter)
nat_constraints = gen_nat_constraints([d1, d2, d3, d4])
c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)
c2 = BinConstraintT(avg_pool, TensorType([d1, d2, module_instance.output_size[0], module_instance.output_size[1]]), op_eq)
return [c1, c2, *nat_constraints], counter
@register_inference_rule(Conv2d)
def conv2d_inference_rule(n: Node, module_instance, symbols, constraints, counter):
assert isinstance(n.args[0], Node)
my_conv, counter = gen_tvar(counter)
symbols[n] = my_conv
input_var = symbols[n.args[0]]
# dim vars
[d1, d2, d3, d4], counter = gen_tensor_dims(MAX_TENSOR_RANK, counter)
# c1 = Matching(input_var, TensorType([d1, d2, d3, d4]))
c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)
# c2 = DConsistency(module_instance.in_channels, d2)
c2 = BinConstraintD(module_instance.in_channels, d2, op_consistency)
c3 = CalcConv(my_conv, input_var,
module_instance.out_channels,
module_instance.kernel_size,
module_instance.padding,
module_instance.stride,
module_instance.dilation, [d1, d2, d3, d4])
nat_constraints = gen_nat_constraints([d1, d2, d3, d4])
return [c1, c2, c3, *nat_constraints], counter
@register_inference_rule(torch.nn.MaxPool2d)
def maxpool_inference_rule(n: Node, module_instance, symbols, constraints, counter):
assert isinstance(n.args[0], Node)
maxpool, counter = gen_tvar(counter)
symbols[n] = maxpool
input_var = symbols[n.args[0]]
# dim vars
[d1, d2, d3, d4], counter = gen_tensor_dims(MAX_TENSOR_RANK, counter)
c1 = BinConstraintT(input_var, TensorType([d1, d2, d3, d4]), op_matching)
c2 = CalcMaxPool(maxpool, input_var, module_instance.kernel_size, module_instance.padding,
module_instance.stride, module_instance.dilation, [d1, d2, d3, d4])
nat_constraints = gen_nat_constraints([d1, d2, d3, d4])
return [c1, c2, *nat_constraints], counter
class ConstraintGenerator:
def __init__(self, traced, graph=None):
self.traced = traced # traced or tracer.root
self.traced_params = dict(self.traced.named_parameters())
self.constraints = []
self.symbol_dict = {}
self.graph = traced.graph if hasattr(traced, 'graph') else graph
def generate_constraints(self, counter=0):
"""
Iterate through every node and generate constraints
Effect: self.constraints will be populated with the final constraints
"""
graph = self.graph
all_constraints = []
# Annotate with Dyn if no type exists
for n in graph.nodes:
if n.type is None:
n.type = Dyn
for n in graph.nodes:
(constraints, counter) = self.generate_constraints_node(n, counter)
all_constraints += constraints
return Conj(all_constraints), counter
def generate_constraints_node(self, n: Node, counter):
"""
        Generate constraints for the given node.
Currently supported operations:
- Reshape
- Add
- conv2d
"""
if n.op == 'placeholder':
x, counter = gen_tvar(counter)
self.symbol_dict[n] = x
if n.type != Dyn and (not isinstance(n.type, TensorType)):
if n.type == torch.nn.parameter.Parameter:
# since we have a parameter, the shape must be static
assert 'example_value' in n.meta
n.type = TensorType(n.meta['example_value'].size())
else:
n.type = Dyn
c1 = BinConstraintT(n.type, x, op_precision)
c2 = BinConstraintT(x, MAX_TENSOR_RANK, op_leq)
return [c1, c2], counter
elif n.op == 'call_function':
if n.target in _INFERENCE_RULES:
return _INFERENCE_RULES[n.target](n, self.symbol_dict, self.constraints, counter)
else:
raise RuntimeError(f'No inference rule registered for target {n.target}!')
elif n.op == 'call_module':
module_instance = self.traced.get_submodule(n.target)
if type(module_instance) in _INFERENCE_RULES:
return _INFERENCE_RULES[type(module_instance)](n,
module_instance,
self.symbol_dict,
self.constraints, counter)
else:
raise RuntimeError(f'No inference rule registered for class {type(module_instance)}!')
elif n.op == 'call_method':
if n.target in _INFERENCE_RULES:
return _INFERENCE_RULES[n.target](n, self.symbol_dict, self.constraints, counter)
else:
raise RuntimeError(f'No inference rule registered for target {n.target}!')
elif n.op == 'get_attr':
t = self.traced_params.get(n.target, None)
if isinstance(t, torch.Tensor):
if len(t.shape) > 0:
res = []
for t in t.shape:
res.append(t)
attr_type = TensorType(res)
output, counter = gen_tvar(counter)
self.symbol_dict[n] = output
return [BinConstraintT(output, attr_type, op_eq)], counter
else:
# scalar?
return [], counter
else:
return [], counter
elif n.op == 'output':
return [], counter
else:
raise NotImplementedError(f"Method {n.op} not yet implemented")
| pytorch-master | torch/fx/experimental/migrate_gradual_types/constraint_generator.py |
from typing import Any, Callable, Tuple, Dict, Optional
import logging
import torch
import torch.fx
from torch.fx.node import map_arg
from torch.fx._compatibility import compatibility
from .shape_prop import ShapeProp
from .split_utils import split_by_tags
from .tools_common import (
Tensors,
TensorOrTensors,
NodeList,
NodeSet,
CALLABLE_NODE_OPS,
FxNetAccFusionsFinder,
Names
)
from dataclasses import dataclass
__all__ = ['FxNetMinimizerBadModuleError', 'FxNetMinimizerRunFuncError', 'FxNetMinimizerResultMismatchError']
_LOGGER = logging.getLogger(__name__)
@compatibility(is_backward_compatible=False)
class FxNetMinimizerBadModuleError(Exception):
"""
Raised if failed to split out a minimize module
"""
pass
@compatibility(is_backward_compatible=False)
class FxNetMinimizerRunFuncError(Exception):
"""
Raised if error occurs during run_a or run_b functions
"""
pass
@compatibility(is_backward_compatible=False)
class FxNetMinimizerResultMismatchError(Exception):
"""
Raised if comparing function thinks the results are mismatching.
"""
pass
@dataclass
class _MinimizerSettingBase:
"""
Args:
    `accumulate_error`: Instead of using a's input for both converted modules to
        verify, use the previous outputs of each converted module as input to
        accumulate the errors.
    `traverse_method`: "sequential", "binary" or "accumulate"
        Determines how to traverse the nodes in the FX module.
`find_all`: Minimizer will go through the entire model and return all problematic nodes.
`return_intermediate`: If true, when using `run_nodes()` function to run the
model, intermediate results of all the ops will be returned as output.
"""
accumulate_error: bool = False
traverse_method: str = "sequential"
find_all: bool = False
return_intermediate: bool = False
def __str__(self):
settings_str = "FX Minimizer Settings:\n"
for k, v in vars(self).items():
settings_str += f"\t{k}: {v}\n"
return settings_str
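# Illustrative usage (assumption, not from the original source): since
# _MinimizerSettingBase is a dataclass, settings are typically constructed as
#
#     settings = _MinimizerSettingBase(traverse_method="binary", find_all=True)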
class _MinimizerBase:
"""
    This class is used to automatically find problematic nodes in a model. It takes an FX
    GraphModule and generates submodules while traversing the graph. Then two functions
    `run_a` and `run_b` will be used to run the same submodule and a function `compare_fn`
    will be used to compare the results.
    Currently we provide two ways to traverse the graph and generate submodules.
    1. Sequential traversal: this will traverse the graph node by node and generate
    one submodule with one single node.
    2. Binary searching: this will do a binary search style traversal on the graph.
For internal Users, a guide can be found here https://fb.quip.com/HDtuAgiKGfkP.
"""
def __init__(
self,
module: torch.fx.GraphModule,
sample_input: Tensors,
compare_fn: Callable[[TensorOrTensors, TensorOrTensors, Names], Tuple[float, bool]],
settings: _MinimizerSettingBase,
):
assert isinstance(module, torch.fx.GraphModule)
self.module = module
self.sample_input = sample_input
self.compare_fn = compare_fn
self.settings = settings
# Stores outputs of run_a function
self.a_outputs: Dict[str, Any] = {}
# Stores outputs of run_b function
self.b_outputs: Dict[str, Any] = {}
# Stores the results of compare_fn
self.results: Dict[Any, Any] = {}
callable_nodes = {
node for node in self.module.graph.nodes if node.op in CALLABLE_NODE_OPS
}
ShapeProp(self.module).propagate(*self.sample_input)
self.fusions = FxNetAccFusionsFinder(self.module, callable_nodes)()
        # Check if the number of inputs in sample_input matches the number of placeholders
placeholders = [
node.name for node in self.module.graph.nodes if node.op == "placeholder"
]
assert len(placeholders) == len(self.sample_input)
# Store sample_input
for i, name in enumerate(placeholders):
self.a_outputs[name] = sample_input[i]
self.b_outputs[name] = sample_input[i]
def run_a(self, mod: torch.fx.GraphModule, inputs: Tensors) -> TensorOrTensors:
"""
Run `mod` with `inputs` and generate output. The output will be compared with
output of run_b().
"""
raise RuntimeError("run_a() is not implemented.")
def run_b(self, mod: torch.fx.GraphModule, inputs: Tensors) -> TensorOrTensors:
"""
Run `mod` with `inputs` and generate output. The output will be compared with
output of run_a().
"""
raise RuntimeError("run_b() is not implemented.")
def _store_outputs(
self,
a_result: TensorOrTensors,
b_result: TensorOrTensors,
submodule: torch.fx.GraphModule,
):
"""
        Store the outputs of self.run_a() and self.run_b() into self.a_outputs and
        self.b_outputs, so that we can use them when executing subsequent nodes that
        take those outputs as inputs.
Args:
a_result: Output of self.run_a(). Could be a tensor or tensors.
b_result: Output of self.run_b(). Could be a tensor or tensors.
submodule: The module that generates a_result and b_result.
"""
output_node = next(
node for node in submodule.graph.nodes if node.op == "output"
)
# Only one output
if isinstance(output_node.args[0], torch.fx.Node):
self.a_outputs[output_node.args[0].name] = a_result
self.b_outputs[output_node.args[0].name] = b_result
# Multiple outputs
else:
for i, arg in enumerate(output_node.args[0]):
self.a_outputs[arg.name] = a_result[i]
self.b_outputs[arg.name] = b_result[i]
def _get_submod_inputs(
self, main_module: torch.fx.GraphModule, submod_path: str
) -> Tuple[Tensors, Tensors]:
"""
        Try to get submodule inputs from stored outputs. If they can't be found,
        run the main module with a forward pre-hook to capture the submodule inputs.
        If accumulate_error is False, use a_input for run_a() and run_b(),
        otherwise use a_input for run_a and b_input for run_b.
        Args:
            main_module: Top-level fx module.
submod_path: Path to the submodule we want to run and compare results.
Returns:
a_input: List of tensor(s) that will be used by run_a() as submodule inputs.
b_input: List of tensor(s) that will be used by run_b() as submodule inputs.
"""
a_input = []
b_input = []
submodule = getattr(main_module, submod_path)
placeholders = [
node.name for node in submodule.graph.nodes if node.op == "placeholder"
]
        # If all placeholders can be found in stored outputs, use stored
        # outputs as inputs. Otherwise, run the main module with a forward
        # pre-hook to capture the inputs.
if set(placeholders) <= self.a_outputs.keys():
for name in placeholders:
a_input.append(self.a_outputs[name])
b_input.append(self.b_outputs[name])
else:
if self.settings.accumulate_error:
print(f"Can't find previous stored outputs named {placeholders}!")
def get_inputs(self: torch.nn.Module, inputs: Any):
nonlocal a_input
a_input = inputs
# Use forward hook to get the inputs to the submodule
handle = submodule.register_forward_pre_hook(get_inputs)
main_module(*self.sample_input)
handle.remove()
b_input = a_input
if not self.settings.accumulate_error:
return a_input, a_input
return a_input, b_input
def _tag_nodes(self, selected_nodes: NodeSet):
"""
Tag selected nodes with tag "minimize". Nodes with the same tags will
be split to the same submodule afterwards.
Args:
selected_nodes: Nodes that we want to minimize. We will tag those nodes
with "minimize", all preceding nodes with "main_0" and all following
nodes with "main_1".
"""
for node in self.module.graph.nodes:
if node.op not in CALLABLE_NODE_OPS:
continue
if node in selected_nodes:
node.tag = "minimize"
elif any(
n.tag in {"minimize", "main_1"}
for n in node.all_input_nodes
if n.op in CALLABLE_NODE_OPS
):
node.tag = "main_1"
else:
node.tag = "main_0"
def _build_submodule(self, nodes: NodeSet) -> Tuple[torch.fx.GraphModule, str]:
"""
Split self.module so that one submodule consists of `nodes` and only `nodes`.
Args:
nodes: Nodes that we want to include in the minimize submodule.
Returns:
split_module (torch.fx.GraphModule): the module after split.
submodule_name (str): the name of the submodule that consists of `nodes`.
"""
# Color provided nodes
self._tag_nodes(nodes)
# Split module based on coloring
split_module = split_by_tags(self.module, ["main_0", "minimize", "main_1"])
# Find submodule containing colored nodes
submodule_name: str = ""
for child_name, _ in split_module.named_children():
# Skip submodules we're not interested in at the moment
if "minimize" not in child_name:
continue
if submodule_name == "":
submodule_name = child_name
else:
raise FxNetMinimizerBadModuleError(
f"Expected only one minimize submodule with nodes {nodes}"
)
if submodule_name == "":
raise FxNetMinimizerBadModuleError(
f"Minimize submodule was not found with nodes {nodes}"
)
return split_module, submodule_name
def _run_and_compare(
self,
split_module: torch.fx.GraphModule,
submod_name: str,
output_names: Names
):
"""
Run the submodule in `split_module` that has name `submod_name`
using `self.run_a` and `self.run_b` and compare their results.
Args:
split_module: Main module that contains the minimize submodule.
submod_name: Name of the minimize submodule.
            output_names: Names of the nodes we want to output. If None, we
                will use the original output.
"""
submodule = getattr(split_module, submod_name)
a_input, b_input = self._get_submod_inputs(split_module, submod_name)
if output_names:
output_nodes: NodeList = []
for node in submodule.graph.nodes:
if node.op == "output":
submodule.graph.erase_node(node)
if node.name in output_names:
output_nodes.append(node)
submodule.graph.output(
output_nodes[0] if len(output_nodes) == 1 else tuple(output_nodes)
)
submodule.graph.lint()
submodule.recompile()
# Use name of args in output node as key to store comparison result
for node in submodule.graph.nodes:
if node.op == "output":
result_key = map_arg(node.args, lambda x: x.name)
a_result = self.run_a(submodule, a_input)
b_result = self.run_b(submodule, b_input)
self._store_outputs(a_result, b_result, submodule)
# Compare results
names: Names = output_names
if output_names is None:
names = [str(v) for v in result_key]
numeric_result, bool_result = self.compare_fn(a_result, b_result, names)
self.results[result_key] = numeric_result
if not bool_result:
raise FxNetMinimizerResultMismatchError(f"Result mismatch for {result_key}")
def _binary_search_impl(self, nodes: NodeList) -> NodeSet:
"""
Recursive binary search implementation.
"""
cur_nodes: NodeSet = set(nodes)
for node in nodes:
if node in self.fusions:
cur_nodes.update(self.fusions[node])
try:
split_module, submod_name = self._build_submodule(cur_nodes)
self._run_and_compare(
split_module,
submod_name,
[]
)
except (FxNetMinimizerRunFuncError, FxNetMinimizerResultMismatchError):
if len(nodes) == 1:
return cur_nodes
mid = len(nodes) // 2
culprits = self._binary_search_impl(nodes[:mid])
if not self.settings.find_all:
return culprits
culprits.update(self._binary_search_impl(nodes[mid:]))
if len(culprits) == 0:
raise FxNetMinimizerBadModuleError(
"Found an error in a group of nodes, but was not able to minimize",
nodes,
)
return culprits
else:
return set()
def _binary_traverse(self, nodes: NodeList) -> NodeSet:
"""
Binary search on `nodes` for culprit.
"""
return self._binary_search_impl(nodes)
def _sequential_traverse(self, nodes: NodeList) -> NodeSet:
"""
Traverse `nodes` one by one and determine if any of them is a culprit.
"""
culprits: NodeSet = set()
for node in nodes:
_LOGGER.info(f"Visit node: {node.name}")
cur_nodes: NodeSet = {node}
if node in self.fusions:
cur_nodes = self.fusions[node]
try:
split_module, submod_name = self._build_submodule(cur_nodes)
self._run_and_compare(
split_module, submod_name, [node.name]
)
except (FxNetMinimizerResultMismatchError):
culprits.add(node)
if not self.settings.find_all:
return culprits
except (FxNetMinimizerRunFuncError):
culprits.update(cur_nodes)
if not self.settings.find_all:
return culprits
return culprits
def _accumulate_traverse(self, nodes: NodeList) -> NodeSet:
culprits: NodeSet = set()
nodes_to_run: NodeSet = set()
        # find_all is not supported for accumulate traversal because all the
        # ops run on NNPI. So we return after the first op that raises an error.
if self.settings.find_all:
print("'Find All' mode is not supported in accumulate traversal.")
return culprits
for node in nodes:
nodes_to_run.add(node)
node_name = node.name
if node_name is not None and isinstance(node_name, tuple):
node_name = node_name[0]
assert node_name is not None and isinstance(node_name, str), f"minimize: node_name: {node_name}"
try:
split_module, submod_name = self._build_submodule(nodes_to_run)
self._run_and_compare(split_module, submod_name, [node_name])
except (FxNetMinimizerResultMismatchError,
FxNetMinimizerRunFuncError):
culprits.add(node)
return culprits
return culprits
def _collect_nodes(self, start: Optional[str], end: Optional[str]) -> NodeList:
"""
        Collect the nodes in the model between the nodes named `start` and `end`.
        These two nodes are also included.
"""
nodes: NodeList = []
add_node = start is None
for node in self.module.graph.nodes:
if node.op not in CALLABLE_NODE_OPS:
continue
if node.name == start:
add_node = True
if add_node:
nodes.append(node)
if node.name == end:
break
return nodes
def run_nodes(self, start: Optional[str] = None, end: Optional[str] = None):
"""
Run part of the model from `start` node to `end` node. If `start` is None
then we start from the beginning of the model. If `end` is None then we
stop at the end of the model.
Args:
start: The name of the node which is the first node of the submodule
we want to run. If set to None, then we'll start with the first
node of the model.
end: The name of the node which is the last node of the submodule we
want to run. If set to None, we'll end with the last node of the
model.
"""
nodes = self._collect_nodes(start, end)
cur_nodes = set(nodes)
for node in nodes:
if node in self.fusions:
cur_nodes.update(self.fusions[node])
output_names = []
if self.settings.return_intermediate:
output_names = [node.name for node in nodes]
try:
split_module, submod_name = self._build_submodule(cur_nodes)
self._run_and_compare(split_module, submod_name, output_names)
except (
FxNetMinimizerRunFuncError,
FxNetMinimizerResultMismatchError,
) as e:
print(e)
def minimize(self, start: Optional[str] = None, end: Optional[str] = None) -> NodeSet:
"""
        Minimize the model from the node named `start` to the node named `end` based
        on self.settings. Find culprits that cause FxNetMinimizerRunFuncError or
        FxNetMinimizerResultMismatchError errors.
Args:
start: The name of the node where we want to start minimizing. If set
to None, then we'll start with the first node of the model.
end: The name of the node where we want to terminate minimizing. If
set to None, we'll end with the last node of the model.
Returns:
nodes: A list of nodes that cause FxNetMinimizerRunFuncError or
FxNetMinimizerResultMismatchError errors during minimizing.
"""
print(self.settings)
print(self.module.graph)
nodes = self._collect_nodes(start, end)
if self.settings.traverse_method == "sequential":
return self._sequential_traverse(nodes)
if self.settings.traverse_method == "binary":
return self._binary_traverse(nodes)
if self.settings.traverse_method == "accumulate":
return self._accumulate_traverse(nodes)
raise RuntimeError(f"Unknown traverse method {self.settings.traverse_method}!")
| pytorch-master | torch/fx/passes/net_min_base.py |
from torch.fx.graph_module import GraphModule
from typing import Any, Callable, Dict, List, Tuple, Type
import torch
import torch.nn as nn
from torch.fx._compatibility import compatibility
__all__ = ['default_matching', 'extract_attrs_for_lowering', 'lift_lowering_attrs_to_nodes']
# Matching method matches the attribute name of the current version to the attribute name of `target_version`
@compatibility(is_backward_compatible=False)
def default_matching(name: str, target_version: int) -> str:
"""Default matching method
"""
return name
# This dict maps the nn.Module class name to the attribute name list that we want to fetch for lowering.
# The first integer in the tuple is the version number of the nn.Module class when we create the parameter list.
# If there's a version mismatch then it means the parameter names in the book might be mismatched with nn.Module.
module_fetch_book: Dict[Type, Tuple[int, List[str], Callable[[str, int], str]]] = {
torch.nn.modules.linear.Linear: (1, ["weight", "bias"], default_matching),
torch.nn.modules.conv.Conv2d: (
1, ["weight", "bias", "kernel_size", "stride", "padding", "dilation", "groups", "padding_mode"], default_matching
),
torch.nn.modules.batchnorm.BatchNorm2d: (2, ["weight", "bias", "running_mean", "running_var", "eps"], default_matching),
torch.nn.modules.pooling.AdaptiveAvgPool2d: (1, [], default_matching),
torch.nn.modules.pooling.MaxPool2d: (
1, ["kernel_size", "stride", "padding", "dilation", "return_indices", "ceil_mode"], default_matching
),
torch.nn.modules.activation.ReLU: (1, ["inplace"], default_matching),
}
@compatibility(is_backward_compatible=False)
def extract_attrs_for_lowering(mod: nn.Module) -> Dict[str, Any]:
"""If `mod` is in `module_fetch_book`, fetch the attributes of `mod` that are listed in `module_fetch_book`,
after checking that the module's version is compatible with `module_fetch_book`.
"""
attrs_for_lowering: Dict[str, Any] = {}
attrs_for_lowering["name"] = torch.typename(mod)
if type(mod) in module_fetch_book:
version, param_to_fetch, matching_method = module_fetch_book[type(mod)]
if version < mod._version:
raise RuntimeError(f"Fetcher version {version} tries to fetch {torch.typename(mod)} version {mod._version}, "
"please upgrade the module_fetch_book, open an issue and @842974287 "
"or report a bug to AIACC team directly.")
for attr in param_to_fetch:
attrs_for_lowering[attr] = getattr(mod, matching_method(attr, mod._version))
else:
raise RuntimeError(f"{torch.typename(mod)} is not in the module_fetch_book yet, "
"please add it to the module_fetch_book, open an issue and @842974287 "
"or report a bug to AIACC team directly.")
return attrs_for_lowering
@compatibility(is_backward_compatible=False)
def lift_lowering_attrs_to_nodes(fx_module: GraphModule) -> None:
"""Recursively traverse all `fx_module` nodes and fetch the module's attributes if the node is a leaf module.
"""
submodules = dict(fx_module.named_modules())
for node in fx_module.graph.nodes:
if node.op == "call_module":
if isinstance(submodules[node.target], GraphModule):
lift_lowering_attrs_to_nodes(submodules[node.target])
else:
node.attrs_for_lowering = extract_attrs_for_lowering(submodules[node.target])
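# Illustrative usage sketch (not part of the original file): after symbolically
# tracing a module, `lift_lowering_attrs_to_nodes` attaches an `attrs_for_lowering`
# dict to every leaf "call_module" node whose type appears in `module_fetch_book`.
# The module below is made up for the example.
#
#   import torch
#   from torch.fx import symbolic_trace
#
#   class TinyNet(torch.nn.Module):
#       def __init__(self):
#           super().__init__()
#           self.fc = torch.nn.Linear(4, 5)
#           self.act = torch.nn.ReLU()
#       def forward(self, x):
#           return self.act(self.fc(x))
#
#   gm = symbolic_trace(TinyNet())
#   lift_lowering_attrs_to_nodes(gm)
#   for node in gm.graph.nodes:
#       if node.op == "call_module":
#           print(node.name, node.attrs_for_lowering["name"])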
| pytorch-master | torch/fx/passes/param_fetch.py |
from typing import List, Tuple, Union, Dict, Any, Set, Mapping
import collections
from dataclasses import dataclass
import torch
import torch.fx
from torch.fx.node import _get_qualified_name
from torch.fx._compatibility import compatibility
__all__ = ['get_acc_ops_name', 'get_node_target', 'is_node_output_tensor', 'FxNetAccFusionsFinder', 'legalize_graph']
Tensors = Union[Tuple[torch.Tensor], List[torch.Tensor]]
TensorOrTensors = Union[torch.Tensor, Tensors]
NodeList = List[torch.fx.Node]
NodeSet = Set[torch.fx.Node]
Names = List[str]
CALLABLE_NODE_OPS = {"call_module", "call_function", "call_method"}
@compatibility(is_backward_compatible=False)
def get_acc_ops_name(k):
if isinstance(k, str):
return k
elif k.__module__ and "acc_ops" in k.__module__:
return f"acc_ops.{k.__name__}"
else:
module = k.__module__.replace('torch._ops', 'torch.ops') # WAR for bug in how torch.ops assigns module
return f"{module if module else ''}.{k.__name__}"
@compatibility(is_backward_compatible=False)
def get_node_target(submodules: Mapping[str, torch.nn.Module], node: torch.fx.Node) -> str:
"""
Given a `node` returns its target typename.
For "call_method" node, return node.target which is the name of that method being called.
This could potentially lead to conflicts but should be okay because normally it's on a tensor.
For "call_function" node, return typename of node.target.
For "call_module" node, return typename of the module that node.target points to.
If seeing "_VariableFunctionsClass" in the target name string, it will be replaced by
"torch". e.g. _VariableFunctionsClass.relu would become torch.relu.
"""
assert node.op in CALLABLE_NODE_OPS, (
"Expect op types of " + ", ".join(CALLABLE_NODE_OPS) + f", but found {node.op}"
)
if node.op == "call_module":
assert isinstance(node.target, str)
submod = submodules[node.target]
submod_type = getattr(submod, "_base_class_origin", type(submod))
return get_acc_ops_name(submod_type)
elif node.op == "call_function":
target: Any = node.target
return (
f"acc_ops.{target.__name__}"
if target.__module__ is not None and "acc_ops" in target.__module__
else _get_qualified_name(target)
)
else:
assert isinstance(node.target, str)
return node.target
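# Illustrative sketch (not part of the original file): what `get_node_target`
# typically returns for the different callable node kinds. `my_module` is a
# hypothetical traced module containing a Linear layer and a torch.relu call.
#
#   gm = torch.fx.symbolic_trace(my_module)
#   submodules = dict(gm.named_modules())
#   for node in gm.graph.nodes:
#       if node.op in CALLABLE_NODE_OPS:
#           print(node.op, get_node_target(submodules, node))
#   # e.g. "call_module torch.nn.modules.linear.Linear"
#   #      "call_function torch.relu"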
@compatibility(is_backward_compatible=False)
def is_node_output_tensor(node: torch.fx.Node) -> bool:
"""Checks if the node output produces a Tensor or not.
NOTE: This requires `ShapeProp` to be run on the containing fx graph before
calling this function. This is because it works by checking the `type`
metadata on the node. This metadata is produced by the `ShapeProp`.
"""
type_ = node.meta.get("type", None)
return type_ is not None and issubclass(type_, torch.Tensor)
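# Illustrative sketch (not part of the original file): `is_node_output_tensor`
# only gives meaningful answers after shape propagation has populated
# node.meta["type"]. The module and input shape below are assumptions.
#
#   from torch.fx.passes.shape_prop import ShapeProp
#   gm = torch.fx.symbolic_trace(my_module)
#   ShapeProp(gm).propagate(torch.randn(2, 4))
#   tensor_nodes = [n for n in gm.graph.nodes if is_node_output_tensor(n)]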
@compatibility(is_backward_compatible=False)
class FxNetAccFusionsFinder:
"""
Finds groups of connected ACC nodes that pass non-tensor data between each other.
Such groups are called fusion groups.
"""
def __init__(self, module: torch.fx.GraphModule, acc_nodes: NodeSet):
self.module = module
self.nodes = list(module.graph.nodes)
self.acc_nodes = acc_nodes
@dataclass
class FusionGroup:
# The smallest idx of nodes in the fusion group after topologically sorting all the nodes in the model.
top_node_idx: int
# Nodes in this fusion group.
nodes: NodeSet
# Inputs to this fusion group.
inputs: NodeSet
# Nodes in the fusion group that haven't been processed yet.
nodes_need_process: NodeSet
def add_node(self, node):
"""
Add a node to fusion group.
"""
if node in self.nodes:
return
self.nodes_need_process.add(node)
self.nodes.add(node)
self.inputs.discard(node)
self.inputs.update(
{
n
for n in node.all_input_nodes
if n.op in CALLABLE_NODE_OPS and n not in self.nodes
}
)
def recursive_add_node(
self,
fusion_group: "FxNetAccFusionsFinder.FusionGroup",
inputs: Union[NodeSet, NodeList],
):
"""
Start from the inputs and go in reverse topological order. If any upstream node
is in the fusion group, add all the nodes on this path to the fusion group.
"""
for arg in inputs:
# Skip placeholder and get_attr because they won't be in the fusion group.
if arg.op not in CALLABLE_NODE_OPS:
continue
# If the node has smaller idx, it's already an upstream node of the fusion
# group. We don't need to check it anymore.
if self.nodes.index(arg) < fusion_group.top_node_idx:
continue
# If the node is in the fusion group, return True.
if arg in fusion_group.nodes:
return True
# Check the upstream nodes of the node, if any of them is in the fusion group
# we'll add this node to fusion group and return True.
if self.recursive_add_node(fusion_group, arg.all_input_nodes):
fusion_group.add_node(arg)
return True
return False
def __call__(self) -> Dict[torch.fx.Node, NodeSet]:
result: Dict[torch.fx.Node, NodeSet] = {}
acc_nodes = list(self.acc_nodes)
for node in acc_nodes:
if node in result:
continue
if node.op not in CALLABLE_NODE_OPS:
continue
if "tensor_meta" in node.meta:
continue
if node not in self.acc_nodes:
continue
fusion_group: "FxNetAccFusionsFinder.FusionGroup" = self.FusionGroup(
top_node_idx=self.nodes.index(node),
nodes={node},
inputs=set(node.all_input_nodes),
nodes_need_process={node},
)
while fusion_group.nodes_need_process:
node = fusion_group.nodes_need_process.pop()
self.recursive_add_node(fusion_group, fusion_group.inputs)
# Optionally add downstream nodes
if "tensor_meta" not in node.meta:
for user in node.users:
if user.op not in CALLABLE_NODE_OPS:
continue
if user in fusion_group.nodes:
continue
fusion_group.add_node(user)
self.recursive_add_node(fusion_group, fusion_group.inputs)
# Add some upstream nodes
for arg in node.all_input_nodes:
if arg.op not in CALLABLE_NODE_OPS:
continue
if "tensor_meta" in arg.meta:
continue
if arg in fusion_group.nodes:
continue
fusion_group.add_node(arg)
fusion_group.top_node_idx = min(
fusion_group.top_node_idx, self.nodes.index(arg)
)
self.recursive_add_node(fusion_group, fusion_group.inputs)
if not (set(fusion_group.nodes) <= self.acc_nodes):
self.acc_nodes -= fusion_group.nodes
else:
for n in fusion_group.nodes:
result[n] = fusion_group.nodes
return result
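# Illustrative sketch (not part of the original file): finding fusion groups
# among a set of accelerator-supported nodes. The node-selection policy below
# is a placeholder; real callers pass the nodes chosen by their own splitter.
#
#   acc_nodes = {n for n in gm.graph.nodes if n.op in CALLABLE_NODE_OPS}
#   fusions = FxNetAccFusionsFinder(gm, acc_nodes)()
#   for node, group in fusions.items():
#       print(node.name, "->", sorted(n.name for n in group))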
@compatibility(is_backward_compatible=False)
def legalize_graph(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
"""
Replace the graph of the given GraphModule with one that contains the same nodes as the
original, but in topologically sorted order.
This is used, for example, by the merge_matmul transformation, which disturbs the topologically sorted
order of its input GraphModule, so that this order is restored before further transformation.
Arguments:
gm: The graph module to topologically sort. It is modified in-place.
Returns:
The graph module in-place sorted
"""
indeg = {node: 0 for node in gm.graph.nodes}
new_graph = torch.fx.Graph()
# Track how many unfulfilled dependencies each node has
for node in gm.graph.nodes:
for user in node.users:
indeg[user] += 1
queue: collections.deque = collections.deque()
# Add all nodes with no dependencies to the queue
for node in gm.graph.nodes:
if indeg[node] == 0:
queue.append(node)
env: Dict[torch.fx.Node, torch.fx.Node] = {}
# Pop nodes from the queue, and add nodes that have had all their
# dependencies fulfilled
while len(queue) > 0:
cur = queue.popleft()
env[cur] = new_graph.node_copy(cur, lambda x: env[x])
for user in cur.users:
indeg[user] -= 1
if indeg[user] == 0:
queue.append(user)
# If the new graph's size is not as large as the old one, then there must be
# a cycle (i.e. some node's dependencies were not satisfied.)
if len(new_graph.nodes) < len(gm.graph.nodes):
raise RuntimeError(f"Input graph has cycles, unable to add {[node for node in indeg if indeg[node] != 0]}")
gm.graph = new_graph
return gm
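# Illustrative sketch (not part of the original file): after a transformation
# that appends nodes out of topological order, `legalize_graph` rebuilds the
# graph in a valid order (or raises if a cycle was introduced). `some_transform`
# is a hypothetical pass.
#
#   gm = some_transform(gm)
#   gm = legalize_graph(gm)
#   gm.graph.lint()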
| pytorch-master | torch/fx/passes/tools_common.py |
from dataclasses import dataclass, field
from typing import List, Optional, Dict
import torch.fx
from torch.fx.graph import map_arg
from .tools_common import NodeList, NodeSet
from torch.fx._compatibility import compatibility
from torch.fx.passes.utils import lift_subgraph_as_module, HolderModule
__all__ = ['getattr_recursive', 'setattr_recursive', 'Component', 'split_by_tags']
@compatibility(is_backward_compatible=False)
def getattr_recursive(obj, name):
for layer in name.split("."):
if hasattr(obj, layer):
obj = getattr(obj, layer)
else:
return None
return obj
@compatibility(is_backward_compatible=False)
def setattr_recursive(obj, attr, value):
if "." not in attr:
setattr(obj, attr, value)
else:
layer = attr.split(".")
setattr_recursive(getattr(obj, layer[0]), ".".join(layer[1:]), value)
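# Illustrative sketch (not part of the original file): both helpers accept a
# dotted attribute path, so nested submodules can be read and written without
# manual getattr chains. The module layout below is made up.
#
#   root = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
#   w = getattr_recursive(root, "0.weight")            # same as root[0].weight
#   setattr_recursive(root, "0.weight", torch.nn.Parameter(torch.zeros(4, 4)))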
@compatibility(is_backward_compatible=False)
@dataclass
class Component:
"""
A component serves as a container for a subgraph we want to create afterwards.
"""
graph: torch.fx.Graph
order: int
name: str
# Stores the placeholder nodes in `graph`.
input_placeholders: List = field(default_factory=list)
# Store the nodes in original graph that are placeholder in `graph`.
orig_inputs: List = field(default_factory=list)
# Store the nodes in original graph that are outputs in `graph`.
orig_outputs: List = field(default_factory=list)
# Mapping from get_attr node in original graph to get_attr node in `graph`.
getattr_maps: Dict[torch.fx.Node, torch.fx.Node] = field(default_factory=dict)
constructor_args: List[str] = field(default_factory=list)
gm: Optional[torch.fx.GraphModule] = None
@compatibility(is_backward_compatible=False)
def split_by_tags(gm: torch.fx.GraphModule, tags: List[str]) -> torch.fx.GraphModule:
"""
Splits a GraphModule using tags on its graph nodes. We honor the order of
tags. For example, we have tags = ["a", "b", "c"], the function will create
the initial submodules in the order of "a_0", "b_1", "c_2".
To set a tag:
gm.graph.nodes[idx].tag = "mytag"
This will result in all nodes with the same tag being extracted and placed in their
own submodule. For placeholder, output and get_attr node, the tag is ignored. placeholder
and output nodes are created when needed while get_attr nodes get copied to submodules
where they are used.
Given the following module def:
class SimpleModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = torch.nn.Linear(...)
self.linear2 = torch.nn.Linear(...)
self.linear3 = torch.nn.Linear(...)
def forward(self, in1, in2):
r1 = self.linear1(in1)
r2 = self.linear2(in2)
r3 = torch.cat([r1, r2])
return self.linear3(r3)
Marking the node corresponding to in1 with the tag sc.REQUEST_ONLY.lower() results in the following split:
ro_0:
def forward(self, in1):
self = self.root
linear1 = self.linear1(in1)
return linear1
main_1:
def forward(self, in2, linear1):
self = self.root
linear2 = self.linear2(in2)
cat_1 = torch.cat([linear1, linear2])
linear3 = self.linear3(cat_1)
return linear3
main_0:
def forward(self, in1, in2):
self = self.root
ro_0 = self.ro_0(in1)
main_1 = self.main_1(in2, ro_0)
return main_1
"""
def flatten(x: torch.fx.node.Argument) -> NodeList:
"""
Stores nodes in x to a list and returns the list.
"""
r: NodeList = []
map_arg(x, r.append)
return r
# Mapping from node in original module to node in created submodule.
node_remapping: Dict[torch.fx.Node, torch.fx.Node] = {}
# Mapping from node in original module or created submodules to
# corresponding component.
node_to_component: Dict[torch.fx.Node, Component] = {}
# Mapping from tag to the corresponding component.
tag_to_component: Dict[str, Component] = {}
# Stores all components.
all_components: List[Component] = []
# Stores nodes that will be used in main graph.
used_in_main: NodeSet = set()
# Main graph after split.
main_g = torch.fx.Graph()
# Mapping from node in original module to node in main graph after split.
main_remapping: Dict[torch.fx.Node, torch.fx.Node] = {}
# Output node of original module.
output_node: Optional[torch.fx.Node] = None
# Create a component for each tag, we don't expect to create other components afterwards.
for tag in tags:
comp = Component(torch.fx.Graph(), len(all_components), f"{tag}")
all_components.append(comp)
tag_to_component[tag] = comp
# Traverse the nodes in original graph and take care of them.
for node in gm.graph.nodes:
if node.op == "output":
if output_node is not None:
raise RuntimeError("Multiple output nodes in graph!")
output_node = node
continue
# Placeholders in the original graph get copied to main graph.
if node.op == "placeholder":
main_remapping[node] = main_g.placeholder(node.name, type_expr=node.type)
continue
# Get_attr nodes are ignored because we are not tagging them.
# Instead, we copy them directly to the submodules that use them afterwards.
if node.op == "get_attr":
continue
# Now we process callable nodes which are nodes with op of call_module,
# call_function or call_method. Every callable node should be tagged.
assert hasattr(node, "tag")
upstream_components = [
node_to_component[x]
for x in flatten(node.args) + flatten(node.kwargs)
if x.op not in {"placeholder", "get_attr"}
]
comp = tag_to_component[node.tag]
node_to_component[node] = comp
# Max order of upstream components.
mx = max((c.order for c in upstream_components), default=0)
# Expect the component for `node` to have a higher order than its upstream components.
assert comp.order >= mx
# Map an input of `node` to nodes in the component's graph.
def remap_func(x):
# If input is a get_attr node, copy it to current component's graph.
# Returns the get_attr node in current component's graph.
if x.op == "get_attr":
if x not in comp.getattr_maps:
comp.getattr_maps[x] = comp.graph.get_attr(
x.target, type_expr=x.type
)
return comp.getattr_maps[x]
# If input is not a placeholder, it should have been put into a component
# already. If it's the current component then we return the corresponding
# node in the component.
if x.op != "placeholder" and node_to_component[x] == comp:
return node_remapping[x]
# If input is a placeholder or it's in other components, we want to make it
# as a placeholder in current component's graph.
if x not in comp.orig_inputs:
comp.orig_inputs.append(x)
comp.input_placeholders.append(
comp.graph.placeholder(x.name, type_expr=x.type)
)
used_in_main.add(x)
return comp.input_placeholders[
next(i for i, y in enumerate(comp.orig_inputs) if x is y)
]
n = comp.graph.node_copy(node, remap_func)
n.tag = node.tag # type: ignore[attr-defined]
node_remapping[node] = n
node_to_component[n] = comp
if output_node is None:
raise RuntimeError("Graph had no output node!")
for x in flatten(output_node.args[0]):
if x.op == "get_attr":
# We don't need components mapping for nodes of type "get_attr"
# that are consumed by the output. Only need to make sure we create
# corresponding counterparts in the resulting graph.
main_remapping[x] = main_g.get_attr(x.name, type_expr=x.type)
else:
# All component results consumed by the output node should be
# marked as "used in main".
used_in_main.add(x)
# If a node is used in main graph then we mark it as an output in the component
# it belongs to.
for n in used_in_main:
if n.op != "placeholder":
node_to_component[n].orig_outputs.append(n)
# Now we create a graphmodule for each component.
for comp in all_components:
outs = tuple(map(node_remapping.__getitem__, comp.orig_outputs))
# Take care of the args of the FX output node. If there's a single
# output then the output node args look like (output_single), else
# if there are multiple outputs then the output node args look like
# ((output_0, output_1, ...)).
comp.graph.output(outs[0] if len(outs) == 1 else outs)
comp.gm = lift_subgraph_as_module(gm, comp.graph)
# Create a call_module node in main graph.
main_node = main_g.call_module(
comp.name,
args=tuple(map(main_remapping.__getitem__, comp.orig_inputs)),
kwargs=None,
)
if len(outs) == 1:
main_remapping[comp.orig_outputs[0]] = main_node
else:
for i, o in enumerate(comp.orig_outputs):
# Use Proxy to record getitem access.
main_remapping[o] = torch.fx.Proxy(main_node)[i].node # type: ignore[index]
main_g.output(map_arg(output_node.args[0], main_remapping.__getitem__))
main_root = HolderModule({comp.name: comp.gm for comp in all_components})
# If the output nodes consumes get_attr directly in the original graph,
# then we need to make sure get_attr is copied to the new graph.
for x in flatten(output_node.args[0]):
if x.op == "get_attr":
setattr(main_root, x.name, getattr_recursive(gm, x.target)) # type: ignore[arg-type]
return torch.fx.GraphModule(main_root, main_g)
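# Illustrative usage sketch (not part of the original file): tag every callable
# node, then split. The single-tag policy below is arbitrary and only meant to
# show the expected input format; `my_module` is a hypothetical module.
#
#   gm = torch.fx.symbolic_trace(my_module)
#   for node in gm.graph.nodes:
#       if node.op in {"call_module", "call_function", "call_method"}:
#           node.tag = "backbone"
#   split = split_by_tags(gm, ["backbone"])
#   print(split.graph)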
| pytorch-master | torch/fx/passes/split_utils.py |
import torch
import torch.fx
import traceback
from torch.fx.node import Node, map_aggregate
from typing import Any, Tuple, NamedTuple, Optional, Dict
from torch.fx._compatibility import compatibility
__all__ = ['TensorMetadata', 'ShapeProp']
@compatibility(is_backward_compatible=True)
class TensorMetadata(NamedTuple):
# TensorMetadata is a structure containing pertinent information
# about a tensor within a PyTorch program.
# General Tensor metadata
shape : torch.Size
dtype : torch.dtype
requires_grad : bool
stride : Tuple[int]
memory_format : Optional[torch.memory_format]
# Quantization metadata
is_quantized : bool
qparams: Dict[str, Any]
def _extract_tensor_metadata(result : torch.Tensor) -> TensorMetadata:
"""
Extract a TensorMetadata NamedTuple describing `result`.
"""
shape = result.shape
dtype = result.dtype
requires_grad = result.requires_grad
stride = result.stride()
memory_formats = {
torch.contiguous_format,
torch.channels_last,
torch.channels_last_3d,
}
memory_format = None
for query_format in memory_formats:
if result.is_contiguous(memory_format=query_format):
memory_format = query_format
break
is_quantized = result.is_quantized
qparams: Dict[str, Any] = {}
if is_quantized:
qscheme = result.qscheme()
qparams["qscheme"] = qscheme
if qscheme in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
qparams["scale"] = result.q_scale() # type: ignore[assignment]
qparams["zero_point"] = result.q_zero_point() # type: ignore[assignment]
elif qscheme in {torch.per_channel_affine, torch.per_channel_affine_float_qparams, torch.per_channel_symmetric}:
# In this branch, scale and zero_point are expected to be tensors,
# we store the values as immutable_list in TensorMetadata for
# easier serialization downstream
qparams["scale"] = result.q_per_channel_scales().tolist() # type: ignore[assignment]
qparams["zero_point"] = result.q_per_channel_zero_points().tolist() # type: ignore[assignment]
qparams["axis"] = result.q_per_channel_axis() # type: ignore[assignment]
return TensorMetadata(
shape, dtype, requires_grad, stride, memory_format, is_quantized, qparams)
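# Illustrative sketch (not part of the original file): metadata extracted for a
# plain contiguous float tensor. Fields beyond shape/dtype/stride depend on the
# input tensor.
#
#   t = torch.randn(2, 3)
#   meta = _extract_tensor_metadata(t)
#   # meta.shape == torch.Size([2, 3]), meta.dtype == torch.float32,
#   # meta.stride == (3, 1), meta.is_quantized is False, meta.qparams == {}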
@compatibility(is_backward_compatible=True)
class ShapeProp(torch.fx.Interpreter):
"""
Execute an FX graph Node-by-Node and
record the shape and type of the result
into the corresponding node.
Example:
In this example, we record the shape
and data type of a module given
an example input ``torch.randn(50, D_in)``.
We print the name, shape and dtype of each node.
class TwoLayerNet(torch.nn.Module):
def __init__(self, D_in, H, D_out):
super(TwoLayerNet, self).__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.linear2 = torch.nn.Linear(H, D_out)
def forward(self, x):
h_relu = self.linear1(x).clamp(min=0)
y_pred = self.linear2(h_relu)
return y_pred
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
model = TwoLayerNet(D_in, H, D_out)
gm = torch.fx.symbolic_trace(model)
sample_input = torch.randn(50, D_in)
ShapeProp(gm).propagate(sample_input)
for node in gm.graph.nodes:
print(node.name, node.meta['tensor_meta'].dtype,
node.meta['tensor_meta'].shape)
The output of this code is:
x torch.float32 torch.Size([50, 1000])
linear1 torch.float32 torch.Size([50, 100])
clamp_1 torch.float32 torch.Size([50, 100])
linear2 torch.float32 torch.Size([50, 10])
output torch.float32 torch.Size([50, 10])
Args:
module (GraphModule): The module to be executed
"""
def run_node(self, n : Node) -> Any:
try:
result = super().run_node(n)
except Exception:
traceback.print_exc()
raise RuntimeError(
f"ShapeProp error for: node={n.format_node()} with "
f"meta={n.meta}"
)
found_tensor = False
def extract_tensor_meta(obj):
if isinstance(obj, torch.Tensor):
nonlocal found_tensor
found_tensor = True
return _extract_tensor_metadata(obj)
else:
return obj
meta = map_aggregate(result, extract_tensor_meta)
if found_tensor:
n.meta['tensor_meta'] = meta
n.meta['type'] = type(result)
return result
def propagate(self, *args):
"""
Run `module` via interpretation and return the result,
recording the shape and type of each node along the way.
Args:
*args (Tensor): the sample input.
Returns:
Any: The value returned from executing the Module
"""
return super().run(*args)
| pytorch-master | torch/fx/passes/shape_prop.py |
import torch
from torch.fx.graph_module import GraphModule
from typing import Callable, List, Dict, Any, Optional
from torch.fx._compatibility import compatibility
import inspect
__all__ = ['Partition', 'split_module']
@compatibility(is_backward_compatible=True)
class Partition:
def __init__(self, name: str):
self.name: str = name
self.submod_name = f'submod_{name}'
self.node_names: List[str] = []
self.inputs: Dict[str, None] = {}
self.outputs: Dict[str, None] = {}
self.partitions_dependent_on: Dict[str, None] = {}
self.partition_dependents: Dict[str, None] = {}
self.graph : torch.fx.graph.Graph = torch.fx.graph.Graph()
self.environment : Dict[torch.fx.node.Node, torch.fx.node.Node] = {}
self.targets : Dict[str, Any] = {}
def __repr__(self) -> str:
return f"name: {self.name},\n" \
f" nodes: {self.node_names},\n" \
f" inputs: {self.inputs},\n" \
f" outputs: {self.outputs},\n" \
f"  partitions dependent on: {self.partitions_dependent_on},\n" \
f"  partition dependents: {self.partition_dependents}"
# Creates subgraphs out of main graph
@compatibility(is_backward_compatible=True)
def split_module(
m: GraphModule,
root_m: torch.nn.Module,
split_callback: Callable[[torch.fx.node.Node], int],
qualname_map: Optional[Dict[str, str]] = None,
):
"""
Creates subgraphs out of main graph
Args:
m (GraphModule): Graph module to split
root_m (torch.nn.Module): root nn module. Not currently used. Included
because the root nn module is usually transformed via
torch.fx._symbolic_trace.symbolic_trace (see example below)
split_callback (Callable[[torch.fx.node.Node], int]): Callable function
that maps a given Node instance to a numeric partition identifier.
split_module will use this function as the policy for which operations
appear in which partitions in the output Module.
qualname_map: Optional[Dict[str, str]]: optional output parameter that returns a
mapping from new target names in the module after split to old target
names in the original module.
Returns:
GraphModule: the module after split.
Example:
This is a sample setup:
import torch
from torch.fx.symbolic_trace import symbolic_trace
from torch.fx.graph_module import GraphModule
from torch.fx.node import Node
from torch.fx.passes.split_module import split_module
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x, y):
z = self.linear(x + self.param).clamp(min=0.0, max=1.0)
w = self.linear(y).clamp(min=0.0, max=1.0)
return z + w
# symbolically trace model
my_module = MyModule()
my_module_traced = symbolic_trace(my_module)
# random mod partitioning
partition_counter = 0
NPARTITIONS = 3
def mod_partition(node: Node):
global partition_counter
partition = partition_counter % NPARTITIONS
partition_counter = (partition_counter + 1) % NPARTITIONS
return partition
# split module in module with submodules
module_with_submodules = split_module(
my_module_traced, my_module, mod_partition
)
Output looks like this. Original graph is broken into partitions
> print(module_with_submodules)
GraphModule(
(submod_0): GraphModule(
(linear): Linear(in_features=4, out_features=5, bias=True)
)
(submod_1): GraphModule(
(linear): Linear(in_features=4, out_features=5, bias=True)
)
(submod_2): GraphModule()
)
def forward(self, x, y):
param = self.param
submod_0 = self.submod_0(x, param, y); x = param = y = None
getitem = submod_0[0]
getitem_1 = submod_0[1]; submod_0 = None
submod_1 = self.submod_1(getitem, getitem_1); getitem = getitem_1 = None
getitem_2 = submod_1[0]
getitem_3 = submod_1[1]; submod_1 = None
submod_2 = self.submod_2(getitem_2, getitem_3); getitem_2 = getitem_3 = None
return submod_2
Output of split module is the same as output of input traced module.
This is an example within a test setting:
> orig_out = my_module_traced(x, y)
> submodules_out = module_with_submodules(x, y)
> self.assertEqual(orig_out, submodules_out)
True
"""
partitions: Dict[str, Partition] = {}
orig_nodes: Dict[str, torch.fx.node.Node] = {}
def record_cross_partition_use(def_node : torch.fx.node.Node, use_node : Optional[torch.fx.node.Node]): # noqa: B950
def_partition_name = getattr(def_node, '_fx_partition', None)
use_partition_name = getattr(use_node, '_fx_partition', None)
if def_partition_name != use_partition_name:
if def_partition_name is not None:
def_partition = partitions[def_partition_name]
def_partition.outputs.setdefault(def_node.name)
if use_partition_name is not None:
def_partition.partition_dependents.setdefault(use_partition_name)
if use_partition_name is not None:
use_partition = partitions[use_partition_name]
use_partition.inputs.setdefault(def_node.name)
if def_partition_name is not None:
use_partition.partitions_dependent_on.setdefault(def_partition_name)
# split nodes into partitions
for node in m.graph.nodes:
orig_nodes[node.name] = node
# TODO currently placeholders/parameters aren't put into random partitions,
# rather they're added to the graphs where they are used down below
if node.op in ["placeholder", "get_attr"]:
continue
if node.op == 'output':
torch.fx.graph.map_arg(node.args[0], lambda n: record_cross_partition_use(n, None))
continue
partition_name = str(split_callback(node))
# add node to partitions
partition = partitions.get(partition_name)
if partition is None:
partitions[partition_name] = partition = Partition(partition_name)
partition.node_names.append(node.name)
node._fx_partition = partition_name
torch.fx.graph.map_arg(node.args, lambda def_node: record_cross_partition_use(def_node, node))
torch.fx.graph.map_arg(node.kwargs, lambda def_node: record_cross_partition_use(def_node, node)) # noqa: B950
# find partitions with no dependencies
root_partitions : List[str] = []
for partition_name, partition in partitions.items():
if not len(partition.partitions_dependent_on):
root_partitions.append(partition_name)
# check partitions for circular dependencies and create topological partition ordering
sorted_partitions : List[str] = []
while root_partitions:
root_partition = root_partitions.pop()
sorted_partitions.append(root_partition)
for dependent in partitions[root_partition].partition_dependents:
partitions[dependent].partitions_dependent_on.pop(root_partition)
if not partitions[dependent].partitions_dependent_on:
root_partitions.append(dependent)
if len(sorted_partitions) != len(partitions):
raise RuntimeError("cycle exists between partitions!")
# add placeholders to partitions
for partition_name in sorted_partitions:
partition = partitions[partition_name]
for input in partition.inputs:
placeholder = partition.graph.placeholder(input)
placeholder.meta = orig_nodes[input].meta.copy()
partition.environment[orig_nodes[input]] = placeholder
# Transform nodes and collect targets for partition's submodule
for node in m.graph.nodes:
if hasattr(node, '_fx_partition'):
partition = partitions[node._fx_partition]
# swap out old graph nodes in kw/args with references to new nodes in this submodule
environment = partition.environment
gathered_args = torch.fx.graph.map_arg(node.args, lambda n : environment[n])
gathered_kwargs = torch.fx.graph.map_arg(node.kwargs, lambda n : environment[n])
if node.op not in ['call_module', 'get_attr']:
target = node.target
else:
target_atoms = node.target.split('.')
target_attr = m
for atom in target_atoms:
if not hasattr(target_attr, atom):
raise RuntimeError(f'Operator target {node.target} not found!')
target_attr = getattr(target_attr, atom)
# target = target_atoms[-1]
target = '_'.join(target_atoms)
partition.targets[target] = target_attr
# Fill in the passed-in mapping from new qualname to old qualname
if qualname_map is not None:
# When creating the split module later, the submodules will have
# path prefix matching the corresponding partition's submod_name
qualname = f'{partition.submod_name}.{target}'
qualname_map[qualname] = node.target
assert isinstance(gathered_args, tuple)
assert isinstance(gathered_kwargs, dict)
new_node = partition.graph.create_node(op=node.op, target=target, args=gathered_args,
kwargs=gathered_kwargs)
new_node.meta = node.meta.copy()
partition.environment[node] = new_node
# Set up values to construct base module
base_mod_env : Dict[str, torch.fx.node.Node] = {}
base_mod_graph : torch.fx.graph.Graph = torch.fx.graph.Graph()
base_mod_attrs : Dict[str, torch.fx.graph_module.GraphModule] = {}
for node in m.graph.nodes:
if node.op == 'placeholder':
default_value = node.args[0] if len(node.args) > 0 else inspect.Signature.empty
base_mod_env[node.name] = base_mod_graph.placeholder(
node.target, type_expr=node.type, default_value=default_value)
base_mod_env[node.name].meta = node.meta.copy()
elif node.op == 'get_attr':
base_mod_env[node.name] = base_mod_graph.get_attr(node.target)
base_mod_env[node.name].meta = node.meta.copy()
attr_val = m
for atom in node.target.split('.'):
if not hasattr(attr_val, atom):
raise RuntimeError(f'Node target {node.target} not found!')
attr_val = getattr(attr_val, atom)
base_mod_attrs[node.target] = attr_val
# Do some things iterating over the partitions in topological order again:
# 1) Finish off submodule Graphs by setting corresponding outputs
# 2) Construct GraphModules for each submodule
# 3) Construct the base graph by emitting calls to those submodules in
# topological order
for partition_name in sorted_partitions:
partition = partitions[partition_name]
# Set correct output values
output_vals = tuple(partition.environment[orig_nodes[name]] for name in partition.outputs)
output_vals = output_vals[0] if len(output_vals) == 1 else output_vals # type: ignore[assignment]
partition.graph.output(output_vals)
# Construct GraphModule for this partition
base_mod_attrs[partition.submod_name] = torch.fx.graph_module.GraphModule(partition.targets, partition.graph) # noqa: B950
# Emit call in base graph to this submodule
output_val = base_mod_graph.call_module(partition.submod_name, tuple(base_mod_env[name] for name in partition.inputs))
if len(partition.outputs) > 1:
# Unpack multiple return values from submodule
output_val_proxy = torch.fx.proxy.Proxy(output_val)
for i, output_name in enumerate(partition.outputs):
base_mod_env[output_name] = output_val_proxy[i].node # type: ignore[index]
else:
base_mod_env[list(partition.outputs)[0]] = output_val
for node in m.graph.nodes:
if node.op == 'output':
base_mod_graph.output(torch.fx.graph.map_arg(node.args[0], lambda n : base_mod_env[n.name])) # noqa: B950
return torch.fx.graph_module.GraphModule(base_mod_attrs, base_mod_graph)
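# Illustrative sketch (not part of the original file): the optional
# `qualname_map` argument can be used to recover the original target names
# after the split. `my_module_traced`, `my_module` and `mod_partition` refer to
# the hypothetical setup from the docstring above.
#
#   qualname_map: Dict[str, str] = {}
#   module_with_submodules = split_module(
#       my_module_traced, my_module, mod_partition, qualname_map
#   )
#   print(qualname_map)   # e.g. {"submod_0.linear": "linear", ...}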
| pytorch-master | torch/fx/passes/split_module.py |
import torch
from torch.fx import Node
from torch.fx._compatibility import compatibility
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.utils._pytree import tree_map
from torch.multiprocessing.reductions import StorageWeakRef
import _operator
from enum import Enum
import itertools
from typing import Set, Dict
from collections import defaultdict
__all__ = ['reinplace']
class _ViewType(Enum):
NonView = 0
SingleOutputView = 1
MultiOutputView = 2
def _is_view_op(tgt):
if tgt is not None and isinstance(tgt, torch._ops.OpOverload):
schema = tgt._schema
if len(schema.arguments) > 0:
first_arg = schema.arguments[0]
# check if op is a view
return first_arg.alias_info is not None and not first_arg.alias_info.is_write
def _get_view_type(tgt) -> _ViewType:
if tgt is not None and isinstance(tgt, torch._ops.OpOverload):
schema = tgt._schema
if len(schema.arguments) > 0:
first_arg = schema.arguments[0]
# check if op is a view
if first_arg.alias_info is not None and not first_arg.alias_info.is_write:
# check if op is a multi-output view
if '*' in first_arg.alias_info.after_set:
return _ViewType.MultiOutputView
else:
return _ViewType.SingleOutputView
return _ViewType.NonView
# Stores a bunch of metadata related to functionalization each node.
# Relevant metadata:
# n.meta['fake_result']: FakeTensor (same type as the output of the node, but with FakeTenors instead of Tensors)
# The fake tensor output from running the current node
# n.meta['view_of']: Node
# If the current node n is a view of some base tensor, the 'view_of' field tells us which
# view node was used to generate the current node (a view tensor).
# This information actually makes `fake_result` redundant, but we can use `fake_result`
# to sanity check that our aliasing information is correct.
@compatibility(is_backward_compatible=False)
class _FunctionalizationMetadataProp(torch.fx.Interpreter):
def run_node(self, node: Node):
self.node_counter += 1
result = super().run_node(node)
node.meta['fake_result'] = result
node.meta['node_idx'] = self.node_counter
# (1) Update metadata with the list of nodes that are used by this node
# copy_() doesn't read from its first argument; it writes to it, overwriting previous data.
# We don't want to treat it as "being used as an input".
node_args = node.args
if node.target is torch.ops.aten.copy_.default:
node_args = node_args[1:]
# (2) Update metadata to track aliasing information about view tensor nodes.
if node.op == 'call_function':
view_type = _get_view_type(node.target)
if view_type == _ViewType.SingleOutputView:
assert isinstance(node.args[0], Node)
node.meta['view_of'] = node.args[0]
elif view_type == _ViewType.MultiOutputView:
self.multi_output_view_nodes[node] = node.args[0]
# Check if we returned a multi-output view,
# and we're now grabbing the individual views from the output.
#
# For multi-output views, we want to map each output view to the base,
# but this mapping involves two separate nodes in FX IR.
# e.g. "a, b = x_1.split(...)" becomes:
# %split_tensor : [#users=2] = call_function[target=torch.ops.aten.split.Tensor](args = (%x_1, 2), kwargs = {})
# %getitem : [#users=1] = call_function[target=operator.getitem](args = (%split_tensor, 0), kwargs = {})
# %getitem_1 : [#users=1] = call_function[target=operator.getitem](args = (%split_tensor, 1), kwargs = {})
# And we'd like to set:
# getitem1.meta['view_of'] = x_1
elif node.target is _operator.getitem:
list_arg = node.args[0]
maybe_base_of_view = self.multi_output_view_nodes.get(list_arg, None)
if maybe_base_of_view is not None:
# Note: we could also track indexing info here for multi-output views.
# I don't think this metadata is strictly needed for de-functionalization.
assert isinstance(maybe_base_of_view, Node)
node.meta['view_of'] = maybe_base_of_view
if 'view_of' in node.meta:
# We're linking the current node with its first argument as views.
# Assert here that this is actually the case, and their storages are the same.
assert isinstance(node.meta['fake_result'], FakeTensor)
assert isinstance(node.meta['view_of'].meta['fake_result'], FakeTensor)
view_storage = StorageWeakRef(node.meta['fake_result'].storage())
base_storage = StorageWeakRef(node.meta['view_of'].meta['fake_result'].storage())
assert view_storage == base_storage
return result
def propagate(self, *args):
self.multi_output_view_nodes = {}
self.node_counter = -1
with FakeTensorMode.push() as mode:
fake_args = [mode.from_tensor(a) for a in args]
return super().run(*fake_args)
def _schemas_match(functional_schema, inplace_schema):
names_match = inplace_schema.name.endswith("_") and inplace_schema.name[:-1] == functional_schema.name
arg_types_match = len(functional_schema.arguments) == len(inplace_schema.arguments) and all(
a1.type == a2.type for a1, a2 in zip(functional_schema.arguments, inplace_schema.arguments))
# for the inplace op, its first argument should be mutable
assert inplace_schema.arguments[0].alias_info is not None and inplace_schema.arguments[0].alias_info.is_write
# and its remaining arguments shouldn't be.
assert all(a.alias_info is None for a in inplace_schema.arguments[1:])
return names_match and arg_types_match
# TODO: this should be beefed up to be able to properly re-inplace with:
# - mutating ops (e.g. _fused_moving_avg_obs_fq_helper)
# - out= ops (e.g. angle -> angle.out)
# TODO: we should also figure this info out using torchgen.
def _maybe_get_inplace_op(op):
# __module__ seems broken; it returns torch._ops.aten which doesn't exist
if not isinstance(op, torch._ops.OpOverload):
return None
# Some view ops have inplace variants (as_strided_, etc),
# but we do NOT want the reinplacing pass to directly add these into the program.
# (they'll require extra special handling, and aren't really useful for perf anyway)
if _is_view_op(op):
return None
op_namespace = op.__module__.split(".")[-1]
op_base_name = op.overloadpacket.__name__
maybe_namespace_module = getattr(torch.ops, op_namespace)
maybe_inplace_op = None if maybe_namespace_module is None else getattr(maybe_namespace_module, f'{op_base_name}_', None)
if maybe_inplace_op is None:
return None
inplace_overloads = [
getattr(maybe_inplace_op, overload_name) for overload_name in maybe_inplace_op.overloads()
]
inplace_overloads_with_matching_schemas = [
f
for f in inplace_overloads
if _schemas_match(op._schema, f._schema)
]
# This is for sanity: if foo() and foo_() are both operators,
# we expect them to have compatible schemas.
# (This is asserted by codegen for ATen, but might not be true
# for other arbitrary operators).
assert len(inplace_overloads_with_matching_schemas) == 1
inplace_op = inplace_overloads_with_matching_schemas[0]
return inplace_op
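# Illustrative sketch (not part of the original file): querying for an inplace
# variant. The exact overload returned depends on the ATen build; view ops are
# expected to return None.
#
#   _maybe_get_inplace_op(torch.ops.aten.add.Tensor)          # expected: torch.ops.aten.add_.Tensor
#   _maybe_get_inplace_op(torch.ops.aten.as_strided.default)  # expected: None (view op)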
_VIEW_INVERSE_MAP = {
torch.ops.aten.diagonal_scatter.default: torch.ops.aten.diagonal.default,
torch.ops.aten.select_scatter.default: torch.ops.aten.select.int,
torch.ops.aten.slice_scatter.default: torch.ops.aten.slice.Tensor,
torch.ops.aten.as_strided_scatter.default: torch.ops.aten.as_strided.default,
}
# This function, given a set of (aliased) tensor nodes,
# returns any nodes in the graph that *use* any of the aliases and occur *after* op_index
# in the node ordering.
def _get_all_later_node_usages(tensor_aliases: Set[Node], op_index: int):
def _add_if_tensor(x, set_):
if isinstance(x, FakeTensor):
set_.add(StorageWeakRef(x.storage()))
nodes_used_after = set()
for t in tensor_aliases:
# get all nodes that use the current alias
usage_nodes = t.users
for n in usage_nodes:
# We only care about usages after the current node
if n.meta['node_idx'] <= op_index:
continue
# We also don't care about intermediate view ops.
# They only matter if their output is then used elsewhere
# (either in an out-of-place op, or as an output to the function).
if n in tensor_aliases:
if isinstance(n.target, torch._ops.OpOverload) or n.target == _operator.getitem:
continue
nodes_used_after.add(n)
return nodes_used_after
# Given an op that we're trying to re-inplace, "b = foo(a)",
# And given a {view}_scatter op that shows up later in the graph, "y = {view}_scatter(base, x, args...)"
# Then re-inplacing `foo()` would allow us to remove the `{view}_scatter` op entirely, IF:
# If there are any aliases in the alias_set(a) that satisfy:
# (1) The base of "alias", "alias_base", has the same size/stride/offset metadata as "base"
# (2) The output of running {view}(alias, args...) gives you the same size/stride/offset metadata
# as "alias"
def _get_view_inverse_node_usages(later_node_usages: Set[Node], self_aliases: Set[Node]) -> Set[Node]:
def matching_view_metadata(a, b):
return a.size() == b.size() and \
a.stride() == b.stride() and \
a.storage_offset() == b.storage_offset()
view_inverse_nodes = set()
# Go through them in node order, so we can see chains of view_scatter ops.
for n in sorted(later_node_usages, key=lambda x: x.meta['node_idx']):
if n.target not in _VIEW_INVERSE_MAP:
continue
base = n.args[0]
mutated_view = n.args[1]
assert isinstance(base, Node)
assert isinstance(base.meta['fake_result'], FakeTensor)
assert isinstance(mutated_view, Node)
assert isinstance(mutated_view.meta['fake_result'], FakeTensor)
# Check that this view_inverse op actually corresponds to doing the inverse
# of one of our existing self_alias nodes.
original_view = _VIEW_INVERSE_MAP[n.target]
for self_alias in self_aliases:
# We're looking for some alias of the self arg, "alias",
# that was created from some op `alias = foo(base, args...)`
# such that the current _scatter op "inverts" that foo call.
# We can check that by running the original op again, and checking that the strides match.
if 'view_of' not in self_alias.meta:
continue
self_alias_base = self_alias.meta['view_of']
try:
# Here we're trying to re-use the args from the view_scatter call inside of the corresponding
# view op, which might throw. This just indicates that the view_scatter op isn't a valid inverse
# of the current alias we're looking at.
view_replay_metadata = original_view(self_alias_base.meta['fake_result'], *n.args[2:], **n.kwargs)
expected_metadata = self_alias.meta['fake_result']
# If the alias and its base both have matching metadata, then this view_scatter op is valid to re-inplace.
if matching_view_metadata(self_alias_base.meta['fake_result'], base.meta['fake_result']) and \
matching_view_metadata(view_replay_metadata, expected_metadata):
view_inverse_nodes.add(n)
except Exception:
continue
return view_inverse_nodes
@compatibility(is_backward_compatible=True)
def reinplace(gm, *sample_args):
"""
Given an fx.GraphModule, modifies it to perform "reinplacing",
mutating the nodes of the graph.
We look for out-of-place op call sites like `b = a.add(...)`,
and convert them to be inplace (`b = a.add_(...)`),
as long as the input to the current operator ("a") isn't re-used
anywhere later in the graph.
This pass currently expects to operate on a **functional, ATen** graph.
This can be obtained by running `make_fx(functionalize(f))`.
Sample inputs are needed to determine aliasing relationships of the inputs.
In general, we can't reinplace node `b = a.add(...)` if "a" aliases any of the
inputs to the program.
Given a node "b = foo(a, ...)", the algorithm for re-inplacing is as follows:
(1) Check if foo has a mutating variant. If not, move to the next node.
Note that we ignore view ops (we don't bother to turn `as_strided()`
into `as_strided_()`), as it complicates the algorithm and doesn't
provide meaningful speedups.
Currently, we also only check for an inplace op, `foo_`.
Later, we should beef this up to check for out= or mutable ops.
(2) Check if "a" is an alias of any of the program inputs.
If it is, skip and move to the next node.
Inplace'ing an op that would cause it to mutate a program input is not sound,
because that would be a side effect visible to the user.
NOTE: there's a future optimization that we should make:
if "a" is a (alias of a) program input, but later in the program
there is a node that looks like "a.copy_(...)",
Then re-inplacing is ok to do - we are temporarily re-using a's buffer,
which will later be overwritten by the copy_() call.
This will be an important optimization to have for programs that mutate
their inputs. It currently isn't implemented though.
(3) Check that "a" and all of its outstanding aliases are not used anywhere
later in the graph. If this is the case, then it's safe to re-inplace
to "b = foo_(a)".
There are a few caveats to this, explained in more detail below:
(a) If "a" is used later as an argument to a view op, that is okay.
It's only a problem if "a" (or that view) is later passed
into a normal operator, or if it is returned as the program output.
(b) If "a" is a repeat argument in `foo()`, then don't reinplace.
Most ATen kernels don't make any guarantees that this is sound,
e.g. if you do aten.mul_(a, a).
So we'll just ban re-inplacing in this case.
(c) If "a" is used as an input into a view "inverse" / "scatter"
operator, it is potentially fine to re-inplace
(and remove that scatter operator from the graph).
See below for a more detailed example.
NOTE: there is an optimization in this step that is crucial
to fully recovering performance from functionalization.
Given this program:
def f(x):
a = torch.ops.aten.add(x, x)
b = torch.ops.aten.diagonal(a)
torch.ops.aten.fill_(b, 0)
return a
Functionalization will emit the following:
def f(x):
a = torch.ops.aten.add(x, x)
b = torch.ops.aten.diagonal(a, 0, 1)
b_updated = torch.ops.aten.fill(b, 0)
a_updated = torch.ops.aten.diagonal_scatter(a, b_updated, 0, 1)
return a_updated
Ordinarily, we would not be able to reinplace the fill,
because "b" aliases with "a" which is used by the diagonal_scatter call.
"re-inplacing" is on the hook for figuring out that it is ok to
completely remove the expensive diagonal_scatter call, if we re-inplace the add().
So, for every `alias in alias_set(a)`, instead of checking
that "alias" is not used anywhere later in the graph,
we check that
EITHER:
(a) alias is not used anywhere later in the graph
OR:
(b) alias is used exactly once later on in the graph,
in the following op:
out = foo_scatter(alias, x, args...)
where the following must hold:
(i) "foo_scatter" is the "inverse" operator for foo.
This only applies to "foo" ops that are view operators,
which view into a subset of the original tensor's memory.
In practice, there are ~4 operators where this applies:
diagonal -> diagonal_scatter
slice -> slice_scatter
select -> select_scatter
as_strided -> as_strided_scatter
(ii) "args..." are the same between the foo() and foo_scatter() calls.
(4) Finally, after converting "b = foo(a)" into "foo_(a)",
we need to find all later nodes that use "b" as an argument
and update them to take in "a" instead.
Note that for the majority of inplace ops, this isn't actually necessary
(because most inplace ops return "self" as their output).
This isn't generally true for all mutable ops though, which is why
we need to actually replace all of the arguments.
We also need to update our metadata of Dict[StorageWeakRef, Set[Node]],
That maps a given tensor storage to the set of all nodes that take in that storage
as an input.
Specifically, re-inplacing `b = foo(a)` causes "a" and "b"'s sets to get fused
together.
(5) Any "view_inverse/scatter" nodes that were identified as "it's ok to ignore them"
during step (3) get manually deleted from the graph.
Their outputs are no longer used, so technically standard DCE would be able
to do this, but we can no longer run FX's DCE pass now that we have mutable
ops in the graph.
"""
_FunctionalizationMetadataProp(gm).propagate(*sample_args)
# Useful debug printing
# def _print(x):
# if isinstance(x, FakeTensor):
# print(f'fake_result: {StorageWeakRef(x.storage()).cdata}')
# for n in gm.graph.nodes:
# print(n.format_node())
# if hasattr(n, 'meta'):
# print(f'node_idx: {n.meta["node_idx"]}')
# if 'fake_result' in n.meta:
# tree_map(_print, n.meta['fake_result'])
# if 'view_of' in n.meta:
# print(f'view_of: {str(n.meta["view_of"])}')
# print()
# We need to know which nodes correspond to inputs (or their aliases)
# so we know not to re-inplace them.
# NOTE: later, we'll need to add an optimization for fully recovering performance
# on programs that mutate inputs.
input_storages = set(StorageWeakRef(node.meta['fake_result'].storage()) for node in gm.graph.nodes if node.op == 'placeholder')
# We also need to know for a given node, what are all of its aliasing nodes.
storage_to_nodes: Dict[StorageWeakRef, Set[Node]] = defaultdict(set)
for n in gm.graph.nodes:
if 'fake_result' in n.meta:
# Tree-mapping because some ops can return lists of tensors.
def _add_to_map(x):
if isinstance(x, FakeTensor):
storage_to_nodes[StorageWeakRef(x.storage())].add(n)
tree_map(_add_to_map, n.meta['fake_result'])
# inplace-ify functional ops, subject to the constraints written below.
all_later_view_inverse_node_usages = set()
for idx, node in enumerate(gm.graph.nodes):
if node.op == 'call_function':
# Step 1: Check to see if this operator has an inplace variant.
maybe_inplace_op = _maybe_get_inplace_op(node.target)
if maybe_inplace_op is None:
continue
# This is a proxy check for ensuring that the first argument is "tensor-like"
# (This should be the case for all ops with inplace variants in ATen,
# although we technically don't have guarantees for custom ops).
assert len(node.target._schema.arguments) > 0
assert 'Tensor' in str(node.target._schema.arguments[0].type)
# Step 2: ensure that the op we're trying to re-inplace isn't a program input.
self_arg = node.args[0]
self_arg_name = self_arg.name
self_arg_storage = StorageWeakRef(self_arg.meta['fake_result'].storage())
if self_arg_storage in input_storages:
# TODO: later, add the optimization for handling `copy_()` calls in the graph.
continue
if len([x for x in node.args if x is self_arg]) > 1:
# Step (3b) in the original description.
# Calling stuff like aten.mul_(a, a) isn't guaranteed to be sound,
# so we prevent re-inplacing in this case.
continue
self_arg_storage = StorageWeakRef(self_arg.meta['fake_result'].storage())
curr_node_storage = StorageWeakRef(node.meta['fake_result'].storage())
self_aliases = storage_to_nodes[self_arg_storage]
# First, we find all later usages of any of the aliases of self_arg.
later_node_usages = _get_all_later_node_usages(self_aliases, node.meta['node_idx'])
# Then, we check if any of those later usages are actually view_scatter ops
# that are safe to fully remove.
later_view_inverse_node_usages = _get_view_inverse_node_usages(later_node_usages, self_aliases)
# Step 3: Check to see if the input to the op is re-used later in the graph.
# If not (same goes for its aliases), then this op is safe to re-in place.
# This is a slightly roundabout way to check that there are no later usages of the current self argument.
# (later_view_inverse_node_usages corresponds to "view_scatter" nodes that we are allowed to delete)
can_reinplace = len(later_node_usages - later_view_inverse_node_usages) == 0
if not can_reinplace:
continue
# Step 4: replace the current out-of-place op with its inplace variant.
node.target = maybe_inplace_op
# At this point, 'storage_to_nodes' will be stale.
# Now that we're inplacing `b = foo(a)`, we need to effectively
# union together the dict values for b and a's storage.
# Hmm... morally I think we also want to keep the `fake_result` metadata
# up to date here, but I'm not sure how easy it is to do.
# Maybe it's fine to wait until the end of the pass to update it.
storage_to_nodes[self_arg_storage].update(storage_to_nodes[curr_node_storage])
storage_to_nodes[curr_node_storage].update(storage_to_nodes[self_arg_storage])
# Need to remember the view_scatter view nodes we found so we can remove them later.
all_later_view_inverse_node_usages.update(later_view_inverse_node_usages)
# Now that we've replaced b = a.foo() with a.foo_(),
# We need to replace any later usages of "b" with "a"
for old in itertools.chain([node], later_view_inverse_node_usages):
new = old.args[0]
nodes_to_update = [n for n in old.users if n.meta['node_idx'] > node.meta['node_idx']]
for node_to_update in nodes_to_update:
new_args = []
for arg_idx, a in enumerate(node_to_update.args):
if a == old:
new_args.append(new)
else:
new_args.append(a)
new_kwargs = {}
for kwarg_idx, (k, v) in enumerate(node_to_update.kwargs.items()):
if isinstance(v, Node) and v.name == old.name:
new_kwargs[k] = new
else:
new_kwargs[k] = v
node_to_update.args = tuple(new_args)
node_to_update.kwargs = new_kwargs
old_ref = StorageWeakRef(old.meta['fake_result'].storage())
node_ref = StorageWeakRef(node_to_update.meta['fake_result'].storage())
if old_ref == node_ref:
# This will happen if we're updating a view op, e.g.
# e.g. replacing
# x = view(old)
# x = view(new)
# When that happens, we need to make sure to keep our
# storage mapping up to date.
new_ref = StorageWeakRef(new.meta['fake_result'].storage())
# Technically, "old_ref" and all its aliases will remain
# in our mapping.
# That should be fine though, since we deleted "old"
# from the graph at this point.
storage_to_nodes[node_ref].update(storage_to_nodes[new_ref])
storage_to_nodes[new_ref].update(storage_to_nodes[node_ref])
# Step 5: delete any _scatter nodes that we de-functionalized
# Need to take care not to delete any of these nodes until after *all* modifications
# to the graph are finished.
for to_delete in all_later_view_inverse_node_usages:
gm.graph.erase_node(to_delete)
gm.recompile()
return gm
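# Illustrative usage sketch (not part of the original file): `reinplace` expects
# a functional ATen graph, e.g. one produced by make_fx(functionalize(f)). The
# import location of `functionalize` depends on the installed version (shown
# here as functorch.functionalize), so treat this as pseudocode.
#
#   import torch
#   from torch.fx.experimental.proxy_tensor import make_fx
#   from functorch import functionalize
#
#   def f(x):
#       a = x.add(1)
#       return a.add(1)
#
#   sample = torch.randn(4)
#   gm = make_fx(functionalize(f))(sample)
#   gm = reinplace(gm, sample)
#   print(gm.code)   # add calls rewritten to add_ where it is safe to do so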
| pytorch-master | torch/fx/passes/reinplace.py |
from . import graph_drawer
from . import graph_manipulation
from . import net_min_base
from . import operator_support
from . import param_fetch
from . import reinplace
from . import shape_prop
from . import split_module
from . import split_utils
from . import splitter_base
from . import tools_common
| pytorch-master | torch/fx/passes/__init__.py |
from functools import wraps
from inspect import unwrap
from typing import Callable, List
# for callables which modify object inplace and return something other than
# the object on which they act
def inplace_wrapper(fn: Callable) -> Callable:
"""
Convenience wrapper for passes which modify an object inplace. This
wrapper makes them return the modified object instead.
Args:
fn (Callable[Object, Any])
Returns:
wrapped_fn (Callable[Object, Object])
"""
@wraps(fn)
def wrapped_fn(gm):
fn(gm)
return gm
return wrapped_fn
def loop_pass(base_pass: Callable, n_iter: int = None, predicate: Callable = None):
"""
Convenience wrapper for passes which need to be applied multiple times.
    Exactly one of `n_iter` or `predicate` must be specified.
Args:
base_pass (Callable[Object, Object]): pass to be applied in loop
n_iter (int, optional): number of times to loop pass
predicate (Callable[Object, bool], optional):
"""
assert (n_iter is not None) ^ (
predicate is not None
), "Exactly one of `n_iter`or `predicate` must be specified."
@wraps(base_pass)
def new_pass(source):
output = source
if n_iter is not None and n_iter > 0:
for _ in range(n_iter):
output = base_pass(output)
elif predicate is not None:
while predicate(output):
output = base_pass(output)
else:
raise RuntimeError(
f"loop_pass must be given positive int n_iter (given "
f"{n_iter}) xor predicate (given {predicate})"
)
return output
return new_pass
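# Editor's note: a hedged usage sketch, not part of the original file. `_cleanup_pass`
# below is a hypothetical pass used only for illustration; `loop_pass` can either repeat
# it a fixed number of times or keep running it while a predicate holds (the predicate
# must eventually become False, or the returned pass will not terminate).
def _example_loop_pass_usage():
    def _cleanup_pass(gm):
        gm.graph.eliminate_dead_code()
        return gm
    run_three_times = loop_pass(_cleanup_pass, n_iter=3)
    run_while_large = loop_pass(_cleanup_pass, predicate=lambda gm: len(gm.graph.nodes) > 32)
    return run_three_times, run_while_large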
# Pass Schedule Constraints:
#
# Implemented as 'depends on' operators. A constraint is satisfied iff a list
# has a valid partial ordering according to this comparison operator.
def _validate_pass_schedule_constraint(
constraint: Callable[[Callable, Callable], bool], passes: List[Callable]
):
for i, a in enumerate(passes):
for j, b in enumerate(passes[i + 1 :]):
if constraint(a, b):
continue
raise RuntimeError(
f"pass schedule constraint violated. Expected {a} before {b}"
f" but found {a} at index {i} and {b} at index{j} in pass"
f" list."
)
def this_before_that_pass_constraint(this: Callable, that: Callable):
"""
Defines a partial order ('depends on' function) where `this` must occur
before `that`.
"""
def depends_on(a: Callable, b: Callable):
if a == that and b == this:
return False
return True
return depends_on
def these_before_those_pass_constraint(these: Callable, those: Callable):
"""
Defines a partial order ('depends on' function) where `these` must occur
    before `those`, where the inputs are 'unwrapped' before comparison.
For example, the following pass list and constraint list would be invalid.
```
passes = [
loop_pass(pass_b, 3),
loop_pass(pass_a, 5),
]
constraints = [
these_before_those_pass_constraint(pass_a, pass_b)
]
```
Args:
these (Callable): pass which should occur first
those (Callable): pass which should occur later
Returns:
        depends_on (Callable[[Object, Object], bool])
"""
def depends_on(a: Callable, b: Callable):
if unwrap(a) == those and unwrap(b) == these:
return False
return True
return depends_on
class PassManager:
"""
Construct a PassManager.
Collects passes and constraints. This defines the pass schedule, manages
pass constraints and pass execution.
Args:
passes (Optional[List[Callable]]): list of passes. A pass is a
            callable which modifies an object and returns the modified object
constraint (Optional[List[Callable]]): list of constraints. A
constraint is a callable which takes two passes (A, B) and returns
True if A depends on B and False otherwise. See implementation of
`this_before_that_pass_constraint` for example.
"""
passes: List[Callable] = []
constraints: List[Callable] = []
_validated: bool = False
def __init__(
self,
passes=None,
constraints=None,
):
if passes:
self.passes = passes
if constraints:
self.constraints = constraints
@classmethod
def build_from_passlist(cls, passes):
pm = PassManager(passes)
# TODO(alexbeloi): add constraint management/validation
return pm
def add_pass(self, _pass: Callable):
self.passes.append(_pass)
self._validated = False
def add_constraint(self, constraint):
self.constraints.append(constraint)
self._validated = False
def validate(self):
"""
Validates that current pass schedule defined by `self.passes` is valid
according to all constraints in `self.constraints`
"""
if self._validated:
return
for constraint in self.constraints:
_validate_pass_schedule_constraint(constraint, self.passes)
self._validated = True
def __call__(self, source):
self.validate()
out = source
for _pass in self.passes:
out = _pass(out)
return out
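# Editor's note: a hedged usage sketch, not part of the original file. `pass_a` and
# `pass_b` are hypothetical passes; the constraint says `pass_a` must be scheduled
# before `pass_b`, and calling the manager applies the validated schedule.
def _example_pass_manager_usage():
    def pass_a(gm):
        return gm
    def pass_b(gm):
        return gm
    pm = PassManager(
        passes=[pass_a, pass_b],
        constraints=[this_before_that_pass_constraint(pass_a, pass_b)],
    )
    pm.validate()
    return pm  # pm(graph_module) runs both passes in order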
| pytorch-master | torch/fx/passes/pass_manager.py |
import torch.fx
from torch.fx import Node
from torch.fx._compatibility import compatibility
from torch._subclasses.fake_tensor import FakeTensorMode
__all__ = ['FakeTensorProp']
@compatibility(is_backward_compatible=False)
class FakeTensorProp(torch.fx.Interpreter):
"""
Execute an FX graph Node-by-Node and record a fake tensor representing
the metadata for the node. Unlike ShapeProp, (1) this propagation
is cheap--it does the propagation with meta tensors which do not actually
store data, and (2) the fake tensors have much more fine grained information,
e.g., they have accurate alias information that can be consulted by looking
at the storages.
Args:
module (GraphModule): The module to be executed
"""
def run_node(self, n: Node):
result = super().run_node(n)
n.meta['fake_result'] = result
return result
def propagate(self, *args):
with FakeTensorMode.push() as mode:
fake_args = [mode.from_tensor(a) for a in args]
return super().run(*fake_args)
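# Editor's note: a hedged usage sketch, not part of the original file. It traces a
# tiny function, runs FakeTensorProp over it with a sample input, and reads back the
# shapes recorded in each node's 'fake_result' metadata.
def _example_fake_tensor_prop():
    def f(x):
        return x.relu() + 1
    gm = torch.fx.symbolic_trace(f)
    FakeTensorProp(gm).propagate(torch.randn(2, 3))
    return {n.name: n.meta['fake_result'].shape for n in gm.graph.nodes if n.op != 'output'}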
| pytorch-master | torch/fx/passes/fake_tensor_prop.py |
import abc
import typing as t
import torch
import torch.fx
from torch.fx._compatibility import compatibility
from .shape_prop import TensorMetadata
from .tools_common import get_node_target, CALLABLE_NODE_OPS
__all__ = ['OperatorSupportBase', 'OperatorSupport', 'create_op_support', 'chain', 'OpSupports']
# fx.Node.target typename, as returned by `get_node_target()`
TargetTypeName = str
# Arguments' dtypes for a given node, see `OperatorSupport`
SupportedArgumentDTypes = t.Optional[
t.Tuple[
t.Sequence[t.Sequence[torch.dtype]],
t.Dict[str, t.Sequence[torch.dtype]],
]
]
SupportDict = t.Mapping[TargetTypeName, SupportedArgumentDTypes]
@compatibility(is_backward_compatible=False)
class OperatorSupportBase(abc.ABC):
"""Interface for determining if a fx.Node is supported by a backend"""
@abc.abstractmethod
def is_node_supported(
self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
) -> bool:
raise NotImplementedError()
@compatibility(is_backward_compatible=False)
class OperatorSupport(OperatorSupportBase):
"""
`_support_dict` maps node.target typename to supported inputs dtypes.
node.target typename is retrieved using helper function `get_node_target()`
If supported inputs dtypes is None, it means any dtype is supported, else
we should see a tuple like (([dtypes], ...), {"name":[dtypes], ...}).
The first tuple ([dtypes], ...) indicates what dtypes are supported for
inputs in node.args and the second dict {"name": [dtypes], ...} indicates
what dtypes are supported for inputs in node.kwargs.
For inputs in args, if we don't want to check it, we can put None there,
e.g. (None, [torch.float]) indicates that we don't care about the type of
    the first input in args. Inputs in kwargs that are not listed will not
    be checked.
"""
_support_dict: SupportDict
def __init__(
self,
support_dict: t.Optional[SupportDict] = None
):
self._support_dict = support_dict or {}
def is_node_supported(
self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
) -> bool:
"""
Args:
            `submodules`: mapping from module name to the module. This can be
retrieved by calling model.named_modules().
`node`: a Fx node that we want to determine whether it's supported.
Returns:
`is_supported`: whether the arg `node` is supported.
"""
if node.op not in CALLABLE_NODE_OPS:
return True
target = get_node_target(submodules, node)
# Target not found in _support_dict meaning that we don't support this op at all
if target not in self._support_dict:
return False
# The rule for target is None meaning that we accept any dtype
if self._support_dict[target] is None:
return True
args_dtypes, kwargs_dtypes = self._support_dict[target] # type: ignore[misc]
# Check args dtypes
for i, dtypes in enumerate(args_dtypes):
if len(node.args) <= i:
break
# None indicates we don't care about the dtype of args[i]
if dtypes is None:
continue
# If arg is not a node then we don't check it
if not isinstance(node.args[i], torch.fx.Node):
continue
arg_dtype = _get_arg_dtype(node.args[i]) # type: ignore[arg-type]
if arg_dtype not in dtypes:
return False
# Check kwargs dtypes
for k, dtypes in kwargs_dtypes.items():
if k not in node.kwargs:
continue
# If arg is not a node then we don't check it
if not isinstance(node.kwargs[k], torch.fx.Node):
continue
kwarg_dtype = _get_arg_dtype(node.kwargs[k]) # type: ignore[arg-type]
if kwarg_dtype not in dtypes:
return False
return True
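# Editor's note: a hedged sketch, not part of the original file, showing the shape of a
# support dict. The target typenames below are illustrative only; in practice the keys
# must match what `get_node_target()` returns for the nodes in your graph.
def _example_operator_support():
    example_support_dict: SupportDict = {
        "torch.relu": None,  # any input dtypes are accepted
        "torch.add": (
            ([torch.float], None),     # args[0] must be float, args[1] is not checked
            {"alpha": [torch.float]},  # kwarg "alpha" must be float if it is a Node
        ),
    }
    return OperatorSupport(support_dict=example_support_dict)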
# ======================================================================
# Functional interfaces and utils for defining basic operator support logic
# and composing them into more complex ones
# ======================================================================
IsNodeSupported = t.Callable[[t.Mapping[str, torch.nn.Module], torch.fx.Node], bool]
@compatibility(is_backward_compatible=False)
def create_op_support(is_node_supported: IsNodeSupported) -> OperatorSupportBase:
"""Wraps a `IsNodeSupported` function into an `OperatorSupportBase` instance
`IsNodeSupported` has the same call signature as
`OperatorSupportBase.is_node_supported`
"""
class FunctionalOperatorSupport(OperatorSupportBase):
def is_node_supported(
self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
) -> bool:
return is_node_supported(submodules, node)
return FunctionalOperatorSupport()
@compatibility(is_backward_compatible=False)
def chain(*op_support: OperatorSupportBase) -> OperatorSupportBase:
"""Combines a sequence of `OperatorSupportBase` instances to form a single `OperatorSupportBase`
instance by evaluating each input `OperatorSupportBase` instance, and returns False if
any of it reports False.
"""
def _chain(submods, node) -> bool:
return all(
x.is_node_supported(submods, node)
for x in op_support
)
return create_op_support(_chain)
@compatibility(is_backward_compatible=False)
class OpSupports:
"""A set of atomic `OperatorSupportBase` instances that can be combined together
to form more complex operator support logic.
"""
@classmethod
def decline_if_input_dtype(cls, dtype: torch.dtype) -> OperatorSupportBase:
"""Report a node as non-supported, if any of its arguments is of dtype"""
def _decline_if_input_dtype(
submodules: t.Mapping[str, torch.nn.Module],
node: torch.fx.Node,
) -> bool:
for arg in node.all_input_nodes:
# escape dtype check for get_attr node
if arg.op == "get_attr":
continue
arg_dtype = _get_arg_dtype(arg)
if arg_dtype == dtype:
return False
return True
return create_op_support(_decline_if_input_dtype)
@classmethod
def decline_if_node_in_names(cls, disallow_set: t.Set[str]) -> OperatorSupportBase:
"""
        If a node has a name that is in the disallow set, report it as non-supported.
"""
def _decline_if_node_in_names(
submodules: t.Mapping[str, torch.nn.Module],
node: torch.fx.Node,
) -> bool:
if node.name in disallow_set:
return False
else:
return True
return create_op_support(_decline_if_node_in_names)
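# Editor's note: a hedged sketch, not part of the original file. It combines a
# dtype-based rule with a name-based denylist via `chain`; "node_to_keep_on_cpu" is a
# hypothetical node name. A node is supported only if every rule in the chain accepts it.
def _example_chained_support() -> OperatorSupportBase:
    return chain(
        OpSupports.decline_if_input_dtype(torch.int64),
        OpSupports.decline_if_node_in_names({"node_to_keep_on_cpu"}),
    )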
def _get_arg_dtype(arg: torch.fx.Node) -> t.Any:
assert isinstance(arg, torch.fx.Node)
tensor_meta = arg.meta.get("tensor_meta") # type: ignore[union-attr]
dtype = tensor_meta.dtype if isinstance(tensor_meta, TensorMetadata) else arg.meta["type"]
return dtype
| pytorch-master | torch/fx/passes/operator_support.py |
from typing import Any, Dict, List, NamedTuple, Optional
import torch
from torch.fx._compatibility import compatibility
from torch.fx.graph import Graph
from torch.fx.graph_module import GraphModule
from torch.fx.node import (
map_arg,
Node,
Target,
)
from torch.fx.passes.shape_prop import ShapeProp
__all__ = ['replace_target_nodes_with', 'size_bytes', 'get_size_of_all_nodes', 'get_tensor_meta',
'get_size_of_node']
@compatibility(is_backward_compatible=False)
def replace_target_nodes_with(
fx_module: GraphModule,
old_op: str,
old_target: Target,
new_op: str,
new_target: Target,
):
"""Modifies all nodes in fx_module.graph.nodes which match the specified op code and target,
and updates them to match the new op code and target"""
new_graph = Graph()
val_map: Dict[Node, Node] = {}
for node in fx_module.graph.nodes:
if node.op == old_op and node.target == old_target:
args = map_arg(node.args, lambda n: val_map[n])
kwargs = map_arg(node.kwargs, lambda n: val_map[n])
assert isinstance(args, tuple)
assert isinstance(kwargs, dict)
val_map[node] = new_graph.create_node(
new_op, new_target, args, kwargs, node.name
)
else:
val_map[node] = new_graph.node_copy(node, lambda n: val_map[n])
fx_module.graph = new_graph
@compatibility(is_backward_compatible=False)
class size_bytes(NamedTuple):
output_size: int
total_size: int
@compatibility(is_backward_compatible=False)
def get_size_of_all_nodes(
fx_module: GraphModule, args: Optional[List[torch.Tensor]] = None
) -> None:
"""Given a fx graph module, update each node with its total size (weights + bias + output)
    and its output_size (output only). For a non-module node, the total size is the output size.
    The sizes are attached to each node as node.size_bytes; nothing is returned."""
if args is not None:
# Mark shape and dtype for each node (node.shape and node.dtype)
ShapeProp(fx_module).propagate(*args)
# Calculate the total size of the whole fx graph
total_size_of_graph = 0.0
for node in fx_module.graph.nodes:
if node.op == "output":
break
node.size_bytes = get_size_of_node(fx_module, node)
return
@compatibility(is_backward_compatible=False)
def get_tensor_meta(node: Node) -> Any:
tensor_meta = node.meta.get("tensor_meta")
if not tensor_meta:
raise RuntimeError(
f"Node {node} has no tensor metadata associated with it! "
f"Check that shape propagation has run."
)
return tensor_meta
@compatibility(is_backward_compatible=False)
def get_size_of_node(fx_module: GraphModule, node: Node) -> size_bytes:
"""Given a node with node.dtype and node.shape, return its total size and its output size.
total_size = weights + bias + output_size
"""
# Total num of elements
total_num_of_elems = 0
    # For a module, consider all parameters
if node.op == "call_module":
submodule_dict = dict(fx_module.named_modules())
submodule = submodule_dict[node.target]
parameters = submodule.named_parameters()
        # named_parameters() yields (name, parameter) tuples
for name, p in parameters:
total_num_of_elems += p.numel()
# Don't forget the output size
# node.shape is the shape of this node's output
tensor_meta = get_tensor_meta(node)
output_elem = tensor_meta.shape.numel()
total_num_of_elems += output_elem
# Assume for now if it's quantized then it's qint8 or quint8
if tensor_meta.is_quantized:
size_per_elem_bytes = torch._empty_affine_quantized(
[], dtype=tensor_meta.dtype
).element_size()
else:
size_per_elem_bytes = torch.tensor([], dtype=tensor_meta.dtype).element_size()
total_size = size_per_elem_bytes * total_num_of_elems
output_size = size_per_elem_bytes * output_elem
return size_bytes(output_size, total_size)
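# Editor's note: a hedged usage sketch, not part of the original file. It traces a tiny
# model, lets get_size_of_all_nodes run shape propagation with the sample input, and
# reads back the per-node totals stored in `node.size_bytes`.
def _example_get_size_of_all_nodes():
    import torch.fx
    model = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU())
    gm = torch.fx.symbolic_trace(model)
    get_size_of_all_nodes(gm, [torch.randn(2, 4)])
    return {node.name: node.size_bytes.total_size
            for node in gm.graph.nodes if node.op != "output"}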
| pytorch-master | torch/fx/passes/graph_manipulation.py |
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import torch
import torch.fx
from typing import Dict, Any, TYPE_CHECKING
from torch.fx.node import _get_qualified_name, _format_arg
from torch.fx.passes.shape_prop import TensorMetadata
from torch.fx._compatibility import compatibility
from itertools import chain
__all__ = ['FxGraphDrawer']
try:
import pydot
HAS_PYDOT = True
except ImportError:
HAS_PYDOT = False
_COLOR_MAP = {
"placeholder": '"AliceBlue"',
"call_module": "LemonChiffon1",
"get_param": "Yellow2",
"get_attr": "LightGrey",
"output": "PowderBlue",
}
_HASH_COLOR_MAP = [
"CadetBlue1",
"Coral",
"DarkOliveGreen1",
"DarkSeaGreen1",
"GhostWhite",
"Khaki1",
"LavenderBlush1",
"LightSkyBlue",
"MistyRose1",
"MistyRose2",
"PaleTurquoise2",
"PeachPuff1",
"Salmon",
"Thistle1",
"Thistle3",
"Wheat1",
]
_WEIGHT_TEMPLATE = {
"shape": "record",
"fillcolor": "Salmon",
"style": '"filled,rounded"',
"fontcolor": "#000000",
}
if HAS_PYDOT:
@compatibility(is_backward_compatible=False)
class FxGraphDrawer:
"""
Visualize a torch.fx.Graph with graphviz
Basic usage:
g = FxGraphDrawer(symbolic_traced, "resnet18")
with open("a.svg", "w") as f:
f.write(g.get_dot_graph().create_svg())
"""
def __init__(
self,
graph_module: torch.fx.GraphModule,
name: str,
ignore_getattr: bool = False,
ignore_parameters_and_buffers: bool = False,
skip_node_names_in_args: bool = True,
):
self._name = name
self._dot_graphs = {
name: self._to_dot(
graph_module, name, ignore_getattr, ignore_parameters_and_buffers, skip_node_names_in_args
)
}
for node in graph_module.graph.nodes:
if node.op != "call_module":
continue
leaf_node = self._get_leaf_node(graph_module, node)
if not isinstance(leaf_node, torch.fx.GraphModule):
continue
self._dot_graphs[f"{name}_{node.target}"] = self._to_dot(
leaf_node,
f"{name}_{node.target}",
ignore_getattr,
ignore_parameters_and_buffers,
skip_node_names_in_args,
)
def get_dot_graph(self, submod_name=None) -> pydot.Dot:
if submod_name is None:
return self.get_main_dot_graph()
else:
return self.get_submod_dot_graph(submod_name)
def get_main_dot_graph(self) -> pydot.Dot:
return self._dot_graphs[self._name]
def get_submod_dot_graph(self, submod_name) -> pydot.Dot:
return self._dot_graphs[f"{self._name}_{submod_name}"]
def get_all_dot_graphs(self) -> Dict[str, pydot.Dot]:
return self._dot_graphs
def _get_node_style(self, node: torch.fx.Node) -> Dict[str, str]:
template = {
"shape": "record",
"fillcolor": "#CAFFE3",
"style": '"filled,rounded"',
"fontcolor": "#000000",
}
if node.op in _COLOR_MAP:
template["fillcolor"] = _COLOR_MAP[node.op]
else:
# Use a random color for each node; based on its name so it's stable.
target_name = node._pretty_print_target(node.target)
target_hash = int(hashlib.md5(target_name.encode()).hexdigest()[:8], 16)
template["fillcolor"] = _HASH_COLOR_MAP[target_hash % len(_HASH_COLOR_MAP)]
return template
def _get_leaf_node(
self, module: torch.nn.Module, node: torch.fx.Node
) -> torch.nn.Module:
py_obj = module
assert isinstance(node.target, str)
atoms = node.target.split(".")
for atom in atoms:
if not hasattr(py_obj, atom):
raise RuntimeError(
str(py_obj) + " does not have attribute " + atom + "!"
)
py_obj = getattr(py_obj, atom)
return py_obj
def _typename(self, target: Any) -> str:
if isinstance(target, torch.nn.Module):
return torch.typename(target)
if isinstance(target, str):
return target
return _get_qualified_name(target)
def _get_node_label(
self,
module: torch.fx.GraphModule,
node: torch.fx.Node,
skip_node_names_in_args: bool,
) -> str:
def _get_str_for_args_kwargs(arg):
if isinstance(arg, tuple):
prefix, suffix = r"|args=(\l", r",\n)\l"
arg_strs_list = [_format_arg(a, max_list_len=8) for a in arg]
elif isinstance(arg, dict):
prefix, suffix = r"|kwargs={\l", r",\n}\l"
arg_strs_list = [
f"{k}: {_format_arg(v, max_list_len=8)}"
for k, v in arg.items()
]
else: # Fall back to nothing in unexpected case.
return ""
# Strip out node names if requested.
if skip_node_names_in_args:
arg_strs_list = [a for a in arg_strs_list if "%" not in a]
if len(arg_strs_list) == 0:
return ""
arg_strs = prefix + r",\n".join(arg_strs_list) + suffix
return arg_strs.replace("{", r"\{").replace("}", r"\}")
label = "{" + f"name=%{node.name}|op_code={node.op}\n"
if node.op == "call_module":
leaf_module = self._get_leaf_node(module, node)
label += r"\n" + self._typename(leaf_module) + r"\n|"
extra = ""
if hasattr(leaf_module, "__constants__"):
extra = r"\n".join(
[f"{c}: {getattr(leaf_module, c)}" for c in leaf_module.__constants__] # type: ignore[union-attr]
)
label += extra + r"\n"
else:
label += f"|target={self._typename(node.target)}" + r"\n"
if len(node.args) > 0:
label += _get_str_for_args_kwargs(node.args)
if len(node.kwargs) > 0:
label += _get_str_for_args_kwargs(node.kwargs)
label += f"|num_users={len(node.users)}" + r"\n"
tensor_meta = node.meta.get('tensor_meta')
label += self._tensor_meta_to_label(tensor_meta)
return label + "}"
def _tensor_meta_to_label(self, tm) -> str:
if tm is None:
return ""
elif isinstance(tm, TensorMetadata):
return self._stringify_tensor_meta(tm)
elif isinstance(tm, list):
result = ""
for item in tm:
result += self._tensor_meta_to_label(item)
return result
elif isinstance(tm, dict):
result = ""
for k, v in tm.items():
result += self._tensor_meta_to_label(v)
return result
elif isinstance(tm, tuple):
result = ""
for item in tm:
result += self._tensor_meta_to_label(item)
return result
else:
raise RuntimeError(f"Unsupported tensor meta type {type(tm)}")
def _stringify_tensor_meta(self, tm: TensorMetadata) -> str:
result = ""
if not hasattr(tm, "dtype"):
print("tm", tm)
result += "|" + "dtype" + "=" + str(tm.dtype) + r"\n"
result += "|" + "shape" + "=" + str(tuple(tm.shape)) + r"\n"
result += "|" + "requires_grad" + "=" + str(tm.requires_grad) + r"\n"
result += "|" + "stride" + "=" + str(tm.stride) + r"\n"
if tm.is_quantized:
assert tm.qparams is not None
assert "qscheme" in tm.qparams
qscheme = tm.qparams["qscheme"]
if qscheme in {
torch.per_tensor_affine,
torch.per_tensor_symmetric,
}:
result += "|" + "q_scale" + "=" + str(tm.qparams["scale"]) + r"\n"
result += "|" + "q_zero_point" + "=" + str(tm.qparams["zero_point"]) + r"\n"
elif qscheme in {
torch.per_channel_affine,
torch.per_channel_symmetric,
torch.per_channel_affine_float_qparams,
}:
result += "|" + "q_per_channel_scale" + "=" + str(tm.qparams["scale"]) + r"\n"
result += "|" + "q_per_channel_zero_point" + "=" + str(tm.qparams["zero_point"]) + r"\n"
result += "|" + "q_per_channel_axis" + "=" + str(tm.qparams["axis"]) + r"\n"
else:
raise RuntimeError(f"Unsupported qscheme: {qscheme}")
result += "|" + "qscheme" + "=" + str(tm.qparams["qscheme"]) + r"\n"
return result
def _get_tensor_label(self, t: torch.Tensor) -> str:
return str(t.dtype) + str(list(t.shape)) + r"\n"
def _to_dot(
self,
graph_module: torch.fx.GraphModule,
name: str,
ignore_getattr: bool,
ignore_parameters_and_buffers: bool,
skip_node_names_in_args: bool,
) -> pydot.Dot:
"""
Actual interface to visualize a fx.Graph. Note that it takes in the GraphModule instead of the Graph.
If ignore_parameters_and_buffers is True, the parameters and buffers
created with the module will not be added as nodes and edges.
"""
dot_graph = pydot.Dot(name, rankdir="TB")
for node in graph_module.graph.nodes:
if ignore_getattr and node.op == "get_attr":
continue
style = self._get_node_style(node)
dot_node = pydot.Node(
node.name, label=self._get_node_label(graph_module, node, skip_node_names_in_args), **style
)
dot_graph.add_node(dot_node)
def get_module_params_or_buffers():
for pname, ptensor in chain(
leaf_module.named_parameters(), leaf_module.named_buffers()
):
pname1 = node.name + "." + pname
                        # Keep the name and op_code prefix for both parameters and
                        # buffers, and close the record label with the separator.
                        label1 = (
                            pname1
                            + "|op_code=get_"
                            + ("parameter" if isinstance(ptensor, torch.nn.Parameter) else "buffer")
                            + r"\l"
                        )
dot_w_node = pydot.Node(
pname1,
label="{" + label1 + self._get_tensor_label(ptensor) + "}",
**_WEIGHT_TEMPLATE,
)
dot_graph.add_node(dot_w_node)
dot_graph.add_edge(pydot.Edge(pname1, node.name))
if node.op == "call_module":
leaf_module = self._get_leaf_node(graph_module, node)
if not ignore_parameters_and_buffers and not isinstance(leaf_module, torch.fx.GraphModule):
get_module_params_or_buffers()
for node in graph_module.graph.nodes:
if ignore_getattr and node.op == "get_attr":
continue
for user in node.users:
dot_graph.add_edge(pydot.Edge(node.name, user.name))
return dot_graph
else:
if not TYPE_CHECKING:
@compatibility(is_backward_compatible=False)
class FxGraphDrawer:
def __init__(self, graph_module: torch.fx.GraphModule, name: str, ignore_getattr: bool = False):
raise RuntimeError('FXGraphDrawer requires the pydot package to be installed. Please install '
'pydot through your favorite Python package manager.')
| pytorch-master | torch/fx/passes/graph_drawer.py |
import argparse
from collections import defaultdict
from dataclasses import dataclass
from typing import NamedTuple, Sequence, Iterable, Any, List, Dict, Optional, Tuple
import logging
import torch
from torch.fx.passes.graph_manipulation import get_size_of_node
from torch.fx.node import map_arg
from torch.fx._compatibility import compatibility
from .operator_support import (
get_node_target,
OperatorSupportBase,
)
from .graph_drawer import FxGraphDrawer
from .shape_prop import ShapeProp
from .split_utils import split_by_tags
from .tools_common import (
FxNetAccFusionsFinder,
CALLABLE_NODE_OPS,
Tensors,
NodeList,
NodeSet,
is_node_output_tensor,
)
import warnings
__all__ = ['FxNetAccNodesFinder', 'FxNetSplitterInternalError', 'Subgraph', 'SplitResult', 'generate_inputs_for_submodules']
_LOGGER = logging.getLogger(__name__)
class _SplitterSettingBase:
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument(
"--min_acc_module_size",
default=1,
type=int,
help="Minimum size limit of an accelerator subgraph.",
)
parser.add_argument(
"--skip_fusion",
default=False,
action="store_true",
help="If true then no fusion groups. Fusion group is used to "
"enforce no non-tensor data flow between submodules. If we don't "
"have this constrain, setting this to false is recommended as it "
"can reduce overhead.",
)
parser.add_argument(
"--allow_non_tensor",
default=False,
action="store_true",
help="For some backends non-tensor data flow between cpu and them "
"are not allowed. Therefore, if a node supported by accelerator but "
"it has non-tensor inputs or outputs to a cpu node we would want to "
"consider it as a cpu node during splitting. However, for some backends "
"we might not care about non-tensor data flow and we can set this option "
"to true to disable the functionality that prevent non-tensor data flow.",
)
args, unknown = parser.parse_known_args()
self.min_acc_module_size: int = args.min_acc_module_size
self.skip_fusion: bool = args.skip_fusion
self.allow_non_tensor: bool = args.allow_non_tensor
@compatibility(is_backward_compatible=False)
class FxNetAccNodesFinder:
"""
Finds a set of nodes that can be supported on ACC, excluding nodes that have non-tensor
input/output to cpu nodes to prevent non-tensor data flow between backends and cpu.
I.e. if we have a chain:
ACC_NODE_1 -> ACC_NODE_2 -> ACC_NODE_3 -> CPU_NODE_1
where every ACC node produces non-tensor output, then they all should be treated as CPU nodes.
This behavior can be turned off by passing allow_non_tensor=True.
"""
def __init__(
self,
module: torch.fx.GraphModule,
operator_support: OperatorSupportBase,
allow_non_tensor: bool,
):
self.module = module
self.operator_support = operator_support
self.allow_non_tensor = allow_non_tensor
def reduce_acc_nodes_non_tensor_input_helper(
self, cpu_worklist: NodeList
):
"""
Transitively excludes nodes from ACC supported set.
For every node in the worklist:
- removes its downstream ACC nodes from ACC supported set,
- if any downstream ACC node produces non-tensor output,
then it gets added into the worklist.
"""
while cpu_worklist:
node = cpu_worklist.pop(0)
for user in node.users:
if user in self.acc_nodes:
self.acc_nodes.remove(user)
if not is_node_output_tensor(user):
cpu_worklist.append(user)
def reduce_acc_nodes_non_tensor_input(self):
"""
Excludes nodes from ACC supported set that have direct
upstream CPU nodes that produce non-tensor outputs.
"""
non_tensor_cpu_nodes: NodeList = []
for node in self.module.graph.nodes:
if node.op not in CALLABLE_NODE_OPS:
continue
if node in self.acc_nodes:
continue
if is_node_output_tensor(node):
continue
non_tensor_cpu_nodes.append(node)
self.reduce_acc_nodes_non_tensor_input_helper(non_tensor_cpu_nodes)
def reduce_acc_nodes_non_tensor_output(self):
"""
Excludes nodes from ACC supported set that produce non-tensor
outputs and have downstream CPU nodes.
"""
while True:
new_cpu_nodes: NodeList = []
for acc_node in self.acc_nodes:
if is_node_output_tensor(acc_node):
continue
for user in acc_node.users:
if user not in self.acc_nodes:
new_cpu_nodes.append(acc_node)
break
if not new_cpu_nodes:
break
for new_cpu_node in new_cpu_nodes:
self.acc_nodes.remove(new_cpu_node)
self.reduce_acc_nodes_non_tensor_input_helper(new_cpu_nodes)
def __call__(self) -> NodeSet:
submodules = dict(self.module.named_modules())
self.acc_nodes = {
n
for n in self.module.graph.nodes
if n.op in CALLABLE_NODE_OPS
and self.operator_support.is_node_supported(submodules, n)
}
if not self.allow_non_tensor:
self.reduce_acc_nodes_non_tensor_input()
self.reduce_acc_nodes_non_tensor_output()
return self.acc_nodes
@compatibility(is_backward_compatible=False)
class FxNetSplitterInternalError(Exception):
pass
@compatibility(is_backward_compatible=False)
@dataclass
class Subgraph:
is_acc: bool
nodes: NodeList
@compatibility(is_backward_compatible=False)
class SplitResult(NamedTuple):
"""
Stores the results of the splitter.
Attributes:
split_module: root module after splitting.
submodule_inputs: a dict that maps submodule name to its inputs.
        non_acc_submodule_prefix: the prefix for non-acc submodules. For
            acc submodules the prefix is always "_run_on_acc_".
"""
split_module: torch.fx.GraphModule
submodule_inputs: Dict[str, Any]
non_acc_submodule_prefix: str
@compatibility(is_backward_compatible=False)
def generate_inputs_for_submodules(
model: torch.nn.Module,
inputs: Sequence[Any],
target_submodules: Iterable[str]
) -> Dict[str, Any]:
"""
    Generate inputs for the target submodules in the given model. Note that if two submodules refer to the same obj, this
function doesn't work.
Args:
model: root model.
inputs: inputs to the root model.
target_submodules: submodules that we want to generate inputs for.
Returns:
A dict that maps from submodule name to its inputs.
"""
handles = []
results = {}
submodule_to_names = dict((mod, name) for name, mod in model.named_modules())
def pre_forward(module, module_inputs):
results[submodule_to_names[module]] = module_inputs
try:
for name, mod in model.named_modules():
if name in target_submodules:
handles.append(mod.register_forward_pre_hook(pre_forward))
model(*inputs)
except Exception as e:
warnings.warn(f"Failed to generate submodule inputs because of the following error:\n{e}")
finally:
for h in handles:
h.remove()
return results
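# Editor's note: a hedged usage sketch, not part of the original file. It captures the
# inputs that flow into the first layer ("0") of a toy sequential model.
def _example_generate_inputs_for_submodules():
    model = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU())
    sample_inputs = (torch.randn(2, 4),)
    return generate_inputs_for_submodules(model, sample_inputs, target_submodules=["0"])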
class _SplitterBase:
"""
Splits a GraphModule into sub-GraphModules for execution on CPU or the accelerator.
Output is a GraphModule with supported and unsupported operators grouped into as few sub-GraphModules as possible.
Assumes that only "call_module", "call_function" and "call_method" from FX IR can potentially be executed on the accelerator.
Given the following graph:
==> b ==>
// \\
a d
\\ //
==> c ==>
class SimpleModule(torch.nn.Module):
def forward(self, a):
b = torch.sin(a)
c = torch.cos(a)
d = b + c
return d
and providing "operator_support" that indicates that 'b' and 'c' can be executed on the accelerator,
we will get the following split result:
main:
def forward(self, a):
run_on_acc_0_0 = self._run_on_acc_0_0(a)
getitem = run_on_acc_0_0[0]
getitem_1 = run_on_acc_0_0[1]
run_on_cpu_1_1 = self._run_on_cpu_1_1(getitem, getitem_1)
return run_on_cpu_1_1
_run_on_acc_0_0:
def forward(self, a):
sin_1 = torch.sin(a)
cos_1 = torch.cos(a)
return (sin_1, cos_1)
_run_on_cpu_1_1:
def forward(self, sin_1, cos_1):
add_1 = sin_1 + cos_1
return add_1
"""
# PCIe bandwidth for the backend, default to 100 GB/s
PCIe_BW = 100 * 2 ** 30
def __init__(
self,
module: torch.fx.GraphModule,
sample_input: Sequence[Any],
operator_support: OperatorSupportBase,
settings: _SplitterSettingBase,
non_acc_submodule_name: str = "_run_on_cpu_",
):
"""
Preprocesses graph before splitting:
- finds nodes supported by ACC,
- finds fusion groups for ACC nodes having non-tensor IO,
- builds a graph of direct dependencies,
- builds a map of fused nodes to their fusions.
As a result we get self.acc_nodes, self.deps and self.fusions.
"""
assert isinstance(module, torch.fx.GraphModule)
self.module = module
ShapeProp(self.module).propagate(*sample_input)
self.settings = settings
self.operator_support = operator_support
self.sample_input = sample_input
self.acc_nodes = FxNetAccNodesFinder(self.module, self.operator_support, self.settings.allow_non_tensor)()
if self.settings.skip_fusion:
self.fusions = {}
else:
self.fusions = FxNetAccFusionsFinder(module, self.acc_nodes)()
# Modify deps to add more deps for fused nodes
self.deps = self.find_deps()
self.update_deps_for_fusions()
self.non_acc_submodule_name = non_acc_submodule_name
# ===============================================================
# Helpers for ctor and initial state
# ===============================================================
def find_deps(self) -> Dict[torch.fx.Node, NodeSet]:
"""
Builds a graph of node dependencies. Leaf nodes don't have any
dependencies and the "output" node doesn't have nodes depending on it.
Resulting graph has only direct dependencies, i.e. there are no
transitive dependencies.
"""
deps: Dict[torch.fx.Node, NodeSet] = defaultdict(set)
for node in self.module.graph.nodes:
if node.op not in CALLABLE_NODE_OPS:
continue
for user in node.users:
if user.op != "output":
deps[user].add(node)
return deps
def update_deps_for_fusions(self):
"""
Updates graph of dependencies so that:
- nodes from the same fusion depend on the same set of outer nodes,
- outer nodes depending on a fusion depend on all nodes in that fusion.
"""
for node in self.fusions:
fusion = self.fusions[node]
for fused_neighbor in fusion:
self.deps[node].update(self.deps[fused_neighbor] - fusion)
for user in fused_neighbor.users:
if user not in fusion:
self.deps[user].add(node)
# ===============================================================
# Helpers for preview
# ===============================================================
def _lower_model_to_backend(
self, mod: torch.fx.GraphModule, inputs: Tensors
) -> torch.nn.Module:
"""
Lower the model to a backend.
"""
return mod
def _find_culprit(
self, mod: torch.fx.GraphModule, inputs: Tensors
) -> str:
"""
When an error occurs during lowering or running the lowered mod, we use this
        function to find culprits in the `mod` that cause the error.
"""
return "Unable to find a culprit because _find_culprit() function is not implemented."
def _draw_graph_based_on_node_support(
self, mod: torch.fx.GraphModule, supported_nodes: NodeList
):
color_map = {
"default": "AliceBlue",
"supported": "chartreuse1",
"unsupported": "crimson",
}
class CustomDrawer(FxGraphDrawer):
def _get_node_style(self, node):
template = super()._get_node_style(node)
if node in supported_nodes:
template["fillcolor"] = color_map["supported"]
elif node.op in CALLABLE_NODE_OPS:
template["fillcolor"] = color_map["unsupported"]
else:
template["fillcolor"] = color_map["default"]
return template
drawer = CustomDrawer(mod, "node_support", ignore_getattr=True)
dot_graph = drawer.get_main_dot_graph()
dot_graph.write_raw("node_support.dot")
def node_support_preview(self, dump_graph: bool = False):
submodules = dict(self.module.named_modules())
supported_nodes: NodeList = []
supported_node_types = defaultdict(set)
unsupported_node_types = defaultdict(set)
def get_dtype(arg):
tensor_meta = arg.meta.get("tensor_meta")
return getattr(tensor_meta, "dtype", None)
for node in self.module.graph.nodes:
if node.op not in CALLABLE_NODE_OPS:
continue
target = get_node_target(submodules, node)
# Store dtype of arg in node.args. If arg doesn't have dtype, i.e. not a tensor, we'll store None.
arg_dtypes = [
get_dtype(arg) if isinstance(arg, torch.fx.Node) else None
for arg in node.args
]
            # Find the index just past the last non-None element (0 if all elements are None).
last_index = len(arg_dtypes) - next(
(
i
for i, dtype in enumerate(reversed(arg_dtypes))
if dtype is not None
),
len(arg_dtypes),
)
# Strip None elements at the end.
arg_dtypes_tuple = tuple(arg_dtypes[:last_index])
kwarg_dtypes_tuple = tuple(
(k, get_dtype(arg))
for k, arg in node.kwargs.items()
if isinstance(arg, torch.fx.Node)
)
if self.operator_support.is_node_supported(submodules, node):
supported_nodes.append(node)
supported_node_types[target].add((arg_dtypes_tuple, kwarg_dtypes_tuple))
else:
unsupported_node_types[target].add((arg_dtypes_tuple, kwarg_dtypes_tuple))
if dump_graph:
self._draw_graph_based_on_node_support(self.module, supported_nodes)
reports = "\nSupported node types in the model:\n"
for t, dtypes in supported_node_types.items():
for arg_dtypes_tuple, kwarg_dtypes_tuple in dtypes:
reports += f"{t}: ({arg_dtypes_tuple}, {dict(kwarg_dtypes_tuple)})\n"
reports += "\nUnsupported node types in the model:\n"
for t, dtypes in unsupported_node_types.items():
for arg_dtypes_tuple, kwarg_dtypes_tuple in dtypes:
reports += f"{t}: ({arg_dtypes_tuple}, {dict(kwarg_dtypes_tuple)})\n"
print(reports)
# Return reports for testing purpose
return reports
def split_preview(self, dump_graph: bool = False):
reports = ""
subgraphs = self.put_nodes_into_subgraphs()
acc_subgraphs_num = len([g for g in subgraphs if g.is_acc])
cpu_subgraphs_num = len(subgraphs) - acc_subgraphs_num
reports += f"Before removing small acc subgraphs, total {len(subgraphs)} subgraphs are created:"
reports += f" {acc_subgraphs_num} acc subgraphs and {cpu_subgraphs_num} cpu subgraphs.\n"
subgraphs = self.remove_small_acc_subgraphs(subgraphs)
acc_subgraphs_num = len([g for g in subgraphs if g.is_acc])
cpu_subgraphs_num = len(subgraphs) - acc_subgraphs_num
reports += f"After removing small acc subgraphs, total {len(subgraphs)} subgraphs are created:"
reports += f" {acc_subgraphs_num} acc subgraphs and {cpu_subgraphs_num} cpu subgraphs.\n"
for i, subgraph in enumerate(subgraphs):
reports += f"_run_on_acc_{i}: " if subgraph.is_acc else f"{self.non_acc_submodule_name}{i}: "
reports += f"{len(subgraph.nodes)} node(s)\n"
self.tag(subgraphs)
split_mod = self.split(remove_tag=True)
split_mod.eval()
if dump_graph:
drawer = FxGraphDrawer(
split_mod, "preview", ignore_getattr=True
)
dot_graphs = drawer.get_all_dot_graphs()
for name, dot_graph in dot_graphs.items():
dot_graph.write_raw(f"{name}.dot")
max_qps: float = self.PCIe_BW
bottleneck_module = ""
for node in split_mod.graph.nodes:
if node.op == "call_module" and "acc" in node.target:
reports += f"\nProcessing acc submodule {node.target}\n"
submod = getattr(split_mod, node.target)
def get_submod_inputs(main_mod, submod, example_inputs):
sub_inputs = None
def get_inputs(self, inputs):
nonlocal sub_inputs
sub_inputs = inputs
handle = submod.register_forward_pre_hook(get_inputs)
main_mod(*example_inputs)
handle.remove()
return sub_inputs
submod_inputs = get_submod_inputs(
split_mod, submod, self.sample_input
)
ShapeProp(submod).propagate(*submod_inputs)
total_input_bytes = 0
total_output_bytes = 0
reports += "Checking inputs...\n"
for n in submod.graph.nodes:
if n.op == "placeholder":
if not is_node_output_tensor(n):
reports += f"Input {n.name} is not a tensor, this might cause problems during lowering!\n"
else:
total_input_bytes += get_size_of_node(submod, n)[0]
if n.op == "output":
output_node = n
reports += "Checking outputs...\n"
def get_bytes(node: torch.fx.Node):
nonlocal total_output_bytes
nonlocal reports
if not is_node_output_tensor(node):
reports += f"Output {node.name} is not a tensor, this might cause problems during lowering!\n"
else:
total_output_bytes += get_size_of_node(submod, node)[0]
map_arg(output_node.args, get_bytes)
qps = self.PCIe_BW / max(total_input_bytes, total_output_bytes)
reports += f"Total input size in bytes is {total_input_bytes}, total output size in bytes is {total_output_bytes},"
reports += f" theoretical max qps (bounds by PCIe bandwidth) for this submodule is {qps}.\n"
if qps < max_qps:
max_qps = qps
bottleneck_module = node.target
try:
lowered_submod = self._lower_model_to_backend(submod, submod_inputs)
except RuntimeError:
reports += "Run into an error during lowering!\n"
reports += self._find_culprit(submod, submod_inputs)
continue
try:
lowered_submod(*submod_inputs)
except RuntimeError:
reports += "Run into an error during inference!\n"
reports += self._find_culprit(submod, submod_inputs)
else:
reports += "Lowering and running succeed!\n"
reports += f"\nTheoretical max qps (bounds by PCIe bandwidth) for this model is {max_qps},"
reports += f" bottleneck is submodule {bottleneck_module}."
print(reports)
# return the reports for testing purposes
return reports
# ===============================================================
# Helpers for extend_acc_subgraph() method
# ===============================================================
def find_reverse_deps(
self, tag_id: Optional[int] = None
) -> Dict[torch.fx.Node, NodeSet]:
"""
Builds reversed topological node dependencies, if tag_id is specified,
        we ignore nodes that are in a later subgraph, i.e. nodes that have a greater tag_id.
"""
result: Dict[torch.fx.Node, NodeSet] = defaultdict(set)
for node in self.module.graph.nodes:
if node.op not in CALLABLE_NODE_OPS:
continue
for user in node.users:
if user.op not in CALLABLE_NODE_OPS:
continue
if tag_id is None or (int(user.tag.split("_")[-1]) < tag_id):
result[node].add(user)
return result
def update_reverse_deps_for_fusions(
self, deps: Dict[torch.fx.Node, NodeSet]
):
processed_node = set()
for node, fusion in self.fusions.items():
if node in processed_node:
continue
new_dep = set()
# Create a new dependency set which include all the
# dependencies of the nodes in the fusion group
for n in fusion:
new_dep.update(deps[n])
# Exclude nodes in the fusion
new_dep.difference_update(fusion)
# Update dependency
for n in fusion:
deps[n] = new_dep
for arg in n.all_input_nodes:
if arg not in fusion:
deps[arg].update(fusion)
processed_node.add(n)
def find_parent_nodes_of_subgraph(self, tag: str) -> NodeSet:
"""
Finds parent nodes of the `tag` subgraph.
        Traverses the inputs of nodes in the subgraph; if an input doesn't belong to the subgraph
        and is not a placeholder, we consider it a parent node of the subgraph.
"""
parent_nodes = set()
for node in self.module.graph.nodes:
if node.op in CALLABLE_NODE_OPS and node.tag == tag:
for arg in node.all_input_nodes:
if arg.op in CALLABLE_NODE_OPS and arg.tag != tag:
parent_nodes.add(arg)
return parent_nodes
def extend_acc_subgraph(self, tag: str):
"""
Extend the acc subgraph with `tag` going the reversed topological direction.
"""
# Dict that maps node to its users and ignore users that
# are in the subgraph that has greater tag
deps = self.find_reverse_deps(tag_id=int(tag.split("_")[-1]))
self.update_reverse_deps_for_fusions(deps)
# Parent nodes of the subgraph
parent_nodes = self.find_parent_nodes_of_subgraph(tag)
visited_nodes: NodeSet = set()
while parent_nodes:
node = None
            # Find an acc node that depends on visited nodes only
for n in parent_nodes:
if deps[n] <= visited_nodes and n in self.acc_nodes:
node = n
break
if node is None:
break
# Put the node into `tag` subgraph
node.tag = tag # type: ignore[attr-defined]
parent_nodes.remove(node)
visited_nodes.add(node)
# If node is in a fusion group, add all fusion buddies to parent nodes
if node in self.fusions:
for fusion_node in self.fusions[node]:
if fusion_node not in visited_nodes:
parent_nodes.add(fusion_node)
# Add inputs of the node to parent nodes
for arg in node.all_input_nodes:
if arg.op in CALLABLE_NODE_OPS and arg not in visited_nodes:
parent_nodes.add(arg)
# ===============================================================
# Helpers for split() method
# ===============================================================
def starter_nodes(self) -> Tuple[NodeSet, NodeSet]:
"""
Finds nodes that consume module inputs or get_attr nodes.
"""
starter_cpu_nodes: NodeSet = set()
starter_acc_nodes: NodeSet = set()
for node in self.module.graph.nodes:
if node.op not in {"placeholder", "get_attr"}:
continue
for user in node.users:
if user in self.acc_nodes:
starter_acc_nodes.add(user)
else:
starter_cpu_nodes.add(user)
return starter_cpu_nodes, starter_acc_nodes
def put_nodes_into_subgraphs(self) -> List[Subgraph]:
# We start graph traversal from leaf nodes
current_cpu_nodes, current_acc_nodes = self.starter_nodes()
visited_nodes: NodeSet = set()
# Determine which subgraph to start from based on node dependency
acc_subgraph: bool = True
for n in current_cpu_nodes:
if self.deps[n] <= visited_nodes:
acc_subgraph = False
break
current_subgraph_nodes: NodeList = []
# Result accumulator
subgraphs: List[Subgraph] = []
while current_cpu_nodes or current_acc_nodes:
# Find the first node that should belong to the current subgraph and has all dependencies resolved
current_nodes = current_acc_nodes if acc_subgraph else current_cpu_nodes
node = next(
(n for n in current_nodes if self.deps[n] <= visited_nodes),
None,
)
# If nothing was found, then it's time to flip the mode and start a new subgraph
if node is None:
if not current_subgraph_nodes:
raise FxNetSplitterInternalError("Subgraph can't be empty")
subgraphs.append(
Subgraph(is_acc=acc_subgraph, nodes=current_subgraph_nodes)
)
acc_subgraph = not acc_subgraph
current_subgraph_nodes = []
continue
current_nodes.remove(node)
visited_nodes.add(node)
current_subgraph_nodes.append(node)
# Add fusion buddies
if node in self.fusions:
if node in self.acc_nodes:
current_acc_nodes.update(self.fusions[node] - visited_nodes)
else:
current_cpu_nodes.update(self.fusions[node] - visited_nodes)
# Put depending nodes into the queue
for user in node.users:
if user.op not in CALLABLE_NODE_OPS:
continue
# Add downstream nodes
if user in self.acc_nodes:
current_acc_nodes.add(user)
else:
current_cpu_nodes.add(user)
# Check if the last subgraph was not created
if current_subgraph_nodes:
subgraphs.append(
Subgraph(is_acc=acc_subgraph, nodes=current_subgraph_nodes)
)
if not subgraphs:
raise FxNetSplitterInternalError("Couldn't create subgraphs")
return subgraphs
def remove_small_acc_subgraphs(self, subgraphs: List[Subgraph]) -> List[Subgraph]:
"""
This pass finds ACC submodules with less than specified size and merges
them with adjacent CPU submodules.
"""
result: List[Subgraph] = []
for subgraph in subgraphs:
if subgraph.is_acc:
if len(subgraph.nodes) >= self.settings.min_acc_module_size:
result.append(subgraph)
else:
print(
"Eliminating acc subgraph because it's smaller than the threshold: "
f"{len(subgraph.nodes)} < {self.settings.min_acc_module_size}"
)
if result:
result[-1].nodes.extend(subgraph.nodes)
else:
subgraph.is_acc = False
result.append(subgraph)
else:
if result and not result[-1].is_acc:
result[-1].nodes.extend(subgraph.nodes)
else:
result.append(subgraph)
return result
def tag(self, subgraphs: List[Subgraph]):
self.tags: List[str] = []
for subgraph in subgraphs:
subgraph_name = self.non_acc_submodule_name
tag = f"_run_on_acc_{len(self.tags)}" if subgraph.is_acc else f"{self.non_acc_submodule_name}{len(self.tags)}"
self.tags.append(tag)
for node in subgraph.nodes:
if hasattr(node, "tag"):
raise FxNetSplitterInternalError(f"Node {node} was already tagged")
node.tag = tag # type: ignore[attr-defined]
def split(self, remove_tag: bool = False) -> torch.fx.GraphModule:
split_module = split_by_tags(self.module, self.tags)
if remove_tag:
for node in self.module.graph.nodes:
if hasattr(node, "tag"):
del node.tag
return split_module
def __call__(self) -> torch.fx.GraphModule:
subgraphs = self.put_nodes_into_subgraphs()
subgraphs = self.remove_small_acc_subgraphs(subgraphs)
acc_subgraphs_count = len([s for s in subgraphs if s.is_acc])
non_acc_subgraphs_count = len(subgraphs) - acc_subgraphs_count
print(f"Got {acc_subgraphs_count} acc subgraphs and {non_acc_subgraphs_count} non-acc subgraphs")
self.tag(subgraphs)
return self.split()
def generate_split_results(self) -> SplitResult:
split_module = self()
submodule_names = []
for name, mod in split_module.named_children():
submodule_names.append(name)
submodule_inputs = generate_inputs_for_submodules(split_module, self.sample_input, submodule_names)
return SplitResult(split_module, submodule_inputs, self.non_acc_submodule_name)
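# Editor's note: a hedged end-to-end sketch, not part of the original file. It splits a
# toy model so that sin/cos land in an acc submodule and the add stays on cpu, using a
# toy OperatorSupport built with create_op_support. Exact submodule names depend on the
# tagging scheme above.
def _example_splitter_usage():
    import torch.fx
    from .operator_support import create_op_support
    class _ToyModule(torch.nn.Module):
        def forward(self, a):
            return torch.sin(a) + torch.cos(a)
    gm = torch.fx.symbolic_trace(_ToyModule())
    op_support = create_op_support(
        lambda submods, node: node.op == "call_function" and node.target in (torch.sin, torch.cos)
    )
    splitter = _SplitterBase(gm, [torch.randn(4)], op_support, _SplitterSettingBase())
    return splitter()  # GraphModule with _run_on_acc_* / _run_on_cpu_* children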
| pytorch-master | torch/fx/passes/splitter_base.py |
from typing import Dict, List, Set, Iterable, Optional
from torch.fx.passes.utils.fuser_utils import fuse_by_partitions
from torch.fx.passes.tools_common import NodeList
from torch.fx.graph_module import GraphModule
from torch.fx.node import Node, _get_qualified_name
from torch.fx.passes.operator_support import OperatorSupportBase
from collections import defaultdict
import logging
import itertools
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
class Partition:
def __init__(self, id: int = None, nodes: Iterable[Node] = None):
self.id = id
self.nodes: Set[Node] = set(nodes) if nodes is not None else set()
def __repr__(self) -> str:
return str(self.nodes)
def add_node(self, node: Node):
self.nodes.add(node)
def remove_node(self, node: Node):
self.nodes.remove(node)
def size(self):
return len(self.nodes)
class CapabilityBasedPartitioner:
def __init__(self,
graph_module: GraphModule,
operator_support: OperatorSupportBase,
allows_single_node_partition: bool = False
) -> None:
self.graph_module = graph_module
self.operator_support = operator_support
self.allows_single_node_partition = allows_single_node_partition
        # map of node to its upstream dependency nodes
        # if A is found in dependency_map[B], then B depends on A (i.e. A is an upstream dependency of B)
self.dependency_map = self.__build_dependency_map()
def __build_dependency_map(self) -> Dict[Node, Set[Node]]:
dependency_map = defaultdict(set)
# assumptions: nodes in graph are sorted in topological order
for node in self.graph_module.graph.nodes:
for input_node in node.all_input_nodes:
# add input_node and input_node's upstream dependency
dependency_map[node].add(input_node)
dependency_map[node].update(dependency_map[input_node])
return dependency_map
def __node_depends_on(self, a: Node, b: Node) -> int:
        # Returns
        # 1 if b depends on a (or equivalently, a is an upstream dependency of b)
        # -1 if a depends on b (or equivalently, b is an upstream dependency of a)
        # 0 if a and b don't have a dependency on each other
if a in self.dependency_map[b]:
return 1
elif b in self.dependency_map[a]:
return -1
else:
return 0
def __partition_depends_on(self, partition_a: Partition, partition_b: Partition) -> int:
        # Returns
        # 1 if b depends on a (or equivalently, a is an upstream dependency of b)
        # -1 if a depends on b (or equivalently, b is an upstream dependency of a)
        # 0 if a and b don't have a dependency on each other
# TODO: build a cache here to speedup the query
for node_a in partition_a.nodes:
for node_b in partition_b.nodes:
dependency = self.__node_depends_on(node_a, node_b)
if dependency != 0:
return dependency
return 0
def __get_supported_nodes(self) -> NodeList:
logging.debug("Collecting supported nodes...")
supported_nodes = []
for node in self.graph_module.graph.nodes:
if self.operator_support.is_node_supported(dict(self.graph_module.named_modules()), node):
supported_nodes.append(node)
return supported_nodes
def propose_partitions(self) -> List[Partition]:
candidates: NodeList = self.__get_supported_nodes()
# assumptions: nodes in candidate list is sorted in topological order
        assignment: Dict[Node, int] = {}  # mapping from node to partition_id
partitions_by_id: Dict[int, Partition] = {} # mapping from partition_id to partition
new_partition_id = itertools.count()
def assign(node: Node, id: Optional[int] = None):
            # If id is None, remove the node from its original assignment.
            # If the node has been assigned before, clean up and re-assign.
if node in assignment:
original_id = assignment[node]
del assignment[node]
partitions_by_id[original_id].remove_node(node)
if partitions_by_id[original_id].size() == 0:
del partitions_by_id[original_id]
if id is not None:
assignment[node] = id
if id not in partitions_by_id:
partitions_by_id[id] = Partition(id=id, nodes=[node])
else:
partitions_by_id[id].add_node(node)
logging.debug("Proposing partitions...")
# visit candidates in reversed topological order
for node in reversed(candidates):
# use Dict as an ordered set to ensure deterministic partitioning result, don't care value
user_partitions: Dict[Partition, None] = {}
for user_node in node.users:
if user_node in assignment:
id = assignment[user_node]
user_partitions[partitions_by_id[id]] = None
else:
user_partitions[Partition(nodes=[user_node])] = None
            # Filter out all the partitions that have a dependency on other users
            # TODO: find a better way to do this, rather than pair-wise comparison
user_partitions_list = list(user_partitions.keys())
for i in range(len(user_partitions_list)):
for j in range(i + 1, len(user_partitions_list)):
pi = user_partitions_list[i]
pj = user_partitions_list[j]
dependency = self.__partition_depends_on(pi, pj)
if dependency == 1 and pj in user_partitions:
del user_partitions[pj]
elif dependency == -1 and pi in user_partitions:
del user_partitions[pi]
# We use the following rules for partition assignment:
# 1. If none of the candidates has been assigned to a partition, create a new partition
# 2. If there is one partition candidate, assign to the partition
            # 3. If there is more than one candidate partition, assign the current node to the first partition and
            #    merge the other partitions into the first one, since the partitions in user_partitions have no
            #    dependencies on each other.
assigned_candidate_partition_ids = [partition.id for partition in user_partitions if partition.id is not None]
if len(assigned_candidate_partition_ids) == 0:
# create a new partition
assign(node, next(new_partition_id))
elif len(assigned_candidate_partition_ids) == 1:
id = assigned_candidate_partition_ids[0]
assign(node, id)
else:
                # users are assigned to more than one partition; since the partitions in user_partitions
                # have no dependencies on each other, they can be fused into a single partition
id = assigned_candidate_partition_ids[0]
assign(node, id)
reassignment: Dict[Node, int] = {}
for other_id in assigned_candidate_partition_ids[1:]:
for other_node in partitions_by_id[other_id].nodes:
reassignment[other_node] = id
for other_node in reassignment:
assign(other_node, id)
# post processing to re-assign "getitem" nodes into upstream partition
logger.debug("Reassigning getitem nodes to its producer node's partition...")
nodes_reassignment: Dict[Node, int] = {}
for node in self.graph_module.graph.nodes:
is_tuple_output = True
for user in node.users:
if user.op != "call_function" or \
_get_qualified_name(user.target) != "_operator.getitem": # type: ignore[arg-type]
is_tuple_output = False
break
# node has tuple outputs, re-assign all following getitem node into node's partition
if is_tuple_output:
id = assignment.get(node, None) # type: ignore[arg-type]
for user in node.users:
if assignment.get(user, None) != id: # type: ignore[arg-type]
nodes_reassignment[user] = id
for node, id in nodes_reassignment.items():
assign(node, id)
# filter out single node partitions
if not self.allows_single_node_partition:
logger.debug("Filtering out single node partitions...")
non_compute_ops = {"torch.ops.aten.view", "_operator.getitem"}
partitions_to_remove: List[int] = []
for id, partition in partitions_by_id.items():
compute_node_count = 0
for node in partition.nodes:
if node.op == "call_function" and \
_get_qualified_name(node.target) not in non_compute_ops: # type: ignore[arg-type]
compute_node_count += 1
if compute_node_count <= 1:
partitions_to_remove.append(id)
for id in partitions_to_remove:
del partitions_by_id[id]
logging.debug("Partitions proposed:")
for id, partition in partitions_by_id.items():
logging.debug(f"partition #{id}", [node.name for node in partition.nodes])
return list(partitions_by_id.values())
def fuse_partitions(self, partitions: List[Partition]) -> GraphModule:
logging.debug("Fusing partitions...")
# fuse_by_partitions expects partitions in List[List[Node]]: [ [node0, node1], [node2, node3] ]
return fuse_by_partitions(self.graph_module, [list(partition.nodes) for partition in partitions])
def partition_and_fuse(self) -> GraphModule:
partitions = self.propose_partitions()
fused_gm = self.fuse_partitions(partitions)
return fused_gm
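# Editor's note: a hedged usage sketch, not part of the original file. Every
# call_function node of a toy graph is declared supported, so the partitioner fuses
# them into a single submodule via partition_and_fuse().
def _example_capability_partitioner():
    import torch
    import torch.fx
    from torch.fx.passes.operator_support import create_op_support
    class _ToyModule(torch.nn.Module):
        def forward(self, a):
            return torch.sin(a) + torch.cos(a)
    gm = torch.fx.symbolic_trace(_ToyModule())
    op_support = create_op_support(lambda submods, node: node.op == "call_function")
    partitioner = CapabilityBasedPartitioner(gm, op_support)
    return partitioner.partition_and_fuse()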
| pytorch-master | torch/fx/passes/infra/partitioner.py |
from . import pass_manager
| pytorch-master | torch/fx/passes/infra/__init__.py |
import abc
from collections import namedtuple
from typing import Optional
from torch.fx.graph_module import GraphModule
from torch.fx._compatibility import compatibility
__all__ = ['PassResult', 'PassBase']
@compatibility(is_backward_compatible=False)
class PassResult(namedtuple("PassResult", ["graph_module", "modified"])):
"""
Result of a pass:
graph_module: The modified graph module
modified: A flag for if the pass has modified the graph module
"""
def __new__(cls, graph_module, modified):
return super().__new__(cls, graph_module, modified)
@compatibility(is_backward_compatible=False)
class PassBase(abc.ABC):
"""
Base interface for implementing passes.
    Subclasses must implement the `call` function. Instances of a class
    implementing this interface are callable, so they can be passed directly
    into the PassManager's `passes` attribute and invoked as functions.
"""
def __init__(self) -> None:
pass
def __call__(self, graph_module: GraphModule) -> Optional[PassResult]:
"""
Runs the precondition check, the pass itself, and the postcondition check.
"""
self.requires(graph_module)
res = self.call(graph_module)
self.ensures(graph_module)
return res
@abc.abstractmethod
def call(self, graph_module: GraphModule) -> Optional[PassResult]:
"""
The pass that is run through the given graph module. To implement a
pass, it is required to implement this function.
Args:
graph_module: The graph module we will run a pass on
"""
pass
def requires(self, graph_module: GraphModule) -> None:
"""
This function will be called before the pass is run and will check that
the given graph module contains the preconditions needed to run the
pass. It is not required to implement this function.
Args:
graph_module: The graph module we will run checks on
"""
pass
def ensures(self, graph_module: GraphModule) -> None:
"""
This function will be called after the pass is run and will check that
the given graph module contains the postconditions needed to run the
pass. It is not required to implement this function.
Args:
graph_module: The graph module we will run checks on
"""
pass
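# A minimal sketch of a concrete pass built on PassBase. `_ExampleNoopPass` is a
# hypothetical name used purely for illustration; a real pass would transform
# graph_module.graph inside call() and report whether anything changed.
class _ExampleNoopPass(PassBase):
    def call(self, graph_module: GraphModule) -> Optional[PassResult]:
        # No transformation is performed, so the graph module is returned unmodified.
        return PassResult(graph_module, False)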
| pytorch-master | torch/fx/passes/infra/pass_base.py |
import inspect
from queue import Queue
from functools import wraps
from typing import Callable, Dict, List
import torch.nn as nn
from torch.fx.graph_module import GraphModule
from torch.fx._compatibility import compatibility
from torch.fx.passes.infra.pass_base import PassResult
__all__ = ['inplace_wrapper', 'pass_result_wrapper', 'this_before_that_pass_constraint', 'PassManager']
@compatibility(is_backward_compatible=False)
def inplace_wrapper(fn: Callable) -> Callable:
"""
Convenience wrapper for passes which modify an object inplace. This
wrapper makes them return a PassResult containing the modified object and
True for the "modified" flag.
Args:
fn (Callable[Module, Any])
Returns:
wrapped_fn (Callable[Module, PassResult])
"""
if fn is None:
return None
@wraps(fn)
def wrapped_fn(gm):
fn(gm)
return PassResult(gm, True)
return wrapped_fn
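# A minimal sketch of wrapping an in-place transformation with inplace_wrapper.
# `_dead_code_pass` is a hypothetical pass that mutates the module and returns
# nothing useful; the wrapper adapts it to the PassResult-returning interface.
def _example_inplace_wrapper_usage() -> Callable:
    def _dead_code_pass(gm: GraphModule) -> None:
        gm.graph.eliminate_dead_code()  # mutates gm.graph in place
        gm.recompile()
    return inplace_wrapper(_dead_code_pass)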
@compatibility(is_backward_compatible=False)
def pass_result_wrapper(fn: Callable) -> Callable:
"""
Wrapper for passes which currently do not return a PassResult.
This wrapper makes them return a PassResult containing the modified object
and True for the "modified" flag.
Args:
fn (Callable[Module, Any])
Returns:
wrapped_fn (Callable[Module, PassResult])
"""
if fn is None:
return None
@wraps(fn)
def wrapped_fn(gm):
gm = fn(gm)
return PassResult(gm, True)
return wrapped_fn
def _validate_pass_schedule_constraint(
constraint: Callable[[Callable, Callable], bool], passes: List[Callable]
) -> None:
for i, a in enumerate(passes):
for j, b in enumerate(passes[i + 1 :]):
if constraint(a, b):
continue
            raise RuntimeError(
                f"pass schedule constraint violated. Expected {a} before {b}"
                f" but found {a} at index {i} and {b} at index {i + j + 1} in the"
                f" pass list."
            )
def _topological_sort_passes(
passes: List[Callable], constraints: List[Callable]
) -> List[Callable]:
"""
Args
passes: Passes that we are ordering
constraints: Constraints applied on these passes
    Returns
        A sorted list of callables. A RuntimeError is raised if the constraints
        imply a circular dependency among the passes.
"""
if len(constraints) == 0:
return passes
    # Construct a graph mapping nodes to a list of their users
graph: Dict[Callable, List[Callable]] = {p : [] for p in passes}
indegree_map: Dict[Callable, int] = {p : 0 for p in passes}
candidates: Queue = Queue()
for a in passes:
for b in passes:
if a == b:
continue
for constraint in constraints:
if not constraint(a, b):
graph[b].append(a)
indegree_map[a] += 1
if indegree_map[a] == 0:
candidates.put(a)
visited: Dict[Callable, bool] = {p : False for p in passes}
sorted_passes: List[Callable] = []
while not candidates.empty():
p = candidates.get()
sorted_passes.append(p)
visited[p] = True
for n in graph[p]:
if not visited[n]:
indegree_map[n] -= 1
if indegree_map[n] == 0:
candidates.put(n)
# Check if there are unvisited nodes (aka cycles in the graph)
cycle_passes = list(filter(lambda p: indegree_map[p] != 0, indegree_map.keys()))
if len(cycle_passes) != 0:
error = f"Circular dependency detected within the following passes: {cycle_passes}"
raise RuntimeError(error)
return sorted_passes
@compatibility(is_backward_compatible=False)
def this_before_that_pass_constraint(this: Callable, that: Callable) -> Callable:
"""
Defines a partial order ('depends on' function) where `this` must occur
before `that`.
For example, the following pass list and constraint list would be invalid.
```
passes = [pass_b, pass_a]
constraints = [
this_before_that_pass_constraint(pass_a, pass_b)
]
```
Args:
this (Callable): pass which should occur first
that (Callable): pass which should occur later
Returns:
        depends_on (Callable[[Callable, Callable], bool])
"""
def depends_on(a: Callable, b: Callable):
if a == that and b == this:
return False
return True
return depends_on
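# A minimal sketch of how the returned `depends_on` behaves. `pass_a` and
# `pass_b` are hypothetical passes; the constraint only rejects the ordering
# where `that` would run before `this`.
def _example_this_before_that_constraint() -> Callable:
    def pass_a(gm):
        return PassResult(gm, False)
    def pass_b(gm):
        return PassResult(gm, False)
    constraint = this_before_that_pass_constraint(pass_a, pass_b)
    assert constraint(pass_a, pass_b)      # pass_a before pass_b is allowed
    assert not constraint(pass_b, pass_a)  # pass_b before pass_a violates the constraint
    return constraint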
@compatibility(is_backward_compatible=False)
class PassManager:
"""
Construct a PassManager.
Collects passes and constraints. This defines the pass schedule, manages
pass constraints and pass execution.
Args:
passes (Optional[List[Callable]]): List of passes. A pass is a
callable which modifies an object and returns a PassResult
        constraint (Optional[List[Callable]]): List of constraints. A
            constraint is a callable which takes two passes (A, B) and returns
            True if the ordering (A before B) is valid, and False otherwise. See
            the implementation of `this_before_that_pass_constraint` for an example.
steps (int): Max number of times we run the passes (default = 1).
run_checks_after_each_pass (bool): Whether to run checks and linting
after each pass
suppress_check_failures (bool): Whether to raise errors when running
checks
"""
passes: List[Callable[[nn.Module], PassResult]] = []
constraints: List[Callable[[Callable, Callable], bool]] = []
_validated: bool = False
steps: int = 1
def __init__(
self,
passes=None,
constraints=None,
steps=None,
run_checks_after_each_pass: bool = False,
suppress_check_failures: bool = False,
):
if passes:
self.passes = passes
if constraints:
self.constraints = constraints
if steps:
self.steps = steps
self.run_checks_after_each_pass = run_checks_after_each_pass
self.suppress_check_failures = suppress_check_failures
def add_pass(self, _pass: Callable):
"""
Adds a pass into the current list of passes.
"""
self.passes.append(_pass)
self._validated = False
def add_constraint(self, constraint: Callable):
"""
Adds a constraint into the current list of constraints.
"""
self.constraints.append(constraint)
self._validated = False
def validate_constraints(self):
"""
Validates that current pass schedule defined by `self.passes` is valid
according to all constraints in `self.constraints`
"""
if self._validated:
return
for constraint in self.constraints:
_validate_pass_schedule_constraint(constraint, self.passes)
self._validated = True
def solve_constraints(self):
"""
Finds a valid traversal order based on the given constraints and orders
the passes based on this order.
        If a circular dependency exists between the constraints, a RuntimeError
        is raised, since no valid ordering of the passes exists.
"""
self.passes = _topological_sort_passes(self.passes, self.constraints)
self._validated = True
def add_checks(self, check: Callable) -> None:
"""
        Adds a function which runs various checks on a given graph module.
This function is run before and after each pass if the
`run_checks_after_each_pass` flag is enabled.
"""
sig = inspect.signature(check)
if len(list(sig.parameters.values())) != 1:
raise TypeError("PassManager check function should only take in one variable, a module")
setattr(self, "check", check) # noqa: B010
def check(self, module: nn.Module) -> None:
pass
def __call__(self, module: nn.Module) -> PassResult:
"""
Runs a list of passes in the order based on `self.passes` on the given
graph module. Each time a pass is run, checks and linting will be run on
the graph module if `run_checks_after_each_pass` is set.
If the module is a graph module, we will run the list of passes until
the graph stops changing, or until `steps` number of times.
"""
# Order the passes based on the constraints
if not self._validated:
self.solve_constraints()
# Check graph invariants
self.check(module)
# Run the set of passes `steps` number of times or until the graph stops
# changing
overall_modified = False
for _ in range(self.steps):
modified = False
# Run the set of passes on the graph module
for fn in self.passes:
res = fn(module)
module = res.graph_module
modified = modified or res.modified
if isinstance(module, GraphModule):
module.recompile()
# Check graph invariants
if self.run_checks_after_each_pass:
self.check(module)
# If the graph no longer changes, then we can stop running these passes
overall_modified = overall_modified or modified
if not modified:
break
return PassResult(module, overall_modified)
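# A minimal end-to-end sketch of PassManager. Both passes below are hypothetical:
# the first is a no-op, the second removes dead code from GraphModules. The
# constraint forces the no-op pass to be scheduled first.
def _example_pass_manager_usage(module: nn.Module) -> PassResult:
    def noop_pass(gm):
        return PassResult(gm, False)
    def dead_code_pass(gm):
        if isinstance(gm, GraphModule):
            changed = gm.graph.eliminate_dead_code()
            gm.recompile()
            return PassResult(gm, changed)
        return PassResult(gm, False)
    pm = PassManager(passes=[noop_pass, dead_code_pass])
    pm.add_constraint(this_before_that_pass_constraint(noop_pass, dead_code_pass))
    return pm(module)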
| pytorch-master | torch/fx/passes/infra/pass_manager.py |
pytorch-master | torch/fx/passes/backends/__init__.py |
|
import torch
from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner
from torch.fx.passes.operator_support import OperatorSupport
from torch.fx.passes.tools_common import CALLABLE_NODE_OPS
from torch.fx.passes.fake_tensor_prop import FakeTensorProp
from torch.utils._pytree import tree_map
import operator
class CudaGraphsSupport(OperatorSupport):
# TODO: why is submodules passed here
def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
if node.op not in CALLABLE_NODE_OPS:
return False
if node.target in [torch.ops.aten.embedding_dense_backward.default]:
return False
if node.target in [operator.getitem]:
return True
found_not_cuda = False
def find_not_cuda(t):
nonlocal found_not_cuda
if isinstance(t, torch.Tensor) and t.device.type != 'cuda':
found_not_cuda = True
for n in node.all_input_nodes:
tree_map(find_not_cuda, n.meta['fake_result'])
tree_map(find_not_cuda, node.meta['fake_result'])
# NB: factory function is accounted for because the result would be
# cpu or cuda
return not found_not_cuda
def partition_cudagraphs(gm, inputs):
"""
Partition an FX graph into sub-GraphModules that can be validly run under
    CUDA graphs. For a subgraph to be runnable under CUDA graphs, all of its
    operations must involve CUDA tensors only.
"""
FakeTensorProp(gm).propagate(*inputs)
supported_ops = CudaGraphsSupport()
# TODO: single node partition may be wrong due to the pessimization
# from copying in and out the data. Check in benchmarks, perhaps
partitioner = CapabilityBasedPartitioner(gm, supported_ops, allows_single_node_partition=True)
partitions = partitioner.propose_partitions()
fused_graph = partitioner.fuse_partitions(partitions)
return fused_graph
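# A minimal usage sketch (requires a CUDA device). The traced function `f` is
# hypothetical and purely functional, so every node produces CUDA tensors and
# the whole graph can be grouped into CUDA-graph-safe submodules.
def _example_partition_cudagraphs():
    def f(x, y):
        return torch.relu(x) + torch.sigmoid(y)
    gm = torch.fx.symbolic_trace(f)
    inputs = [torch.randn(8, device="cuda"), torch.randn(8, device="cuda")]
    return partition_cudagraphs(gm, inputs)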
| pytorch-master | torch/fx/passes/backends/cudagraphs.py |
from typing import Dict
import torch
from torch.nn import Module
from torch._ops import OpOverload
from torch.fx import GraphModule
from torch.fx.node import Node, _get_qualified_name
from torch.fx.passes.operator_support import OperatorSupport
from torch.fx.passes.tools_common import CALLABLE_NODE_OPS
from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner
from torch._prims.executor import execute
from torch.fx.experimental.proxy_tensor import DecompositionInterpreter
from torch._decomp import decomposition_table
import typing as t
import logging
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
def aten_to_dtype(self, dtype: torch.dtype, **kwargs):
if len(kwargs) > 0 or not dtype:
raise RuntimeError("No support for other to.dtype() formats other than to.dtype(self, dtype)")
return torch._prims.convert_element_type(self, dtype)
# decomposition_table currently contains both aten2aten and aten2prim decomposition
# this is a hack to separate them, as we only need aten2prim decomposition for nvfuser-supported aten graph lowering
aten2aten_decomp = {}
aten2prim_decomp = {}
aten2aten_decomp_skips = {
"aten.native_layer_norm_backward.default",
"aten.embedding_dense_backward.default", # This is hurting nvfuser's perf
"aten.addmm.default"
}
for op, decomp_fn in decomposition_table.items():
if "torch._refs" in decomp_fn.__module__:
aten2prim_decomp[op] = decomp_fn
else:
if str(op) not in aten2aten_decomp_skips:
aten2aten_decomp[op] = decomp_fn
aten2prim_decomp[torch.ops.aten.to.dtype] = aten_to_dtype
class NvFuserOperatorSupport(OperatorSupport):
"""
Operator support for nvFuser backend.
    Currently, partitioning is based on the FX ATen graph. The fused subgraph will later be decomposed into prims.
    To determine if an ATen op is supported by nvFuser, we check the prim ops used in its reference decomposition.
    Only if all the prim ops in the reference have an nvfuser_impl do we say the ATen op is supported by nvFuser.
Note: When adding a rule, please add it to the corresponding section and follow the
alphabetical order.
"""
def __init__(self):
# TODO: current list copied from torch/csrc/jit/codegen/cuda/parser.cpp is incorrect,
        # as that file is solely for TorchScript and doesn't represent whether an
        # operation would actually be runnable by primTorch+nvFuser.
        # We will iterate on this list to reflect reality.
support_dict = {
# ===============================================================
# call_function aten
# ===============================================================
# Following supported aten ops is copied from torch/csrc/jit/codegen/cuda/parser.cpp
# TODO: might need to update according to supported input types
"torch.ops.aten.add": None,
"torch.ops.aten.sub": None,
# "torch.ops.aten.rsub": None, # rsub decomp is supported at aten2aten level
"torch.ops.aten.div": None,
"torch.ops.aten.atan2": None,
"torch.ops.aten.mul": None,
"torch.ops.aten.max": None,
"torch.ops.aten.min": None,
"torch.ops.aten.pow": None,
"torch.ops.aten.remainder": None,
"torch.ops.aten.fmod": None,
"torch.ops.aten.bitwise_and": None,
"torch.ops.aten.__and__": None,
"torch.ops.aten.bitwise_or": None,
"torch.ops.aten.__or__": None,
"torch.ops.aten.bitwise_xor": None,
"torch.ops.aten.__xor__": None,
"torch.ops.aten.bitwise_left_shift": None,
"torch.ops.aten.__lshift__": None,
"torch.ops.aten.bitwise_right_shift": None,
"torch.ops.aten.__rshift__": None,
"torch.ops.aten.eq": None,
"torch.ops.aten.ne": None,
"torch.ops.aten.ge": None,
"torch.ops.aten.gt": None,
"torch.ops.aten.le": None,
"torch.ops.aten.lt": None,
"torch.ops.aten.abs": None,
"torch.ops.aten.bitwise_not": None,
"torch.ops.aten.ceil": None,
"torch.ops.aten.floor": None,
"torch.ops.aten.frac": None,
"torch.ops.aten.neg": None,
"torch.ops.aten.relu": None,
"torch.ops.aten.round": None,
"torch.ops.aten.silu": None,
"torch.ops.aten.trunc": None,
"torch.ops.aten.log": None,
"torch.ops.aten.log10": None,
"torch.ops.aten.log1p": None,
"torch.ops.aten.log2": None,
"torch.ops.aten.lgamma": None,
"torch.ops.aten.exp": None,
"torch.ops.aten.expm1": None,
"torch.ops.aten.erf": None,
"torch.ops.aten.erfc": None,
"torch.ops.aten.cos": None,
"torch.ops.aten.acos": None,
"torch.ops.aten.cosh": None,
"torch.ops.aten.sin": None,
"torch.ops.aten.asin": None,
"torch.ops.aten.sinh": None,
"torch.ops.aten.tan": None,
"torch.ops.aten.atan": None,
"torch.ops.aten.tanh": None,
"torch.ops.aten.atanh": None,
"torch.ops.aten.sqrt": None,
"torch.ops.aten.rsqrt": None,
"torch.ops.aten.reciprocal": None,
"torch.ops.aten.sigmoid": None,
"torch.ops.aten.isfinite": None,
"torch.ops.aten.isinf": None,
"torch.ops.aten.isnan": None,
"torch.ops.aten.isneginf": None,
"torch.ops.aten.isposinf": None,
"torch.ops.aten.isreal": None,
# "torch.ops.aten.rand_like": None, # causing Node empty_like_default does not support nvfuser
"torch.ops.aten.softplus": None,
"torch.ops.aten.threshold": None,
# relying on aten->aten->prim decomp, aten2aten is using unsupported aten.new_zero op
# "torch.ops.aten.threshold_backward": None,
"torch.ops.aten.clamp": None,
# "torch.ops.aten.clone": None,
# Failing with where(): incompatible function arguments: \
# [<torch._C._nvfuser.TensorView, tensor, <torch._C._nvfuser.TensorView]
# failing with BERT_pytorch_forward_0, which has aten.where.ScalarSelf in the decomps
# "torch.ops.aten.where": None,
# However, aten.where.self overload is fully supported
"torch.ops.aten.where.self": None,
"torch.ops.aten.lerp": None,
"torch.ops.aten.addcmul": None,
# "torch.ops.aten.native_dropout": None, # missing refs for aten.rank_like
"torch.ops.aten.dropout": None,
# "torch.ops.aten.native_dropout_backward": None, # missing refs for aten.type_as
"torch.ops.aten.instance_norm": None,
"torch.ops.aten._batch_norm_impl_index": None,
# "torch.ops.aten.native_batch_norm": None, # missing refs for aten.var
"torch.ops.aten.batch_norm": None,
"torch.ops.aten.cudnn_batch_norm": None,
"torch.ops.aten._batch_norm_impl_index_backward": None,
# "torch.ops.aten.native_batch_norm_backward": None, # should have been handled at aten2aten decomp
"torch.ops.aten.native_layer_norm": None,
"torch.ops.aten.layer_norm": None,
# relying on aten->aten->prim decomp, aten2aten is using unsupported aten.div
# "torch.ops.aten.native_layer_norm_backward": None,
"torch.ops.aten.softmax.int": None,
"torch.ops.aten.log_softmax.int": None,
# relying on aten->aten->prim decomp, aten2aten is using unsupported aten.amax
# "torch.ops.aten._softmax": None,
"torch.ops.aten._log_softmax_backward_data": None,
# "torch.ops.aten._softmax_backward_data": None, # Node _softmax_backward_data_default does not support nvfuser
# "torch.ops.aten.var.dim": None, # missing refs
"torch.ops.aten.std.dim": None,
"torch.ops.aten.sum": None,
# "torch.ops.aten.mean.dim": None, # missing refs
"torch.ops.aten._grad_sum_to_size": None,
"torch.ops.aten.sum_to_size": None,
"torch.ops.aten._autocast_to_reduced_precision": None,
"torch.ops.aten._autocast_to_full_precision": None,
# "torch.ops.aten.to.dtype": None, # causing segfault
# "torch.ops.aten.type_as": None, # missing refs
"torch.ops.aten.linear": None,
"torch.ops.aten.gelu": None,
# "torch.ops.aten.gelu_backward": None, # gelu_backward is handled at aten2aten decomp
# "torch.ops.aten.hardtanh": None, # has functional ref, using unsupported aten.clamp
"torch.ops.aten.leaky_relu": None,
"torch.ops.aten.square": None,
# relying on aten->aten->prim decomp, aten2aten is using unsupported aten.conj_physical
"torch.ops.aten.tanh_backward": None,
# "torch.ops.aten.amax": None, # missing prim decomp
# "torch.ops.aten.amin": None, # missing prim decomp
# "torch.ops.aten.reshape": None,
# "torch.ops.aten.view": None, # missing prim decomp
"torch.ops.aten.flatten.using_ints": None,
# ===============================================================
# call_function builtins and operator
# ===============================================================
"getattr": None,
"_operator.getitem": None,
}
super().__init__(support_dict)
def is_node_supported(
self, submodules: t.Mapping[str, Module], node: Node
) -> bool:
# nvFuser FX subgraph should be purely functional
if node.op not in CALLABLE_NODE_OPS:
return False
        # Ops in the support dict don't carry an overload name, so
        # use the overloadpacket's qualified name for OpOverload targets.
if isinstance(node.target, OpOverload):
target = _get_qualified_name(node.target.overloadpacket)
if target in self._support_dict:
return True
return super().is_node_supported(submodules, node)
class NvFuserBackend:
def __init__(self):
self.supported_ops = NvFuserOperatorSupport()
# TODO: this is a naive implementation of cache without proper guard
self.partitioner_cache: Dict[GraphModule, GraphModule] = {}
# TODO: this is a naive implementation of cache without proper guard, this will only work for identical inputs
self.prim_decomp_cache: Dict[GraphModule, GraphModule] = {}
def lower_to_prims_and_execute(self, graph_module: GraphModule, *args, **kwargs):
# `graph_module` is an Aten-Fx graph
# "lowering to prims" and "trace execution" are grouped into this function, as they are both input dependent
if graph_module in self.prim_decomp_cache:
logging.debug("prim_decomp_cache hit!")
prim_module = self.prim_decomp_cache[graph_module]
else:
prim_graph = torch.fx.Graph()
DecompositionInterpreter(graph_module, prim_graph, decomposition_table=aten2prim_decomp).run(*args, **kwargs)
prim_module = torch.fx.GraphModule(graph_module, prim_graph)
self.prim_decomp_cache[graph_module] = prim_module
        logger.debug("Lower to prims graph: %s", prim_module.code)
# invokes trace executor for running the prim graph
return execute(prim_module, *args, executor="nvfuser")
def compile(self, graph_module: GraphModule) -> GraphModule:
# entry function for nvFuser backend
        logger.debug("Compiling graph_module: %s", graph_module.code)
# FX graph based partitioning based on nvfuser supported ops
if graph_module in self.partitioner_cache:
logging.debug("partitioner_cache hit!")
fused_graph_module = self.partitioner_cache[graph_module]
else:
partitioner = CapabilityBasedPartitioner(
graph_module, self.supported_ops, allows_single_node_partition=False)
fused_graph_module = partitioner.partition_and_fuse()
self.partitioner_cache[graph_module] = fused_graph_module
# Overriding fused_module's __call__() function with lower_to_prims_and_execute()
for node in fused_graph_module.graph.nodes:
# TODO: use a better way to identify fused submodule
if node.op == "call_module" and "fused_" in node.name:
fused_module = getattr(fused_graph_module, node.name)
fused_module._wrapped_call = self.lower_to_prims_and_execute
return fused_graph_module
def __call__(self, graph_module: GraphModule, _) -> GraphModule:
# wrap self.compile as __call__ function to fit the interface for AOTAutograd's fw_compiler
return self.compile(graph_module)
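# A minimal usage sketch. `aten_gm` is assumed to be an ATen-level fx.GraphModule
# (for example, one produced by AOTAutograd or make_fx); the backend partitions
# it by nvFuser support and routes fused submodules through the prims executor.
def _example_nvfuser_backend(aten_gm: GraphModule) -> GraphModule:
    backend = NvFuserBackend()
    return backend.compile(aten_gm)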
| pytorch-master | torch/fx/passes/backends/nvfuser.py |
pytorch-master | torch/fx/passes/tests/__init__.py |
|
import unittest
from ..pass_manager import (
inplace_wrapper,
PassManager,
these_before_those_pass_constraint,
this_before_that_pass_constraint,
)
class TestPassManager(unittest.TestCase):
def test_pass_manager_builder(self) -> None:
passes = [lambda x: 2 * x for _ in range(10)]
pm = PassManager(passes)
pm.validate()
def test_this_before_that_pass_constraint(self) -> None:
passes = [lambda x: 2 * x for _ in range(10)]
pm = PassManager(passes)
# add unfulfillable constraint
pm.add_constraint(this_before_that_pass_constraint(passes[-1], passes[0]))
self.assertRaises(RuntimeError, pm.validate)
def test_these_before_those_pass_constraint(self) -> None:
passes = [lambda x: 2 * x for _ in range(10)]
constraint = these_before_those_pass_constraint(passes[-1], passes[0])
pm = PassManager(
[inplace_wrapper(p) for p in passes]
)
# add unfulfillable constraint
pm.add_constraint(constraint)
self.assertRaises(RuntimeError, pm.validate)
| pytorch-master | torch/fx/passes/tests/test_pass_manager.py |
from .common import lift_subgraph_as_module, HolderModule, compare_graphs
| pytorch-master | torch/fx/passes/utils/__init__.py |
from torch.nn import Module
from torch.fx.graph_module import GraphModule
from torch.fx.graph import Graph
from torch.fx.passes.utils.matcher_utils import SubgraphMatcher
from torch.fx._compatibility import compatibility
__all__ = ['HolderModule', 'lift_subgraph_as_module', 'compare_graphs']
@compatibility(is_backward_compatible=False)
class HolderModule(Module):
"""
    HolderModule is used to copy all the attributes from the original module to the submodules
    that use those attributes.
"""
def __init__(self, d):
super().__init__()
for k, v in d.items():
self.add_module(k, v)
@compatibility(is_backward_compatible=False)
def lift_subgraph_as_module(gm: GraphModule, subgraph: Graph, class_name: str = 'GraphModule') -> GraphModule:
"""
    Create a GraphModule for subgraph, which copies the necessary attributes from the original parent graph_module.
Args:
gm (GraphModule): parent graph module
subgraph (Graph): a valid subgraph that contains copied nodes from the parent graph
class_name (str): name for the submodule
"""
# Loop through all module calls (call_module) and param fetches (get_attr)
# in this component, creating HolderModules as necessary to match the path.
    # e.g. if the original module has a get_attr node that fetches "conv.weight",
    # we create a HolderModule as root -> add a HolderModule named "conv" ->
    # make "weight" an attribute of the "conv" HolderModule and point it to conv.weight in
# the original module.
submodule = HolderModule({})
for n in subgraph.nodes:
if n.op not in ("call_module", "get_attr"):
continue
target = n.target
assert isinstance(target, str)
target_name_parts = target.split(".")
curr = submodule
orig_gm = gm
for name in target_name_parts[:-1]:
if not hasattr(curr, name):
curr.add_module(name, HolderModule({}))
curr = getattr(curr, name)
orig_gm = getattr(orig_gm, name)
leaf_node_name = target_name_parts[-1]
leaf_node = getattr(orig_gm, leaf_node_name)
# Relies on custom __setattr__ magic.
setattr(curr, leaf_node_name, leaf_node)
return GraphModule(submodule, subgraph, class_name)
@compatibility(is_backward_compatible=False)
def compare_graphs(left: Graph, right: Graph) -> bool:
"""
Return True if two graphs are identical, i.e they
- have the same number of outputs in the same order
- have the same number of inputs in the same order
- have the same set of nodes, and identical connectivity
"""
matcher = SubgraphMatcher(left, match_output=True, match_placeholder=True)
matches = matcher.match(right)
return len(matches) > 0
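# A minimal sketch of compare_graphs: two independent traces of the same
# hypothetical function `f` produce structurally identical graphs.
def _example_compare_graphs() -> bool:
    import torch
    from torch.fx import symbolic_trace
    def f(x):
        return torch.relu(x) + 1
    left = symbolic_trace(f).graph
    right = symbolic_trace(f).graph
    return compare_graphs(left, right)  # expected to be True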
| pytorch-master | torch/fx/passes/utils/common.py |
from dataclasses import dataclass, field
from collections import defaultdict
import copy
import torch.library
from torch.fx.graph import Graph
from torch.fx.node import Node
from torch.fx._compatibility import compatibility
from typing import Dict, List, Set
__all__ = ['SubgraphMatcher', 'InternalMatch']
pseudo = torch.library.Library("pseudo", "DEF")
pseudo.define("any() -> ()")
"""
pseudo.any is a wildcard node that can be matched with any fx node with an arbitrary number of inputs and outputs.
For example, to match relu followed by one fx node:
def pattern(a):
y = a.relu()
z = torch.ops.pseudo.any(y)
return z
"""
pseudo.define("oneof(*, str[] targets) -> ()")
"""
pseudo.oneof is a special node that can be matched with an fx node whose target is in the permissible list.
`targets` must be a list of qualified names for operators, e.g. ["operator.add", "torch.sigmoid",
"torch.ops.aten.foo", "torch.ops.prims.bar"]
For example, using following pattern with pseudo.oneof
def pattern(a):
y = a.relu()
z = torch.ops.pseudo.oneof(y, targets=["relu", "torch.sigmoid", "operator.add"])
return z
It will have 3 matches in the following function
def forward(y):
z = y.relu()
x = z.relu() # first match
x = x.relu()
x = torch.sigmoid(x) # second match
x = x.relu()
return x + 1 # third match
"""
@compatibility(is_backward_compatible=False)
@dataclass
class InternalMatch():
# Nodes from which the match was found
anchors: List[Node]
# Maps nodes in the pattern subgraph to nodes in the larger graph
nodes_map: Dict[Node, Node] = field(default_factory=dict)
# nodes in target graph that are matched placeholder in pattern
placeholder_nodes: List[Node] = field(default_factory=list)
# nodes in matched subgraph returned by output
returning_nodes: List[Node] = field(default_factory=list)
def __copy__(self):
return InternalMatch(anchors=self.anchors, nodes_map=self.nodes_map.copy(),
placeholder_nodes=self.placeholder_nodes.copy(),
returning_nodes=self.returning_nodes.copy())
@compatibility(is_backward_compatible=False)
class SubgraphMatcher:
def __init__(self, pattern: Graph,
match_output: bool = False,
match_placeholder: bool = False,
remove_overlapping_matches: bool = True) -> None:
"""
Args:
pattern: the targeted matching pattern, represented in fx.Graph.
match_output: If True, output node in the pattern graph will be treated as a part of the targeted pattern.
If False, output node is ignored during match.
match_placeholder: If True, placeholder node in the pattern graph will be treated as a part of
                the targeted pattern. If False, placeholder nodes will be used as wildcards.
remove_overlapping_matches: If True, in the case of overlapping matches, only the first match
will be returned.
"""
self.pattern = pattern
self.match_output = match_output
self.match_placeholder = match_placeholder
self.remove_overlapping_matches = remove_overlapping_matches
if len(pattern.nodes) == 0:
raise ValueError("SubgraphMatcher cannot be initialized with an empty pattern")
for node in pattern.nodes:
if node.op != "output":
                assert len(node.users) > 0, \
                    "SubgraphMatcher cannot be initialized with a pattern containing dead code"
# TODO: assert pattern is a connected graph
self.pattern_placeholder_nodes = [n for n in pattern.nodes if n.op == "placeholder"]
output_node = next(iter(reversed(pattern.nodes)))
# nodes returned by outputs
self.pattern_returning_nodes: List[Node] = output_node.all_input_nodes
self.pattern_anchors: List[Node] = []
if match_output:
self.pattern_anchors = [output_node]
else:
# If a node has output_node as the ONLY user, then this node is a graph sink,
# and should be matched against as an anchor
self.pattern_anchors = [n for n in output_node.all_input_nodes if len(n.users) == 1]
def _nodes_are_equal(self, pn: Node, gn: Node) -> bool:
# TODO: match args and kwargs
# if exact match for placeholder is not required, then use placeholder as a wildcard
if not self.match_placeholder and pn.op == "placeholder":
return True
if pn.target == torch.ops.pseudo.any:
return True
if pn.target == torch.ops.pseudo.oneof:
permissible_targets: List[str] = pn.kwargs.get("targets", list()) # type: ignore[assignment]
            assert isinstance(permissible_targets, list), \
                "pseudo.oneof(targets=[\"foo\", \"bar\"]) only accepts targets as a list"
            assert len(permissible_targets) > 0, "please specify at least one target for pseudo.oneof"
if gn._pretty_print_target(gn.target) in permissible_targets:
return True
if pn.op == gn.op:
if pn.op == "placeholder" or pn.op == "output":
return True
return pn.target == gn.target
return False
def _is_contained(self, nodes_map: Dict[Node, Node]) -> bool:
# `lookup` represents all the nodes in `original_graph`
# that are part of `pattern`
lookup: Dict[Node, Node] = {gn : pn for pn, gn in nodes_map.items()}
for gn, pn in lookup.items():
# Placeholders can be used by other nodes in the graphs
if pn.op == "placeholder":
continue
# nodes returned by output are allowed to be used in other areas of the graph
if pn in self.pattern_returning_nodes:
continue
for user in gn.users:
# If this node has users that were not in `lookup`, then it must leak out of the
# pattern subgraph
if user not in lookup:
return False
return True
def _remove_overlapping_matches(self, matches: List[InternalMatch]) -> List[InternalMatch]:
non_overlapping_matches: List[InternalMatch] = list()
nodes_matched: Set[Node] = set()
for match in matches:
found_overlap = False
for pn, gn in match.nodes_map.items():
if pn.op not in {"placeholder", "output"} and gn in nodes_matched:
found_overlap = True
break
if not found_overlap:
non_overlapping_matches.append(match)
for pn, gn in match.nodes_map.items():
if pn.op not in {"placeholder", "output"}:
nodes_matched.add(gn)
return non_overlapping_matches
def _match_nodes(self, pn: Node, gn: Node, match: InternalMatch) -> bool:
# Check if we've already matched these nodes in the current
# traversal
if pn in match.nodes_map:
return match.nodes_map[pn] == gn
        # TODO: use a more efficient way to check if gn has been matched before: a two-way dict
if gn in match.nodes_map.values():
return False
if not self._nodes_are_equal(pn, gn):
return False
# Optimistically mark `pn` as a match for `gn`
match.nodes_map[pn] = gn
if pn.op == "placeholder":
return True
# Recursively traverse upwards to check if `pn` is a true
# match for `gn`
match_found = (len(pn.all_input_nodes) == len(gn.all_input_nodes) and
all(self._match_nodes(pn_, gn_, match) for pn_, gn_
in zip(pn.all_input_nodes, gn.all_input_nodes)))
if not match_found:
match.nodes_map.pop(pn)
return False
return True
def match(self, graph: Graph) -> List[InternalMatch]:
"""
Returns:
The matched subgraphs.
            The returned subgraph would be fully self-contained, meaning the nodes (except placeholder
and nodes returned by output) can only be consumed by nodes within the matched subgraph.
Subgraph pattern matcher is implemented with the backtracking style in the following steps:
1. We first identify all the anchor nodes in the pattern graph. The anchor nodes
are the "sinks" (nodes with no user other than the output node) of the pattern graph.
One pattern graph could have multiple anchors if it has multiple return values.
2. In the target graph, we identify the potential candidate nodes that can be matched
with each anchor. These anchor-candidate pairs are the starting points for
pairwise per-node matching.
3. For each anchor-candidate pair, we simultaneously traverse backwards (DFS) in both
pattern and target graphs. For every pattern nodes along traversal path, we compare it
against the target nodes. In case any comparison failed, the match for this anchor-candidate
pair fails. A match is found when DFS completes traversing the graph. See `self._match_nodes`
for more details.
4. In the case of multiple anchors, every anchor will need to find a match using step 3.
In addition, the matches found between anchors need to have a common intersection node
in order for the match to be valid. This is implemented with backtracking. See `backtracking`
for more details.
        Notice: graph traversal must be done in reverse order because a tensor can have multiple
        consumers, but only a single producer. Only with reverse-order traversal can we jointly
        traverse the pattern and target graph along a deterministic path.
        Warning: In theory, this backtracking algorithm has an **exponential** time complexity. However,
in practice, it's unlikely to blow up.
"""
# find candidate nodes to match with pattern anchors
match_candidates: Dict[Node, List[Node]] = defaultdict(list)
for pattern_anchor in self.pattern_anchors:
for node in graph.nodes:
if self._nodes_are_equal(pattern_anchor, node):
match_candidates[pattern_anchor].append(node)
match_candidates_list = list(match_candidates.items())
matches: List[InternalMatch] = []
def backtracking(anchor_index, match):
if anchor_index == len(match_candidates_list):
match.placeholder_nodes = [match.nodes_map[pn] for pn in self.pattern_placeholder_nodes]
match.returning_nodes = [match.nodes_map[pn] for pn in self.pattern_returning_nodes]
matches.append(match)
return
pattern_anchor, candidate_nodes = match_candidates_list[anchor_index]
saved_match = copy.copy(match)
for node in candidate_nodes:
match_found = self._match_nodes(pattern_anchor, node, match)
if match_found:
# match next anchor
backtracking(anchor_index + 1, match)
# revert to saved_match before matching with current anchor
match = copy.copy(saved_match)
match = InternalMatch(anchors=self.pattern_anchors)
backtracking(0, match)
# filter out the matches where the subgraph is not fully_contained
matches = [match for match in matches if self._is_contained(match.nodes_map)]
if self.remove_overlapping_matches:
matches = self._remove_overlapping_matches(matches)
return matches
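# A minimal sketch of SubgraphMatcher on traced graphs. The `pattern_fn` and
# `target_fn` below are hypothetical; the relu -> sigmoid chain in the pattern
# is expected to match the corresponding chain inside the target graph once.
def _example_subgraph_matcher() -> List[InternalMatch]:
    from torch.fx import symbolic_trace
    def pattern_fn(x):
        return torch.sigmoid(x.relu())
    def target_fn(x):
        y = x.relu()
        return torch.sigmoid(y) + 1
    matcher = SubgraphMatcher(symbolic_trace(pattern_fn).graph)
    return matcher.match(symbolic_trace(target_fn).graph)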
| pytorch-master | torch/fx/passes/utils/matcher_utils.py |
import copy
from queue import SimpleQueue
from typing import List, Dict, Tuple
import torch.fx
from torch.fx.graph_module import GraphModule
from torch.fx.graph import Graph
from torch.fx.node import Node
from torch.fx.passes.tools_common import NodeList, NodeSet, legalize_graph
from torch.fx.passes.utils import lift_subgraph_as_module
def topo_sort(nodes: NodeList) -> NodeList:
# sort nodes according to the topological order
indegree_map = {node : 0 for node in nodes}
candidates: SimpleQueue = SimpleQueue()
for node in nodes:
for n in node.all_input_nodes:
if n in indegree_map:
indegree_map[node] += 1
if indegree_map[node] == 0:
candidates.put(node)
sorted_nodes: NodeList = list()
while not candidates.empty():
node = candidates.get()
sorted_nodes.append(node)
for n in node.users:
if n in indegree_map:
indegree_map[n] -= 1
if indegree_map[n] == 0:
candidates.put(n)
    assert len(nodes) == len(sorted_nodes), "topologically sorted nodes don't have the same length as the input nodes"
return sorted_nodes
def validate_partition(partition: NodeList) -> bool:
    # Verify that the partition doesn't form a dependency cycle in the original graph.
    # Returns True for a valid partition, False for an invalid one.
partition_set = set(partition)
outputs: NodeList = list()
for node in partition_set:
for user_node in node.users:
if user_node not in partition_set:
# external user node, need to expose as an output
outputs.append(user_node)
    # perform DFS on the partition outputs
# if it reaches a node within the partition, then it found a cycle
visited: NodeSet = set()
def dfs_find_cycle(node):
if node in partition_set:
return True # found cycle, return
visited.add(node)
for user_node in node.users:
if user_node not in visited:
if dfs_find_cycle(user_node):
return True
return False
for output_node in outputs:
if dfs_find_cycle(output_node):
return False
return True
def fuse_as_graphmodule(gm: GraphModule,
nodes: NodeList,
module_name: str) -> Tuple[GraphModule, Tuple[Node, ...], Tuple[Node, ...]]:
"""
Fuse nodes in graph_module into a GraphModule.
Args:
gm (GraphModule): target graph_module
nodes (List[Node]): list of nodes in `gm` to fuse, where the node must be topologically sorted
module_name: class name for the fused GraphModule
Returns:
fused_gm (GraphModule): fused graph module, where its node is a copy of `nodes` in `gm`
original_inputs (Tuple[Node, ...]): input nodes to `nodes` in original `gm`
original_outputs (Tuple[Node, ...]): consumer nodes of `nodes` in original `gm`
"""
# assumption: nodes are already sorted in topo order
for node in nodes:
assert node.graph.owning_module is gm, f"{node} doesn't belong to passed in graph module {gm._get_name()}"
assert not node._erased, f"{node} has been removed from owning graph"
assert node in gm.graph.nodes, f"{node} is not found in graph module {gm._get_name()}"
# validates partition doesn't introduce dependency circles in the graph
assert validate_partition(nodes), "Invalid partition, found dependency cycles"
subgraph = Graph()
node_to_placeholder: Dict[Node, Node] = {} # mapping of nodes from old graph to placeholder in new graph
node_map: Dict[Node, Node] = {} # mapping of nodes from old graph to new graph
    # handle inputs through graph.node_copy's arg_transform function
def remap_inputs(x):
if x.op == "get_attr":
# TODO: do we really need copy the get_attr node into the graph?
# do something here
pass
if x in nodes:
# x is inside subgraph, return the copied node
            # the node should have been copied already, as we are copying the graph in topological order
return node_map[x]
if x not in node_to_placeholder:
# x is not in subgraph, create a new placeholder for subgraph
placeholder_node = subgraph.placeholder(x.name, type_expr=x.type)
            # copy all meta fields, even if some fields might be irrelevant for the placeholder node
placeholder_node.meta = copy.copy(x.meta)
node_to_placeholder[x] = placeholder_node
return node_to_placeholder[x]
# copy nodes in topological order
for node in nodes:
new_node = subgraph.node_copy(node, remap_inputs)
node_map[node] = new_node
# handles outputs
output_mapping: Dict[Node, Node] = {} # mapping from old output to new outputs
for node in nodes:
for user_node in node.users:
if user_node not in nodes:
# external user node, need to expose as an output
output_mapping[node] = node_map[node]
# outs contain nodes in the new subgraph
outs = tuple(output_mapping.values())
# Take care of the args of FX output node. If there's a single
# output then the output node args is like (output_single), else
# if there're multiple outputs then the output node args is like
# ((output_0, output_1, ...)).
subgraph.output(outs[0] if len(outs) == 1 else outs)
# lint to ensure correctness
subgraph.lint()
fused_gm: GraphModule = lift_subgraph_as_module(gm, subgraph, class_name=module_name)
# sub_gm's input nodes in the original module
original_inputs: Tuple[Node, ...] = tuple(node_to_placeholder.keys())
# sub_gm's outputs node in the original module
original_outputs: Tuple[Node, ...] = tuple(output_mapping.keys())
return fused_gm, original_inputs, original_outputs
def insert_subgm(gm: GraphModule, sub_gm: GraphModule, orig_inputs: Tuple[Node, ...], orig_outputs: Tuple[Node, ...]):
# add sub_gm into gm
submodule_name = sub_gm.__class__.__name__
gm.add_submodule(submodule_name, sub_gm)
# Create a call_module node in main graph.
module_node = gm.graph.call_module(
submodule_name,
args=orig_inputs,
kwargs=None)
if len(orig_outputs) == 1:
# main_remapping[comp.orig_outputs[0]] = module_node
orig_outputs[0].replace_all_uses_with(module_node)
else:
for i, orig_output in enumerate(orig_outputs):
# Use Proxy to record getitem access.
proxy_out = torch.fx.Proxy(module_node)[i].node # type: ignore[index]
orig_output.replace_all_uses_with(proxy_out)
return gm
def erase_nodes(gm: GraphModule, nodes: NodeList):
    # erase original nodes in reversed topological order
for node in reversed(nodes):
gm.graph.erase_node(node)
def fuse_by_partitions(gm: GraphModule, partitions: List[NodeList]) -> GraphModule:
for partition_id, nodes in enumerate(partitions):
sorted_nodes = topo_sort(nodes)
submodule_name = "fused_" + str(partition_id)
sub_gm, orig_inputs, orig_outputs = fuse_as_graphmodule(gm, sorted_nodes, submodule_name)
insert_subgm(gm, sub_gm, orig_inputs, orig_outputs)
erase_nodes(gm, sorted_nodes)
# topological sort original gm with newly created sub_gm
legalize_graph(gm)
return gm
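# A minimal sketch of fuse_by_partitions on a traced graph. The function `f` is
# hypothetical; its relu and sigmoid nodes are fused into a single "fused_0"
# submodule, while the trailing add stays in the parent graph.
def _example_fuse_by_partitions() -> GraphModule:
    def f(x):
        return torch.sigmoid(torch.relu(x)) + 1
    gm = torch.fx.symbolic_trace(f)
    to_fuse = [n for n in gm.graph.nodes
               if n.op == "call_function" and n.target in (torch.relu, torch.sigmoid)]
    return fuse_by_partitions(gm, [to_fuse])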
| pytorch-master | torch/fx/passes/utils/fuser_utils.py |
pytorch-master | torch/fx/passes/dialect/__init__.py |
|
from typing import Dict, Tuple, Any
import torch
from torch.fx.passes.infra.pass_base import PassBase, PassResult
from torch.utils._pytree import tree_flatten
from torch.fx import GraphModule, Graph
from torch.fx import Node
aten = torch.ops.aten
# stateful ops are banned from CSE
rand_ops = set([aten.dropout, aten._fused_dropout, aten._standard_gamma, aten.bernoulli, aten.multinomial, aten.native_dropout, aten.normal, aten.poisson, aten.binomial, aten.rrelu, aten.rand_like, aten.rand, aten.randint, aten.randn, aten.randperm]) # noqa: E501
inplace_ops = set([aten.add_, aten.sub_, aten.mul_, aten.div_, aten.pow_, aten.lerp_, aten.relu_, aten.sigmoid_, aten.tanh_]) # noqa: E501
@torch.fx._compatibility.compatibility(is_backward_compatible=False)
def get_CSE_banned_ops():
return rand_ops.union(inplace_ops)
@torch.fx._compatibility.compatibility(is_backward_compatible=False)
class CSEPass(PassBase):
def __init__(self, banned_ops=None):
"""
This version of CSE Pass aims to be dialect agnostic, and it's implemented purely based on the connectivity between fx.Node.
        For functional dialects, the user only needs to specify the random ops in the ban list.
        Warning: CSE Pass cannot be safely applied on an FX graph in non-functional dialects.
        If your dialect contains stateful operators, please customize banned_ops accordingly.
"""
if banned_ops is None:
banned_ops = set()
self.banned_ops = banned_ops
super().__init__()
def call(self, graph_module: GraphModule) -> PassResult:
"""
Return a new copy of torch.fx.GraphModule with CSE applied to the input graph
Example usage:
from torch.fx.experimental.proxy_tensor import make_fx
def f(a):
b = a * a
c = a * a
return b+c
p = CSEPass()
traced_graph = make_fx(f)(torch.tensor(1))
print(traced_graph)
result = p(traced_graph)
print(result.graph_module)
"""
def get_aten_target(node):
if hasattr(node.target, 'overloadpacket'):
return node.target.overloadpacket
return node.target
modified = False
new_graph = Graph()
env: Dict[Node, Node] = {} # map from node in the old graph to node in the new graph
hash_env: Dict[Tuple[torch._ops.OpOverload, int], Node] = {} # map from hash to a node in the new graph
token_map: Dict[Tuple[torch._ops.OpOverload, int], Dict[str, Any]] = {} # map from hash to token
for n in graph_module.graph.nodes:
            # The placeholder, output, and get_attr nodes are copied to the new graph without change
# do not CSE away random operations
if n.op == 'placeholder' or n.op == 'output' or n.op == 'get_attr' or get_aten_target(n) in self.banned_ops:
new_node = new_graph.node_copy(n, lambda x: env[x])
env[n] = new_node
else: # n.op == 'call_function', should never see n.op == 'call_module' or 'call_method'
                # substitute args and kwargs members with their mapping in env if it exists
# specs can be used to reconstruct nested list/dictionaries
def substitute(arg_list):
arg_list, spec = tree_flatten(arg_list)
for i in range(len(arg_list)):
v = arg_list[i]
if isinstance(v, Node) and v in env:
arg_list[i] = env[v]
return tuple(arg_list), spec
args, args_spec = substitute(n.args)
kwargs, kwargs_spec = substitute(n.kwargs)
# each token corresponds to a unique node
# nodes with the same token can be substituted
token = {"target": n.target, "args": args, "args_spec": args_spec,
"kwargs": kwargs, "kwargs_spec": kwargs_spec}
# hash substituted args to a number, do not hash specs because specs are not hashable
hash_arg = hash((args, kwargs))
hash_val = (n.target, hash_arg)
# check if a node has a substitute and can be eliminated
hash_val_in_hash_env = hash_val in hash_env
if hash_val_in_hash_env and token_map[hash_val] == token:
                    modified = True  # a substitution happened and the graph is modified
env[n] = hash_env[hash_val]
continue
new_node = new_graph.node_copy(n, lambda x: env[x])
env[n] = new_node
if not hash_val_in_hash_env:
hash_env[hash_val] = new_node
token_map[hash_val] = token
csed_gm = GraphModule(graph_module, new_graph)
return PassResult(csed_gm, modified)
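# A minimal sketch of running CSE with the default ban list on an ATen graph
# produced by make_fx. The function `f` is hypothetical; its duplicated cos
# calls are expected to be deduplicated.
def _example_cse_with_banned_ops() -> GraphModule:
    from torch.fx.experimental.proxy_tensor import make_fx
    def f(a):
        return a.cos() + a.cos()
    traced = make_fx(f)(torch.rand(3))
    result = CSEPass(banned_ops=get_CSE_banned_ops())(traced)
    return result.graph_module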
| pytorch-master | torch/fx/passes/dialect/common/cse_pass.py |
pytorch-master | torch/fx/passes/dialect/common/__init__.py |
|
from __future__ import annotations
from typing import Any, Union, Sequence, Optional, Tuple, List, Callable, Type, overload
from enum import Enum
from functools import reduce, cmp_to_key
import operator
import weakref
import torch
# nvFuser imports are conditional on being compiled with CUDA
if hasattr(torch._C, "_nvfuser"):
from torch._C._nvfuser import DataType # type: ignore[import]
_torch_dtype_to_nvfuser_dtype_map = {
torch.cdouble: DataType.ComplexDouble,
torch.cfloat: DataType.ComplexFloat,
torch.double: DataType.Double,
torch.float: DataType.Float,
torch.half: DataType.Half,
torch.bfloat16: DataType.BFloat16,
torch.long: DataType.Int,
torch.int: DataType.Int32,
torch.bool: DataType.Bool,
# Python scalars
complex: DataType.ComplexDouble,
float: DataType.Double,
int: DataType.Int,
bool: DataType.Bool,
}
else:
_torch_dtype_to_nvfuser_dtype_map = {}
def getnvFuserDtype(dtype: Union[torch.dtype, NumberTypeType]):
"""
Translates from torch.dtype to nvFuser's DataType enum
"""
return _torch_dtype_to_nvfuser_dtype_map[dtype]
ShapeType = Union[torch.Size, List[int], Tuple[int, ...]]
StrideType = Union[List[int], Tuple[int, ...]]
DimsType = Union[int, List[int], Tuple[int, ...]]
DimsSequenceType = Union[List[int], Tuple[int, ...]]
NumberTypeType = Union[Type[bool], Type[int], Type[float], Type[complex]]
NumberType = Union[bool, int, float, complex]
Number = (bool, int, float, complex)
DeviceLikeType = Union[str, torch.device]
Tensor = torch.Tensor
torch_function_passthrough = {
torch.Tensor.ndim.__get__, # type: ignore[attr-defined]
torch.Tensor.numel,
torch.Tensor.stride,
torch.Tensor.dtype.__get__, # type: ignore[attr-defined]
torch.Tensor.is_sparse.__get__, # type: ignore[attr-defined]
torch.Tensor.shape.__get__, # type: ignore[attr-defined]
torch.Tensor.device.__get__, # type: ignore[attr-defined]
torch.Tensor.requires_grad.__get__, # type: ignore[attr-defined]
torch.Tensor.layout.__get__, # type: ignore[attr-defined]
# For TorchRefsMode only
torch.Tensor.__format__,
torch.Tensor.__repr__,
torch.Tensor.requires_grad.__get__, # type: ignore[attr-defined]
}
TensorLikeType = torch.Tensor
TensorLike = torch.Tensor
TensorSequenceType = Union[List[TensorLikeType], Tuple[TensorLikeType, ...]]
TensorOrNumberLikeType = Union[TensorLikeType, NumberType]
def same_shape(a: ShapeType, b: ShapeType) -> bool:
if len(a) != len(b):
return False
for x, y in zip(a, b):
if x != y:
return False
return True
# TODO: look at using torch.testing.assert_close instead with an option
# to just compare metadata
def compare_tensor_meta(a: TensorLikeType, b: TensorLikeType, check_strides=False):
"""
Checks that two tensor likes have the same shape,
dtype and device.
In the future this will validate additional metadata, like
strides.
"""
assert isinstance(a, TensorLike)
assert isinstance(b, TensorLike)
if not same_shape(a.shape, b.shape):
msg = "Shapes {0} and {1} are not equal!".format(a.shape, b.shape)
raise AssertionError(msg)
if a.dtype != b.dtype:
msg = "Dtypes {0} and {1} are not equal!".format(a.dtype, b.dtype)
raise AssertionError(msg)
if a.device != b.device:
# Handles special cuda:0 vs cuda case
# TODO: we should review why this happens and see about fixing it
if (str(a.device) == "cuda:0" or str(a.device) == "cuda") and (
str(b.device) == "cuda:0" or str(b.device) == "cuda"
):
pass
else:
msg = "Devices {0} and {1} are not equal!".format(a.device, b.device)
raise AssertionError(msg)
# Stride checking is currently disabled, see https://github.com/pytorch/pytorch/issues/78050
if check_strides:
same_strides, idx = check_significant_strides(a, b)
if not same_strides:
msg = "Stride mismatch! Strides are {0} and {1} (mismatched at {2})!".format(
a.stride(), b.stride(), idx
)
raise RuntimeError(msg)
def check_significant_strides(
a: TensorLikeType, b: TensorLikeType
) -> Tuple[bool, Optional[int]]:
# NOTE: only on CUDA because CPU elementwise strides are incorrect in PyTorch
# See https://github.com/pytorch/pytorch/issues/77553
# Only compares strides that are "meaningful" -- strides for dimensions with length > 1
# and for tensors with more than one element
if (a.device.type == "cuda" or b.device.type == "cuda") and a.numel() > 0:
for idx in range(a.ndim):
if a.stride()[idx] != b.stride()[idx] and a.shape[idx] > 1:
return False, idx
return True, None
# This function is equivalent to compute_contiguous() from TensorImpl.cpp
def is_contiguous(a: TensorLikeType) -> bool:
"""
Tests whether a tensor is contiguous or not.
Tensors are contiguous when they have no elements,
one element, or when they have "nested" strides.
"""
if a.numel() < 2:
return True
expected_stride = 1
for x, y in reversed(tuple(zip(a.shape, a.stride()))):
# Skips checking strides when a dimension has length 1
if x == 1:
continue
if y != expected_stride:
return False
expected_stride = expected_stride * x
return True
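# A small worked example of the "nested strides" rule above: a 2x3 row-major
# tensor has strides (3, 1) and is contiguous, while its transpose with strides
# (1, 3) is not.
def _example_is_contiguous() -> None:
    a = torch.empty(2, 3)
    assert is_contiguous(a)          # strides (3, 1) nest as 1, then 1 * 3
    assert not is_contiguous(a.t())  # strides (1, 3) break the nesting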
# This function is equivalent to compute_channels_last_contiguous_2d() in TensorImpl.cpp
def is_channels_last_contiguous_2d(a: Tensor) -> bool:
# NHWC or not channels last 2D contiguous
if a.ndim != 4:
return False
expected_stride = 1
for idx in (1, 3, 2, 0):
length = a.shape[idx]
if length == 1:
continue
stride = a.stride()[idx]
if stride != expected_stride:
return False
expected_stride *= length
return True
def is_channels_last_contiguous_3d(a: Tensor) -> bool:
# NDHWC or not channels last 3D contiguous
if a.ndim != 5:
return False
expected_stride = 1
for idx in (1, 4, 3, 2, 0):
length = a.shape[idx]
if length == 1:
continue
stride = a.stride()[idx]
if stride != expected_stride:
return False
expected_stride *= length
return True
_memory_formats = set(
(
torch.contiguous_format,
torch.preserve_format,
torch.channels_last,
torch.channels_last_3d,
)
)
def validate_memory_format(memory_format: torch.memory_format):
check(
memory_format in _memory_formats,
lambda: f"Received unknown memory format {memory_format}!",
)
def is_contiguous_for_memory_format( # type: ignore[return]
a: Tensor, *, memory_format: torch.memory_format
) -> bool:
validate_memory_format(memory_format)
if memory_format == torch.contiguous_format:
return is_contiguous(a)
if memory_format == torch.channels_last:
return is_channels_last_contiguous_2d(a)
if memory_format == torch.channels_last_3d:
return is_channels_last_contiguous_3d(a)
check(
False,
lambda: f"is_contiguous received unsupported memory format {memory_format}",
)
# NOTE: that tensors with no elements and channels last is ???
def is_channels_last_contiguous(a: Tensor) -> bool:
"""
True when a tensor is channels-last contiguous.
This requires that:
- the tensor is conceptually either 4 (NHWC) or 5 (NDHWC) dimensions
- if we name the tensor's dimensions NCHW or NCDHW, then the strides are such that the
stride of the 'C' dimension (Cs) is 1 and the strides corresponding to
each dimension (Xs) can be ordered Cs <= Ws <= Hs <= (Ds) <= Ns and are
"nested" -- so Ws = Cs * Cl, where Cl is the length of the 'C' dimension,
for example.
"""
return is_channels_last_contiguous_2d(a) or is_channels_last_contiguous_3d(a)
def is_non_overlapping_and_dense(a: Tensor) -> bool:
"""
True when a tensor is non-overlapping and dense.
A tensor is non-overlapping and dense when there exists a permutation of
its dimensions that is contiguous.
"""
# Short-circuits if the tensor is already contiguous or channels-last contiguous
if is_contiguous(a) or is_channels_last_contiguous(a):
return True
# The following is equivalent to compute_non_overlapping_and_dense in TensorImpl.cpp
# Short-circuits for tensors of rank one, which are
# non-overlapping and "dense" if their stride is one
if a.ndim == 1:
return a.stride()[0] == 1
# Checks that there exists a permutation of the strides s.t. the tensor would be contiguous
# Sorts (length, stride) pairs by stride
lengths_and_strides = sorted(
tuple(zip(a.shape, a.stride())), key=operator.itemgetter(1)
)
expected_stride = 1
for length, stride in lengths_and_strides:
if length == 1:
continue
if stride != expected_stride:
return False
expected_stride *= length
return True
# NOTE: Based on the implementation in TensorIterator.cpp, but note that
# the note [Computing output strides] is incorrect, because it
# says that strides will be preserved even if they are not
# "non overlapping and dense", but this is incorrect. The
# output of elementwise operations are always given
# non overlapping and dense strides.
# This is also INCORRECT because it does not model TensorIterator's
# short-circuit, which can cause different strides.
def compute_elementwise_output_strides(*tensors) -> Tuple[int, ...]:
"""
Computes the output strides for elementwise operations.
"""
if len(tensors) == 0:
msg = "Can't compute elementwise output strides for zero tensors!"
raise ValueError(msg)
check_same_shape(*tensors, allow_cpu_scalar_tensors=True)
# Filters the tensors to actual tensors
tensors = tuple(
a for a in tensors if isinstance(a, TensorLike) and not is_cpu_scalar_tensor(a)
)
# Short-circuits for CPU scalar case
if len(tensors) == 0:
return ()
# Short-circuits for shapes with zero or one dimensions
# TODO: are these necessary?
ndim = tensors[0].ndim
if ndim == 0:
return ()
if ndim == 1:
return (1,)
shape = tensors[0].shape
def _cmp(idx_a, idx_b):
for tensor in tensors:
stride_a = tensor.stride()[idx_a]
stride_b = tensor.stride()[idx_b]
if stride_a == 0 or stride_b == 0:
continue
if stride_a < stride_b:
return -1
if stride_a > stride_b:
return 1
# stride_a == stride_b
if shape[idx_a] > shape[idx_b]:
return 1
# NOTE: this case is missing in the C++ impl
if shape[idx_a] < shape[idx_b]:
return -1
# Note: this case is hit if all strides are zero,
# or all strides are equal and all dimensions have the same length
return 0
perm = tuple(range(ndim))
perm = tuple(sorted(perm, key=cmp_to_key(_cmp), reverse=True))
permuted_shape = [-1] * ndim
for idx, x in enumerate(perm):
permuted_shape[idx] = shape[x]
new_strides = make_contiguous_strides_for(permuted_shape)
permuted_strides = [-1] * ndim
for idx, x in enumerate(perm):
permuted_strides[x] = new_strides[idx]
return tuple(permuted_strides)
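# A small worked example of the stride computation above: combining a row-major
# tensor with a transposed one of the same shape. The comparator resolves the
# permutation using the first tensor whose strides differ at the compared
# dimensions, so the output keeps row-major (contiguous) strides here.
def _example_elementwise_output_strides() -> None:
    a = torch.empty(2, 3)        # strides (3, 1)
    b = torch.empty(3, 2).t()    # shape (2, 3), strides (1, 3)
    assert compute_elementwise_output_strides(a, b) == (3, 1)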
#
# Common helper functions
#
def validate_dim_length(length: int):
"""
Validates that an object represents a valid
dimension length.
"""
assert length >= 0
def validate_shape(shape: ShapeType):
"""
Validates that a sequence represents a valid shape.
"""
assert isinstance(shape, Sequence)
for l in shape:
validate_dim_length(l)
def validate_strides(strides: StrideType):
"""
Verifies the object specifies valid strides.
"""
assert isinstance(strides, Sequence)
for stride in strides:
assert stride >= 0
def validate_idx(rank: int, idx: int):
"""
    Validates that idx is a valid index for the given rank.
    Assumes the index is already canonicalized.
"""
assert isinstance(idx, int)
assert isinstance(rank, int)
    assert (idx >= 0 and idx < rank) or idx == 0
def validate_dimension_indices(rank: int, indices: DimsSequenceType):
for idx in indices:
validate_idx(rank, idx)
def validate_exclusive_idx(rank: int, ex_idx: int):
"""
    Validates that ex_idx is a valid exclusive index
    for the given rank.
"""
assert isinstance(ex_idx, int)
assert isinstance(rank, int)
assert ex_idx > 0 and ex_idx <= rank
# "Wraps" a dim (up to one time) for the given rank, allowing
# dims to be specified using negative indices
def canonicalize_dim(rank: int, idx: int) -> int:
# TODO: add a comment for why this is
_rank = rank if rank != 0 else 1
if idx >= 0 and idx < _rank:
return idx
if idx < 0:
_idx = idx + _rank
else:
_idx = idx
    if _idx < 0 or _idx >= _rank:
# Same error message as in aten/src/ATen/WrapDimUtils.h:49
msg = "Dimension out of range (expected to be in range of [{0}, {1}], but got {2})".format(
-rank, rank - 1, idx
)
raise IndexError(msg)
return _idx
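# Illustrative sketch (hypothetical helper): negative dims wrap once, and a
# rank of zero is treated as rank one so dim 0 (or -1) is accepted for scalars.
def _example_canonicalize_dim() -> Tuple[int, int, int]:
    return (
        canonicalize_dim(4, -1),  # 3
        canonicalize_dim(4, 2),   # 2
        canonicalize_dim(0, 0),   # 0
    )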
# Takes a dimension or sequence of dimensions and "wraps" them,
# mapping negative offsets to positive ones
@overload
def canonicalize_dims(rank: int, indices: Sequence[int]) -> Tuple[int, ...]:
pass
@overload
def canonicalize_dims(rank: int, indices: int) -> int:
pass
def canonicalize_dims(rank, indices):
if isinstance(indices, int):
return canonicalize_dim(rank, indices)
return tuple(canonicalize_dim(rank, x) for x in indices)
def is_valid_permutation(rank: int, perm: DimsSequenceType) -> bool:
"""
Validates that perm is a permutation of length rank.
"""
if not isinstance(perm, Sequence):
return False
if not (tuple(sorted(perm)) == tuple(range(0, rank))):
return False
return True
def is_same_shape(a: Sequence, b: Sequence) -> bool:
"""
Compares two shapes a and b, returning True if they are the same
(their ranks and corresponding lengths match) and False otherwise.
"""
return tuple(a) == tuple(b)
def is_cpu_scalar_tensor(a: Any) -> bool:
return isinstance(a, TensorLike) and a.ndim == 0 and a.device.type == "cpu"
def check_same_device(*args, allow_cpu_scalar_tensors):
"""
Checks that all Tensors in args have the same device.
Raises a RuntimeError when:
- args contains an object whose type is not Tensor or Number
- two Tensor objects in args have different devices, unless one is a CPU scalar tensor and allow_cpu_scalar_tensors is True
"""
# Short-circuits if all (one or fewer) arguments are trivially on the same device
if len(args) <= 1:
return
# Note: cannot initialize device to the first arg's device (it may not have one)
device = None
for arg in args:
if isinstance(arg, Number):
continue
elif isinstance(arg, TensorLike):
if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
continue
if device is None:
device = arg.device
if device != arg.device:
msg = (
"Tensor on device "
+ str(arg.device)
+ " is not on the expected device "
+ str(device)
+ "!"
)
raise RuntimeError(msg)
else:
msg = (
"Unexpected type when checking for same device, " + str(type(arg)) + "!"
)
raise RuntimeError(msg)
def canonicalize_device(device: DeviceLikeType) -> torch.device:
if isinstance(device, torch.device):
return device
assert isinstance(device, str)
return torch.device(device)
# Raises a RuntimeError if any of the following are true:
#   - an argument is neither a Number nor a Tensor
#   - two tensor arguments have different shapes
def check_same_shape(*args, allow_cpu_scalar_tensors: bool):
"""
Checks that all Tensors in args have the same shape.
Raises a RuntimeError when:
- args contains an object whose type is not Tensor or Number
    - two Tensor objects in args have different shapes, unless one is a CPU scalar tensor and allow_cpu_scalar_tensors is True
"""
shape = None
for arg in args:
if isinstance(arg, Number):
continue
elif isinstance(arg, TensorLike):
if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
continue
if shape is None:
shape = arg.shape
if not is_same_shape(shape, arg.shape):
msg = "Shape {0} is not the expected shape {1}!".format(
arg.shape, shape
)
raise RuntimeError(msg)
else:
msg = (
"Unexpected type when checking for same shape, " + str(type(arg)) + "!"
)
raise RuntimeError(msg)
# Acquires a common shape, if it exists, from one or more tensor arguments,
# filtering number arguments
def extract_shape(*args, allow_cpu_scalar_tensors: bool) -> Optional[ShapeType]:
shape = None
scalar_shape = None
for arg in args:
if isinstance(arg, Number):
continue
elif isinstance(arg, TensorLike):
if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
scalar_shape = arg.shape
continue
if shape is None:
shape = arg.shape
if not is_same_shape(shape, arg.shape):
return None
else:
return None
return shape if shape is not None else scalar_shape
def extract_shape_from_varargs(
shape: Union[ShapeType, Tuple[ShapeType]],
validate=True,
) -> Tuple[int, ...]:
"""
Returns a shape from varargs.
    In PyTorch, operations that accept shapes often accept them as varargs, like
    foo(*shape). However a user can pass the shape either as separate integers:
    foo(1, 2, 3)
    or as a single sequence of integers:
    foo((1, 2, 3))
    In the first case shape will be a tuple of integers, and in the second case it's a tuple
    containing a tuple of integers. This validates those inputs and canonicalizes them
    to a tuple of integers.
"""
# Handles tuple unwrapping
if len(shape) == 1 and isinstance(shape[0], Sequence):
shape = shape[0]
if validate:
validate_shape(shape) # type: ignore[arg-type]
return shape # type: ignore[return-value]
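# Sketch of the two accepted calling conventions (hypothetical helper name):
# both the varargs form foo(1, 2, 3) and the single-sequence form foo((1, 2, 3))
# canonicalize to the same shape.
def _example_varargs_shape(*shape) -> Tuple[int, ...]:
    return tuple(extract_shape_from_varargs(shape))
# _example_varargs_shape(1, 2, 3) == _example_varargs_shape((1, 2, 3)) == (1, 2, 3)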
_integer_dtypes = (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
_low_precision_dtypes = (torch.float16, torch.bfloat16, torch.complex32)
_float_dtypes = (torch.float16, torch.bfloat16, torch.float32, torch.float64)
_complex_dtypes = (torch.complex32, torch.complex64, torch.complex128)
def is_boolean_dtype(dtype: torch.dtype) -> bool:
assert isinstance(dtype, torch.dtype)
return dtype is torch.bool
def is_integer_dtype(dtype: torch.dtype) -> bool:
assert isinstance(dtype, torch.dtype)
return dtype in _integer_dtypes
def is_low_precision_dtype(dtype: torch.dtype) -> bool:
assert isinstance(dtype, torch.dtype)
return dtype in _low_precision_dtypes
def is_float_dtype(dtype: torch.dtype) -> bool:
assert isinstance(dtype, torch.dtype)
return dtype in _float_dtypes
def is_complex_dtype(dtype: torch.dtype) -> bool:
assert isinstance(dtype, torch.dtype)
return dtype in _complex_dtypes
def is_grad_dtype(dtype: torch.dtype) -> bool:
"""
Checks if the dtype can require a gradient.
"""
return is_float_dtype(dtype) or is_complex_dtype(dtype)
_complex_to_real_dtype_map = {
torch.complex128: torch.float64,
torch.complex64: torch.float32,
torch.complex32: torch.float16,
}
_real_to_complex_dtype_map = {
torch.float16: torch.complex32,
torch.bfloat16: torch.complex64,
torch.float32: torch.complex64,
torch.float64: torch.complex128,
}
def corresponding_real_dtype(dtype: torch.dtype) -> torch.dtype:
return _complex_to_real_dtype_map[dtype]
def corresponding_complex_dtype(dtype: torch.dtype) -> torch.dtype:
return _real_to_complex_dtype_map[dtype]
def dtype_to_type(dtype: torch.dtype) -> type:
"""
Computes the corresponding Python type (AKA "type kind") for the
given dtype.
"""
assert isinstance(dtype, torch.dtype)
if dtype is torch.bool:
return bool
if dtype in _integer_dtypes:
return int
if dtype in _float_dtypes:
return float
if dtype in _complex_dtypes:
return complex
raise ValueError("Invalid dtype!")
def type_to_dtype(typ: type) -> torch.dtype:
"""
Computes the corresponding dtype for a Number type.
"""
assert isinstance(typ, type)
if typ is bool:
return torch.bool
if typ is int:
return torch.long
if typ is float:
return torch.get_default_dtype()
if typ is complex:
return corresponding_complex_dtype(torch.get_default_dtype())
raise ValueError("Invalid type!")
def get_dtype(x: Union[torch.Tensor, NumberType]):
if isinstance(x, torch.Tensor):
return x.dtype
else:
return type_to_dtype(type(x))
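# Illustrative sketch (hypothetical helper) of the dtype <-> "type kind" helpers:
# tensors report their dtype directly, while Python numbers map through their
# type to a default dtype.
def _example_type_kinds() -> torch.dtype:
    assert dtype_to_type(torch.int64) is int
    assert type_to_dtype(bool) is torch.bool
    return get_dtype(2.5)  # torch.get_default_dtype(), usually torch.float32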
_ordered_types = (bool, int, float, complex)
def check_fp_or_complex(
dtype: torch.dtype, fn_name: str, allow_low_precision_dtypes: bool = True
):
"""
Checks whether the input is floating point or complex.
If allow_low_precision_dtypes is True, it allows having float16, bfloat16, and complex32
"""
check(
is_float_dtype(dtype) or is_complex_dtype(dtype),
lambda: f"{fn_name}: Expected a floating point or complex tensor as input. Got {dtype}",
)
check(
allow_low_precision_dtypes or not is_low_precision_dtype(dtype),
lambda: f"{fn_name}: Half precision dtypes not supported. Got {dtype}",
)
def check_is_matrix(A: TensorLikeType, f_name: str, arg_name: str = "A"):
check(
len(A.shape) >= 2,
lambda: f"{f_name}: The input tensor {arg_name} must have at least 2 dimensions.",
)
def get_higher_type(a: type, b: type) -> type:
"""
Returns the higher of the two given Number types.
The types are ordered bool -> int -> float -> complex.
"""
# Type checking
assert a in _ordered_types
assert b in _ordered_types
if a is b:
return a
for typ in _ordered_types:
if a is typ:
return b
if b is typ:
return a
raise ValueError("Unknown Python scalar type!")
# Returns the higher of two torch datatypes a and b or, if the two
# are not ordered relative to each other, the next
# higher datatype
def get_higher_dtype(
a: Optional[Union[torch.dtype, TensorLikeType, NumberType]],
b: Optional[Union[torch.dtype, TensorLikeType, NumberType]],
) -> Optional[torch.dtype]:
"""
Computes the "lowest" datatype that is weakly
"higher" than both a and b.
"""
# Type checking
assert a is None or isinstance(a, (torch.dtype, TensorLike, Number))
assert b is None or isinstance(b, (torch.dtype, TensorLike, Number))
def _extract_dtype(
x: Optional[Union[torch.dtype, TensorLikeType, NumberType]]
) -> Optional[torch.dtype]:
if x is None:
return None
if isinstance(x, torch.dtype):
return x
if isinstance(x, TensorLike):
return x.dtype
if isinstance(x, Number):
return type_to_dtype(type(x))
raise RuntimeError("Unexpected type given to _extract_dtype!")
a, b = _extract_dtype(a), _extract_dtype(b)
if a is b:
return a
if a is None:
return b
if b is None:
return a
ordered_datatypes = (
(torch.bool,),
(torch.uint8, torch.int8),
(torch.int16,),
(torch.int32,),
(torch.int64,),
(torch.float16, torch.bfloat16),
(torch.float32,),
(torch.float64,),
(torch.complex32,),
(torch.complex64,),
(torch.complex128,),
)
for idx, dtypes in enumerate(ordered_datatypes):
if a in dtypes and b in dtypes:
return ordered_datatypes[idx + 1][0]
if a in dtypes:
return b
if b in dtypes:
return a
raise RuntimeError("Unexpected termination!")
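# Illustrative sketch (hypothetical helper): uint8 and int8 are unordered
# relative to each other, so the next higher dtype (int16) is returned, while
# ordered pairs simply return the higher of the two.
def _example_higher_dtype() -> Tuple[torch.dtype, torch.dtype]:
    return (
        get_higher_dtype(torch.uint8, torch.int8),    # torch.int16
        get_higher_dtype(torch.float32, torch.int64), # torch.float32
    )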
# TODO: maybe unify with can_cast_to?
def is_weakly_lesser_type(a: type, b: type) -> bool:
"""
Compares two types, a and b, returning True if a is weakly "less" than b.
The comparison is determined by the following type ordering: bool, int, float, complex.
"""
ordered_types = (
bool,
int,
float,
complex,
)
assert a in ordered_types
assert b in ordered_types
for typ in ordered_types:
if a == typ:
return True
if b == typ:
return False
raise RuntimeError("Unexpected termination!")
def can_safe_cast_to(*, cast_to: torch.dtype, cast_from: torch.dtype) -> bool:
for fn in (is_complex_dtype, is_float_dtype, is_integer_dtype, is_boolean_dtype):
if fn(cast_to):
return True
if fn(cast_from):
return False
raise ValueError("Received unknown dtypes {0}, {1}!".format(cast_to, cast_from))
def check_same_dtype(*args):
"""
    Checks that all Tensors in args have the same dtype and that all Numbers have the
    same corresponding Python type.
    Raises a RuntimeError when:
    - args contains an object whose type is not Tensor or Number
    - two Tensor objects in args have different dtypes
    - two Number objects in args have different types
    - there are Tensors and Numbers in args, and one of those Tensors' corresponding
      Python types is different from the type of one of those Numbers
"""
full_dtype = None
scalar_type = None
for arg in args:
if isinstance(arg, Number):
# Scalar type checking is disabled (and may be removed in the future)
continue
# if scalar_type is None:
# scalar_type = type(arg)
# if scalar_type is not type(arg):
# msg = (
# "Scalar of type "
# + str(type(arg))
# + " is not the expected type of "
# + str(scalar_type)
# + "!"
# )
# raise RuntimeError(msg)
elif isinstance(arg, TensorLike):
if full_dtype is None:
full_dtype = arg.dtype
if scalar_type is None:
scalar_type = dtype_to_type(arg.dtype)
if full_dtype is not arg.dtype:
msg = (
"Tensor with dtype "
+ str(arg.dtype)
+ " is not the expected dtype of "
+ str(full_dtype)
+ "!"
)
raise RuntimeError(msg)
arg_type = dtype_to_type(arg.dtype)
if arg_type is not scalar_type:
msg = (
"Tensor with corresponding Python type "
+ str(arg_type)
+ " is not the expected type of "
+ str(scalar_type)
+ "!"
)
raise RuntimeError(msg)
else:
msg = (
"Unexpected type when checking for same dtype, " + str(type(arg)) + "!"
)
raise RuntimeError(msg)
# Maps datatypes to their computation types for elementwise operations
_computation_dtype_map = {
torch.bfloat16: torch.float32,
torch.float16: torch.float32,
torch.complex32: torch.complex64,
}
def get_computation_dtype(dtype: torch.dtype) -> torch.dtype:
return _computation_dtype_map.get(dtype, dtype)
class ELEMENTWISE_TYPE_PROMOTION_KIND(Enum):
DEFAULT = (0,)
NO_OPMATH = (1,)
INT_TO_FLOAT = (2,)
ALWAYS_BOOL = (3,)
COMPLEX_TO_FLOAT = (4,)
BOOL_TO_LONG = (5,)
class REDUCTION_OUTPUT_TYPE_KIND(Enum):
SAME = (0,)
COMPLEX_TO_FLOAT = (1,) # for complex types outputs corresponding real type
KEEP_PROMOTED_TYPE = (2,) # keep output in opmath type, needed for mean
ALWAYS_BOOL = (3,)
# TODO: document type promotion kinds
def elementwise_dtypes(
*_args,
type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND,
) -> Tuple[torch.dtype, torch.dtype]:
"""
Computes the computation and result dtypes for elementwise type promotion
on the given arguments and with the given elementwise type promotion kind.
Note that not all inputs to an elementwise operation necessarily participate in type promotion.
For example, the "alpha" parameter of torch.add does not participate in type promotion,
although it may be cast to the Python type corresponding to the computation dtype that
the type promotion algorithm determines.
Default elementwise type promotion, which all other type promotion kinds tweak (see below),
first decides which of four ordered types to use:
bool -> integer -> floating point -> complex
The selected type is the "lowest" type in the above list such that all number arguments
have a weakly "lower" type and all tensor arguments have a weakly lower corresponding
type for their dtype.
Once the type is determined, the particular result dtype is found. The dtypes are
partially ordered as follows:
bool -> uint8, int8 -> int16 -> int32 -> int64 ->
float16, bfloat16 -> float32 -> float64 -> complex32 -> complex64 -> complex128
The result dtype is selected by:
- if no tensor's dtype has the same corresponding type as the one selected,
then the result dtype is the (default) dtype corresponding to the selected type
(for example, 1.5 + an integer tensor has a result dtype of the default floating point dtype)
- if the result type is complex then the dtype is:
- the default complex dtype if there are no floating point or complex tensors
- if there are floating point or complex tensors with one or more dimensions, then
the complex dtype corresponding to the highest corresponding complex dtype among those tensors
(for example, double + cfloat -> cdouble)
- if there are only floating point or complex tensors with zero dimensions, then
the complex dtype corresponding to the highest corresponding complex dtype among those tensors
- if the first two cases do not apply, the result dtype is the highest dtype among
all tensors with one or more dimensions of the output type, and if there are no such
tensors then it's the highest dtype among all tensors with zero dimensions of the output type
(for example, long + half -> half, even if the half tensor has zero dimensions)
The "corresponding complex dtypes" are:
float16 -> complex32
bfloat16 -> complex64
float32 -> complex64
float64 -> complex128
complex32 -> complex32
complex64 -> complex64
complex128 -> complex128
The DEFAULT type promotion kind computes per above, and then uses the result dtype to pick a computation
dtype by mapping low precision floating point and complex dtypes as follows:
float16 -> float32
bfloat16 -> float32
complex32 -> complex64
This is referred to as "op math", and the NO_OPMATH type promotion kind disables this mapping, making the
computation dtype the same as the result dtype when it's selected. NO_OPMATH is appropriate for kernels
which perform no mathematical operations on their tensors (see below for examples).
    The INT_TO_FLOAT type promotion kind maps boolean and integer result dtypes to the default floating point dtype,
and computation dtypes to the appropriate op math dtype.
The COMPLEX_TO_FLOAT type promotion kind maps complex result dtypes to the corresponding float dtype, following this
mapping:
complex32 -> float16
complex64 -> float32
complex128 -> float64
Note that COMPLEX_TO_FLOAT derives the computation dtype as the DEFAULT setting does.
The BOOL_TO_LONG type promotion kind maps boolean computation and result dtypes to long.
The ALWAYS_BOOL type promotion kind always sets the result dtype to bool.
Example operators for each type promotion option:
DEFAULT : add
NO_OPMATH : where, nextafter, cat
INT_TO_FLOAT : sin
COMPLEX_TO_FLOAT : abs
BOOL_TO_LONG : pow
ALWAYS_BOOL : eq
"""
args = tuple(x for x in _args if x is not None)
highest_type: type = bool
for x in args:
if not isinstance(x, (Number, TensorLike)):
msg = (
"Unexpected type {0} when computing elementwise type promotion!".format(
str(type(x))
)
)
raise ValueError(msg)
if isinstance(x, Number):
highest_type = get_higher_type(highest_type, type(x))
else:
# x is a TensorLike
highest_type = get_higher_type(highest_type, dtype_to_type(x.dtype))
result_dtype = None
def _find_highest_dtype_filtered(
args, filter, *, float_as_complex=False
) -> Optional[torch.dtype]:
zero_dim_tensor_dtype = None
one_plus_dim_tensor_dtype = None
for x in args:
if isinstance(x, TensorLike) and filter(x.dtype):
_dtype = x.dtype
if float_as_complex and is_float_dtype(_dtype):
_dtype = corresponding_complex_dtype(_dtype)
if x.ndim == 0:
zero_dim_tensor_dtype = get_higher_dtype(
zero_dim_tensor_dtype, _dtype
)
else:
# x.ndim > 0
one_plus_dim_tensor_dtype = get_higher_dtype(
one_plus_dim_tensor_dtype, _dtype
)
# Prefers dtype of tensors with one or more dimensions
if one_plus_dim_tensor_dtype is not None:
return one_plus_dim_tensor_dtype
return zero_dim_tensor_dtype
if highest_type is float:
result_dtype = _find_highest_dtype_filtered(args, is_float_dtype)
result_dtype = (
torch.get_default_dtype() if result_dtype is None else result_dtype
)
elif highest_type is complex:
result_dtype = _find_highest_dtype_filtered(
args,
lambda x: is_float_dtype(x) or is_complex_dtype(x),
float_as_complex=True,
)
if result_dtype is None:
result_dtype = corresponding_complex_dtype(torch.get_default_dtype())
elif highest_type is int:
result_dtype = _find_highest_dtype_filtered(args, is_integer_dtype)
result_dtype = torch.long if result_dtype is None else result_dtype
else:
# highest_type is bool
result_dtype = torch.bool
if type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT:
return get_computation_dtype(result_dtype), result_dtype
elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH:
return result_dtype, result_dtype
elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT:
if is_integer_dtype(result_dtype) or is_boolean_dtype(result_dtype):
result_dtype = torch.get_default_dtype()
return get_computation_dtype(result_dtype), result_dtype
elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT:
# NOTE: computation can still occur in a complex dtype
computation_dtype = get_computation_dtype(result_dtype)
if is_complex_dtype(result_dtype):
result_dtype = corresponding_real_dtype(result_dtype)
return computation_dtype, result_dtype
elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG:
if is_boolean_dtype(result_dtype):
return torch.long, torch.long
return get_computation_dtype(result_dtype), result_dtype
elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL:
return get_computation_dtype(result_dtype), torch.bool
else:
raise ValueError(
"Unknown type promotion kind {0}".format(str(type_promotion_kind))
)
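# Illustrative sketch (hypothetical helper): a float16 tensor combined with a
# Python float under DEFAULT promotion computes in float32 ("op math") but
# still produces a float16 result.
def _example_elementwise_dtypes() -> Tuple[torch.dtype, torch.dtype]:
    t = torch.empty(3, dtype=torch.float16)
    return elementwise_dtypes(
        t, 2.0, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
    )  # (torch.float32, torch.float16)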
def reduction_dtypes(
arg,
output_dtype_kind: REDUCTION_OUTPUT_TYPE_KIND,
dtype: Optional[torch.dtype] = None,
) -> Tuple[torch.dtype, Optional[torch.dtype]]:
# even though some reductions, like amin or amax, don't strictly require type promotion,
# all the math ops (including comparisons) are still defined only for a computation type,
# so promotion will still happen. We are doing it explicitly here
inp_dtype = dtype if dtype is not None else arg.dtype
computation_dtype = get_computation_dtype(inp_dtype)
if (
output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.SAME
or output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT
):
result_dtype = dtype if dtype else arg.dtype
if (
output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT
and is_complex_dtype(result_dtype)
):
result_dtype = corresponding_real_dtype(result_dtype)
elif output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.KEEP_PROMOTED_TYPE:
result_dtype = None
else: # ALWAYS_BOOL
result_dtype = torch.bool
return computation_dtype, result_dtype
def make_contiguous_strides_for(
shape: ShapeType, row_major: bool = True
) -> Tuple[int, ...]:
"""
    Returns the strides of a contiguous (row-major) tensor if row_major=True.
    If row_major=False, it returns the strides of a contiguous batch of Fortran-contiguous matrices.
This is often used when calling external libraries like BLAS/LAPACK/cuSolver...
"""
validate_shape(shape)
if not shape:
return ()
multiplier = 1
strides = []
for l in reversed(shape):
strides.append(multiplier)
if l != 0:
multiplier *= l
result = tuple(reversed(strides))
if row_major:
return result
else:
if len(shape) < 2:
return result
return result[:-2] + (1, max(shape[-2], 1))
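# Illustrative sketch (hypothetical helper): row-major (C-contiguous) strides
# versus the batched Fortran-contiguous layout selected with row_major=False.
def _example_contiguous_strides() -> Tuple[Tuple[int, ...], Tuple[int, ...]]:
    return (
        make_contiguous_strides_for((2, 3, 4)),                   # (12, 4, 1)
        make_contiguous_strides_for((2, 3, 4), row_major=False),  # (12, 1, 3)
    )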
def make_channels_last_2d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
# TODO: maybe inform the user of channels_last_3d if rank of the tensor is 5?
check(
len(shape) == 4,
lambda: "Only tensors of rank 4 can use the channels_last memory format",
)
multiplier = 1
strides = [0] * 4
for idx in (1, -1, -2, 0):
        # NOTE: intentional divergence from make_contiguous_strides_for
# This is consistent with eager
strides[idx] = multiplier
multiplier *= shape[idx]
return tuple(strides)
def make_channels_last_3d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
check(
len(shape) == 5,
lambda: "Only tensors of rank 5 can use the channels_last_3d memory format",
)
multiplier = 1
strides = [0] * 5
for idx in (1, -1, -2, -3, 0):
        # NOTE: intentional divergence from make_contiguous_strides_for
# This is consistent with eager
strides[idx] = multiplier
multiplier *= shape[idx]
return tuple(strides)
def make_channels_last_strides_for(shape: ShapeType) -> Tuple[int, ...]:
ndim = len(shape) if isinstance(shape, Sequence) else 1
if ndim == 4:
return make_channels_last_2d_strides_for(shape)
elif ndim == 5:
return make_channels_last_3d_strides_for(shape)
else:
raise RuntimeError(
f"no channels last format strides exist in {ndim} dimensions"
)
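# Illustrative sketch (hypothetical helper): for an NCHW shape the channels-last
# strides make the channel dimension the fastest varying one, matching what
# eager produces for torch.channels_last.
def _example_channels_last_strides() -> Tuple[int, ...]:
    return make_channels_last_strides_for((2, 3, 4, 5))  # (60, 1, 15, 3)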
def compute_reduction_output_shape(
shape: ShapeType, dimensions: Sequence
) -> Tuple[int, ...]:
for idx in dimensions:
validate_idx(len(shape), idx)
new_shape = []
for idx in range(len(shape)):
if idx in dimensions:
continue
new_shape.append(shape[idx])
return tuple(new_shape)
def validate_no_repeating_dims(dims: Sequence):
if len(dims) != len(set(dims)):
raise RuntimeError("duplicate value in the list of dims")
def reduction_dims(shape: ShapeType, dims: Optional[Sequence]) -> Tuple[int, ...]:
if dims is None:
return tuple(range(len(shape)))
dims = tuple(canonicalize_dim(len(shape), idx) for idx in dims)
validate_no_repeating_dims(dims)
return dims
def check_in_bounds_for_storage(
a: torch.TypedStorage, shape: ShapeType, strides: StrideType, storage_offset: int
):
"""
Determines if the given shape, strides, and offset are valid for the given storage.
"""
    # Short-circuits if the shape has no elements
    if reduce(operator.mul, shape, 1) == 0:
return
length = a.size() - storage_offset
max_offset = 0
for x, y in zip(shape, strides):
max_offset = max_offset + (x - 1) * y
if max_offset >= length:
required_length = max_offset + storage_offset
msg = (
"Can't view a storage of size {0} with an offset of {1}, shape of {2}, and strides of {3}, "
"which requires a storage of size {4}".format(
a.size(), storage_offset, str(shape), str(strides), required_length
)
)
raise ValueError(msg)
def check(
b: bool, s: Callable[[], str], exc_type: Type[Exception] = RuntimeError
) -> None:
"""
    Helper function for raising an exception of type exc_type (default: RuntimeError) if a boolean condition fails.
    The error message is a callable producing a string (to avoid wasting time
    on string formatting in the non-error case, and also to make it easier for
    torchdynamo to trace).
"""
if not b:
raise exc_type(s())
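# Illustrative usage sketch (hypothetical helper): the message is passed as a
# thunk so the string is only built when the condition actually fails.
def _example_check_usage(rank: int) -> None:
    check(
        rank >= 1,
        lambda: f"Expected a tensor of rank at least 1, but got {rank}",
        ValueError,
    )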
# This combines is_channels_last_strides_2d and is_channels_last_strides_3d in
# c10/core/MemoryFormat.h into one function
def are_strides_like_channels_last(
shape: Sequence[int], strides: Sequence[int]
) -> bool:
ndim = len(shape)
if ndim == 4:
# Check for channels_last_2d
dim_order = [1, 3, 2, 0]
elif ndim == 5:
# Check for channels_last_3d
dim_order = [1, 4, 3, 2, 0]
else:
return False
if strides[1] == 0:
return False
min = 0
for d in dim_order:
if shape[d] == 0:
return False
if strides[d] < min:
return False
if d == 0 and min == strides[1]:
return False
min = strides[d]
if strides[d] > 1:
min *= shape[d]
return True
def suggest_memory_format(x: TensorLikeType) -> torch.memory_format:
if x.layout != torch.strided:
return torch.contiguous_format
if are_strides_like_channels_last(x.shape, x.stride()):
return torch.channels_last if x.ndim == 4 else torch.channels_last_3d
return torch.contiguous_format
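# Illustrative sketch (hypothetical helper): a tensor carrying channels-last
# strides is reported as torch.channels_last, while a default contiguous
# tensor maps to torch.contiguous_format.
def _example_suggest_memory_format() -> torch.memory_format:
    t = torch.empty(2, 3, 4, 5, memory_format=torch.channels_last)
    return suggest_memory_format(t)  # torch.channels_last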
def prod(xs: Sequence[NumberType]) -> NumberType:
"""Product of elements in input sequence. Returns 1 for empty sequence"""
return reduce(operator.mul, xs, 1)
| pytorch-master | torch/_prims_common/__init__.py |
import torch
from torch._prims_common import (
Number,
NumberType,
TensorLike,
TensorLikeType,
ELEMENTWISE_TYPE_PROMOTION_KIND,
)
import torch._prims_common as utils
from torch.utils._pytree import tree_flatten, tree_unflatten
from typing import Callable, Sequence, Union, Tuple, NamedTuple
import inspect
from functools import wraps, reduce
import operator
import warnings
from itertools import chain
# TODO: implement ref.cast with an option to enforce safe casting
def _maybe_convert_to_dtype(
a: Union[TensorLikeType, NumberType, Sequence], dtype: torch.dtype
) -> Union[TensorLikeType, NumberType, Sequence]:
import torch._prims as prims
if isinstance(a, TensorLike):
if a.dtype != dtype:
# NOTE: this is incorrect on the CPU
# See https://github.com/pytorch/pytorch/issues/77553
return prims.convert_element_type(a, dtype)
return a
if isinstance(a, Number):
return utils.dtype_to_type(dtype)(a)
if isinstance(a, Sequence):
return tuple(_maybe_convert_to_dtype(x, dtype) for x in a)
raise ValueError(
"Received type {0} that is neither a tensor or a number!".format(type(a))
)
def _maybe_convert_to_type(a: NumberType, typ: type) -> NumberType:
if not isinstance(a, Number):
msg = "Found unknown type {0} when trying to convert scalars!".format(type(a))
raise ValueError(msg)
if not utils.is_weakly_lesser_type(type(a), typ):
msg = "Scalar {0} of type {1} cannot be safely cast to type {2}!".format(
a, type(a), typ
)
raise ValueError(msg)
return typ(a)
def _annotation_has_type(*, typ, annotation):
if hasattr(annotation, "__args__"):
for a in annotation.__args__:
if _annotation_has_type(typ=typ, annotation=a):
return True
return False
return typ is annotation
class elementwise_type_promotion_wrapper(object):
"""
Adds elementwise type promotion to a Python reference implementation.
Takes two kwargs, type_promoting_args and type_promotion_kind.
    type_promoting_args must be a string Sequence specifying the argument names of all
arguments that participate in type promotion (and should be type promoted). If the
arg specifies a Sequence-type then every element of the Sequence will participate in
type promotion.
type_promotion_kind must be one of the kinds specified by ELEMENTWISE_TYPE_PROMOTION_KIND.
See its documentation for details.
Other type promotion behavior, like validating the Python type of scalar arguments, must
be handled separately.
"""
def __init__(
self,
*,
type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND,
type_promoting_args: Sequence[str] = None,
):
self.type_promoting_arg_names = type_promoting_args
self.type_promotion_kind = type_promotion_kind
def __call__(self, fn: Callable) -> Callable:
sig = inspect.signature(fn)
@wraps(fn)
def _fn(*args, **kwargs):
bound = sig.bind(*args, **kwargs)
type_promoting_args = tuple(
bound.arguments[x]
for x in self.type_promoting_arg_names # type: ignore[union-attr]
if x in bound.arguments.keys()
)
flattened_type_promoting_args = tree_flatten(type_promoting_args)[0]
compute_dtype, result_dtype = utils.elementwise_dtypes(
*flattened_type_promoting_args,
type_promotion_kind=self.type_promotion_kind,
)
promoted_args = {
x: _maybe_convert_to_dtype(bound.arguments[x], compute_dtype)
for x in self.type_promoting_arg_names # type: ignore[union-attr]
if x in bound.arguments.keys()
}
bound.arguments.update(promoted_args)
result = fn(**bound.arguments)
# FIXME?: assumes result is a single tensor
assert isinstance(result, TensorLike)
return _maybe_convert_to_dtype(result, result_dtype)
_fn.__signature__ = sig # type: ignore[attr-defined]
return _fn
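# Sketch of how this wrapper is typically applied to a Python reference;
# `_example_add_ref` is a hypothetical name, not an actual torch._refs function.
# The wrapper casts `a` and `b` to the computation dtype before the body runs
# and casts the result back to the promoted result dtype afterwards.
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a", "b"),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def _example_add_ref(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType:
    return torch.add(a, b)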
# TODO: handle tuples of tensors
def _maybe_resize_out(out: TensorLikeType, shape):
if out.numel() == 0:
return out.resize_(shape)
if out.numel() != reduce(operator.mul, shape, 1):
msg = (
"An output with one or more elements was resized since it had shape {0} "
"which does not match the required output shape {1}. "
"This behavior is deprecated, and in a future PyTorch release outputs will not "
"be resized unless they have zero elements. "
"You can explicitly reuse an out tensor t by resizing it, inplace, to zero elements with t.resize_(0).".format(
str(out.shape), str(shape)
)
)
warnings.warn(msg)
return out.resize_(shape)
return out
def _safe_copy_out(
*, copy_from: TensorLikeType, copy_to: TensorLikeType, exact_dtype: bool = False
):
# Checks same device
if copy_from.device != copy_to.device:
msg = "Attempting to copy from device {0} to device {1}, but cross-device copies are not allowed!".format(
copy_from.device, copy_to.device
)
raise RuntimeError(msg)
# Checks safe cast
if exact_dtype:
utils.check(
copy_from.dtype == copy_to.dtype,
lambda: f"Expected out tensor to have dtype {copy_from.dtype} "
"but got {copy_to.dtype} instead",
)
else:
utils.check(
utils.can_safe_cast_to(cast_from=copy_from.dtype, cast_to=copy_to.dtype),
lambda: f"Attempting to cast from {copy_from.dtype} to out tensor with dtype {copy_to.dtype}, "
"but this can't be cast because it is not safe!",
)
return copy_to.copy_(copy_from)
def out_wrapper(*out_names: str, exact_dtype: bool = False):
is_tensor = len(out_names) == 0
assert is_tensor or len(out_names) >= 2
def _out_wrapper(fn: Callable) -> Callable:
"""
Adds the out parameter to a Python reference.
"""
out_type = (
TensorLikeType
if is_tensor
else Tuple[tuple(TensorLikeType for _ in range(len(out_names)))]
)
return_type = (
TensorLikeType
if is_tensor
else NamedTuple(
f"return_types_{fn.__name__}", [(o, TensorLikeType) for o in out_names]
)
)
sig = inspect.signature(fn)
factory_kwargs = ("device", "dtype")
is_factory_fn = all(p in sig.parameters for p in factory_kwargs)
@wraps(fn)
def _fn(*args, out=None, **kwargs):
if is_factory_fn and out is not None:
for k in factory_kwargs:
out_attr = getattr(out, k)
if k not in kwargs:
kwargs[k] = out_attr
result = fn(*args, **kwargs)
assert (
isinstance(result, TensorLike)
and is_tensor
or isinstance(result, Tuple) # type: ignore[arg-type]
and len(result) == len(out_names)
)
if out is not None:
# Naively you might expect this assert to be true, but
# it's not:
#
# assert type(out) == type(result)
#
# The reason is that functions under this wrapper can
# get registered to the Meta dispatch key, and that
# means they can be executed in a context where tensor
# subclasses are disabled (with no_dispatch), which is a
# handy way for an is-a tensor subclass (e.g.,
# FakeTensor) to have the normal meta backend create a
# meta tensor, to be wrapped once it gets returned.
# In this situation, you will get a FakeTensor as
# the output tensor, but not the result--which will
# be a normal meta tensor, but this is perfectly
# harmless.
if is_tensor:
assert isinstance(out, TensorLike)
# These two operations are done in-place
_maybe_resize_out(out, result.shape)
_safe_copy_out(copy_from=result, copy_to=out, exact_dtype=exact_dtype) # type: ignore[arg-type]
else:
assert isinstance(out, Tuple) # type: ignore[arg-type]
utils.check(
len(out) == len(result),
lambda: f"expected tuple of {len(result)} elements but got {len(out)}",
TypeError,
)
for r, o in zip(result, out):
# These two operations are done in-place
_maybe_resize_out(o, r.shape)
_safe_copy_out(copy_from=r, copy_to=o, exact_dtype=exact_dtype) # type: ignore[arg-type]
else:
out = result
# mypy does not see through the definition of out_type given that it's in a different scope
return out if is_tensor else return_type(*out) # type: ignore[operator]
out_param = inspect.Parameter(
"out",
kind=inspect.Parameter.KEYWORD_ONLY,
default=None,
annotation=out_type,
)
# Mark that the function now returns a tuple
assert sig.return_annotation in (sig.empty, out_type)
params = chain(sig.parameters.values(), (out_param,))
_fn.__signature__ = inspect.Signature( # type: ignore[attr-defined]
parameters=params, return_annotation=return_type # type: ignore[arg-type]
)
_fn.__annotations__ = fn.__annotations__
_fn.__annotations__["out"] = out_type
_fn.__annotations__["return"] = return_type
return _fn
return _out_wrapper
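# Sketch of wrapping a single-output reference so it gains an `out=` keyword
# argument; `_example_exp_ref` is a hypothetical name used only for illustration.
@out_wrapper()
def _example_exp_ref(a: TensorLikeType) -> TensorLikeType:
    return torch.exp(a)
# _example_exp_ref(t, out=buf) resizes `buf` if needed and copies the result
# into it, while _example_exp_ref(t) simply returns a new tensor.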
def backwards_not_supported(prim):
class BackwardsNotSupported(torch.autograd.Function):
@staticmethod
def forward(ctx, args_spec, *flat_args):
args, kwargs = tree_unflatten(flat_args, args_spec) # type: ignore[arg-type]
g = torch._C._AutoDispatchBelowAutograd()
try:
return prim(*args, **kwargs)
finally:
del g
@staticmethod
def backward(ctx, *args):
raise RuntimeError("backwards not supported on prim")
@wraps(prim)
def _autograd_impl(*args, **kwargs):
flat_args, args_spec = tree_flatten((args, kwargs))
return BackwardsNotSupported.apply(args_spec, *flat_args)
return _autograd_impl
# TODO: when tracing this will add torch tensors and not TensorMeta objects
# to the trace -- we should fix this by adding a tracing context and NumberMeta classes
# TODO: this wrapper is currently untested
def elementwise_unary_scalar_wrapper(fn: Callable) -> Callable:
"""
Allows unary operators that accept tensors to work with Python numbers.
"""
sig = inspect.signature(fn)
@wraps(fn)
def _fn(*args, **kwargs):
if len(args) > 0 and isinstance(args[0], Number):
dtype = utils.type_to_dtype(type(args[0]))
args_ = list(args)
args_[0] = torch.tensor(args[0], dtype=dtype)
result = fn(*args_, **kwargs)
assert isinstance(result, torch.Tensor)
return result.item()
return fn(*args, **kwargs)
_fn.__signature__ = sig # type: ignore[attr-defined]
return _fn
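# Sketch with a hypothetical reference name: with the wrapper applied, a Python
# number input is lifted to a tensor and the result is returned as a Python
# number via .item(); tensor inputs pass through unchanged.
@elementwise_unary_scalar_wrapper
def _example_abs_ref(a):
    return torch.abs(a)
# _example_abs_ref(-2.0) == 2.0 (a Python float)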
| pytorch-master | torch/_prims_common/wrappers.py |
import io
import multiprocessing.queues
from multiprocessing.reduction import ForkingPickler
import pickle
class ConnectionWrapper(object):
"""Proxy class for _multiprocessing.Connection which uses ForkingPickler to
serialize objects"""
def __init__(self, conn):
self.conn = conn
def send(self, obj):
buf = io.BytesIO()
ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(obj)
self.send_bytes(buf.getvalue())
def recv(self):
buf = self.recv_bytes()
return pickle.loads(buf)
def __getattr__(self, name):
if 'conn' in self.__dict__:
return getattr(self.conn, name)
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, 'conn'))
class Queue(multiprocessing.queues.Queue):
def __init__(self, *args, **kwargs):
super(Queue, self).__init__(*args, **kwargs)
self._reader: ConnectionWrapper = ConnectionWrapper(self._reader)
self._writer: ConnectionWrapper = ConnectionWrapper(self._writer)
self._send = self._writer.send
self._recv = self._reader.recv
class SimpleQueue(multiprocessing.queues.SimpleQueue):
def _make_methods(self):
if not isinstance(self._reader, ConnectionWrapper):
self._reader: ConnectionWrapper = ConnectionWrapper(self._reader)
self._writer: ConnectionWrapper = ConnectionWrapper(self._writer)
super(SimpleQueue, self)._make_methods() # type: ignore[misc]
| pytorch-master | torch/multiprocessing/queue.py |
import sys
__all__ = ['register_after_fork']
if sys.platform == 'win32' or sys.version_info < (3, 7):
import multiprocessing.util as _util
def _register(func):
def wrapper(arg):
func()
_util.register_after_fork(_register, wrapper)
else:
import os
def _register(func):
os.register_at_fork(after_in_child=func)
def register_after_fork(func):
"""Register a callable to be executed in the child process after a fork.
Note:
In python < 3.7 this will only work with processes created using the
``multiprocessing`` module. In python >= 3.7 it also works with
``os.fork()``.
Args:
func (function): Function taking no arguments to be called in the child after fork
"""
_register(func)
| pytorch-master | torch/multiprocessing/_atfork.py |
"""
torch.multiprocessing is a wrapper around the native :mod:`multiprocessing`
module. It registers custom reducers, that use shared memory to provide shared
views on the same data in different processes. Once the tensor/storage is moved
to shared_memory (see :func:`~torch.Tensor.share_memory_`), it will be possible
to send it to other processes without making any copies.
The API is 100% compatible with the original module - it's enough to change
``import multiprocessing`` to ``import torch.multiprocessing`` to have all the
tensors sent through the queues or shared via other mechanisms, moved to shared
memory.
Because of the similarity of APIs we do not document most of this package
contents, and we recommend referring to very good docs of the original module.
"""
import torch
import sys
from .reductions import init_reductions
import multiprocessing
__all__ = ['set_sharing_strategy', 'get_sharing_strategy',
'get_all_sharing_strategies']
from multiprocessing import * # noqa: F403
__all__ += multiprocessing.__all__ # type: ignore[attr-defined]
# This call adds a Linux specific prctl(2) wrapper function to this module.
# See https://github.com/pytorch/pytorch/pull/14391 for more information.
torch._C._multiprocessing_init()
"""Add helper function to spawn N processes and wait for completion of any of
them. This depends on `mp.get_context` which was added in Python 3.4."""
from .spawn import spawn, SpawnContext, start_processes, ProcessContext, \
ProcessRaisedException, ProcessExitedException
if sys.platform == 'darwin' or sys.platform == 'win32':
_sharing_strategy = 'file_system'
_all_sharing_strategies = {'file_system'}
else:
_sharing_strategy = 'file_descriptor'
_all_sharing_strategies = {'file_descriptor', 'file_system'}
def set_sharing_strategy(new_strategy):
"""Sets the strategy for sharing CPU tensors.
Args:
new_strategy (str): Name of the selected strategy. Should be one of
the values returned by :func:`get_all_sharing_strategies()`.
"""
global _sharing_strategy
assert new_strategy in _all_sharing_strategies
_sharing_strategy = new_strategy
def get_sharing_strategy():
"""Returns the current strategy for sharing CPU tensors."""
return _sharing_strategy
def get_all_sharing_strategies():
"""Returns a set of sharing strategies supported on a current system."""
return _all_sharing_strategies
init_reductions()
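# Illustrative usage sketch (hypothetical helper): choosing a CPU sharing
# strategy before worker processes are created. 'file_descriptor' is only
# available on Linux; other platforms keep their per-platform default.
def _example_select_sharing_strategy() -> str:
    if 'file_descriptor' in get_all_sharing_strategies():
        set_sharing_strategy('file_descriptor')
    return get_sharing_strategy()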
| pytorch-master | torch/multiprocessing/__init__.py |
from typing import Optional
import multiprocessing
import multiprocessing.connection
import signal
import sys
import warnings
from . import _prctl_pr_set_pdeathsig # type: ignore[attr-defined]
class ProcessException(Exception):
__slots__ = ["error_index", "error_pid"]
def __init__(self, msg: str, error_index: int, pid: int):
super().__init__(msg)
self.msg = msg
self.error_index = error_index
self.pid = pid
def __reduce__(self):
return type(self), (self.msg, self.error_index, self.pid)
class ProcessRaisedException(ProcessException):
"""
    Exception raised when a process fails due to an exception
    raised by its code.
"""
def __init__(
self,
msg: str,
error_index: int,
error_pid: int,
):
super().__init__(msg, error_index, error_pid)
class ProcessExitedException(ProcessException):
"""
    Exception raised when a process fails due to a signal
    or exits with a specific code.
"""
__slots__ = ["exit_code"]
def __init__(
self, msg: str, error_index: int, error_pid: int,
exit_code: int, signal_name: Optional[str] = None
):
super().__init__(msg, error_index, error_pid)
self.exit_code = exit_code
self.signal_name = signal_name
def __reduce__(self):
return (
type(self),
(self.msg, self.error_index, self.pid, self.exit_code, self.signal_name),
)
def _wrap(fn, i, args, error_queue):
# prctl(2) is a Linux specific system call.
# On other systems the following function call has no effect.
# This is set to ensure that non-daemonic child processes can
# terminate if their parent terminates before they do.
_prctl_pr_set_pdeathsig(signal.SIGINT)
try:
fn(i, *args)
except KeyboardInterrupt:
pass # SIGINT; Killed by parent, do nothing
except Exception:
# Propagate exception to parent process, keeping original traceback
import traceback
error_queue.put(traceback.format_exc())
sys.exit(1)
class ProcessContext:
def __init__(self, processes, error_queues):
self.error_queues = error_queues
self.processes = processes
self.sentinels = {
process.sentinel: index
for index, process in enumerate(processes)
}
def pids(self):
return [int(process.pid) for process in self.processes]
def join(self, timeout=None):
r"""
Tries to join one or more processes in this spawn context.
If one of them exited with a non-zero exit status, this function
kills the remaining processes and raises an exception with the cause
of the first process exiting.
Returns ``True`` if all processes have been joined successfully,
``False`` if there are more processes that need to be joined.
Args:
timeout (float): Wait this long before giving up on waiting.
"""
# Ensure this function can be called even when we're done.
if len(self.sentinels) == 0:
return True
# Wait for any process to fail or all of them to succeed.
ready = multiprocessing.connection.wait(
self.sentinels.keys(),
timeout=timeout,
)
error_index = None
for sentinel in ready:
index = self.sentinels.pop(sentinel)
process = self.processes[index]
process.join()
if process.exitcode != 0:
error_index = index
break
# Return if there was no error.
if error_index is None:
# Return whether or not all processes have been joined.
return len(self.sentinels) == 0
# Assume failure. Terminate processes that are still alive.
for process in self.processes:
if process.is_alive():
process.terminate()
process.join()
# There won't be an error on the queue if the process crashed.
failed_process = self.processes[error_index]
if self.error_queues[error_index].empty():
exitcode = self.processes[error_index].exitcode
if exitcode < 0:
name = signal.Signals(-exitcode).name
raise ProcessExitedException(
"process %d terminated with signal %s" %
(error_index, name),
error_index=error_index,
error_pid=failed_process.pid,
exit_code=exitcode,
signal_name=name
)
else:
raise ProcessExitedException(
"process %d terminated with exit code %d" %
(error_index, exitcode),
error_index=error_index,
error_pid=failed_process.pid,
exit_code=exitcode
)
original_trace = self.error_queues[error_index].get()
msg = "\n\n-- Process %d terminated with the following error:\n" % error_index
msg += original_trace
raise ProcessRaisedException(msg, error_index, failed_process.pid)
class SpawnContext(ProcessContext):
def __init__(self, processes, error_queues):
warnings.warn('SpawnContext is renamed to ProcessContext since 1.4 release.')
super(SpawnContext, self).__init__(processes, error_queues)
pass
# Note: [start_processes]
# mp.start_processes handles both start_method='spawn' and 'fork'. It's supposed to be a
# more generalized API than mp.spawn. Currently we only document mp.spawn as it's the
# CUDA compatible start_method. However, in environments like Ipython notebooks, 'fork'
# works better than 'spawn'. Every helper function we created for mp.spawn is indeed
# general enough, and backends like XLA can reuse them in Colab notebooks as well.
# Currently we only add this API first, we can consider adding it to documentation as
# needed in the future.
def start_processes(fn, args=(), nprocs=1, join=True, daemon=False, start_method='spawn'):
mp = multiprocessing.get_context(start_method)
error_queues = []
processes = []
for i in range(nprocs):
error_queue = mp.SimpleQueue()
process = mp.Process(
target=_wrap,
args=(fn, i, args, error_queue),
daemon=daemon,
)
process.start()
error_queues.append(error_queue)
processes.append(process)
context = ProcessContext(processes, error_queues)
if not join:
return context
# Loop on join until it returns True or raises an exception.
while not context.join():
pass
def spawn(fn, args=(), nprocs=1, join=True, daemon=False, start_method='spawn'):
r"""Spawns ``nprocs`` processes that run ``fn`` with ``args``.
If one of the processes exits with a non-zero exit status, the
remaining processes are killed and an exception is raised with the
cause of termination. In the case an exception was caught in the
child process, it is forwarded and its traceback is included in
the exception raised in the parent process.
Args:
fn (function): Function is called as the entrypoint of the
spawned process. This function must be defined at the top
level of a module so it can be pickled and spawned. This
is a requirement imposed by multiprocessing.
The function is called as ``fn(i, *args)``, where ``i`` is
the process index and ``args`` is the passed through tuple
of arguments.
args (tuple): Arguments passed to ``fn``.
nprocs (int): Number of processes to spawn.
join (bool): Perform a blocking join on all processes.
daemon (bool): The spawned processes' daemon flag. If set to True,
daemonic processes will be created.
start_method (str): (deprecated) this method will always use ``spawn``
as the start method. To use a different start method
use ``start_processes()``.
Returns:
None if ``join`` is ``True``,
:class:`~ProcessContext` if ``join`` is ``False``
"""
if start_method != 'spawn':
msg = ('This method only supports start_method=spawn (got: %s).\n'
'To use a different start_method use:\n\t\t'
' torch.multiprocessing.start_processes(...)' % start_method)
warnings.warn(msg)
return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
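# Illustrative usage sketch: `_example_worker` is a hypothetical entrypoint; it
# must live at module top level so it can be pickled by the spawn start method.
def _example_worker(rank, nprocs):
    print(f"worker {rank} of {nprocs} started")
# spawn(_example_worker, args=(4,), nprocs=4) starts four processes running
# _example_worker(i, 4) and blocks until they all exit, re-raising the first
# failure (with its traceback) in the parent.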
| pytorch-master | torch/multiprocessing/spawn.py |
import torch
import torch.utils.hooks
from torch._namedtensor_internals import check_serializing_named_tensor
import os
import threading
import multiprocessing
from multiprocessing.util import register_after_fork
from multiprocessing.reduction import ForkingPickler
from typing import Union
try:
# Early load resource_sharer to prevent a partially initialized instance
# from being inherited in a forked child process. The reduce_storage method
# requires this module indirectly through DupFd(). The built-in mp.Queue
# class pickles arguments in a background thread which may overlap with the
# fork.
import multiprocessing.resource_sharer
except ImportError:
pass
class StorageWeakRef(object):
r"""A weak reference to a Storage.
The cdata member is a Python number containing the integer representation of
the Storage pointer."""
def __init__(self, storage):
self.cdata = storage._weak_ref()
# Save a direct reference to _free_weak_ref because the `torch` module
# might be cleared during Python shutdown before this module is cleared.
self._free_weak_ref = torch.Storage._free_weak_ref # type: ignore[attr-defined]
def expired(self):
return torch.Storage._expired(self.cdata) # type: ignore[attr-defined]
def __del__(self):
self._free_weak_ref(self.cdata)
def __hash__(self):
return self.cdata
def __eq__(self, other):
if id(self) == id(other):
return True
return self.cdata == other.cdata
class SharedCache(dict):
"""dictionary from multiprocessing handles to StorageWeakRef"""
def __init__(self):
# free_dead_references() is called if the len exceeds the current
# limit. The limit scales with the number of remaining live objects.
self.limit = 128
# `fork` inherits lock state, so in case we fork when the lock is held,
# we register a function to reset the lock to a new object to avoid
# possible deadlocks, following python multiprocessing library design.
self._after_fork()
register_after_fork(self, SharedCache._after_fork)
def _after_fork(self):
self.lock = threading.Lock()
def get(self, key):
with self.lock:
return dict.get(self, key)
def __setitem__(self, key, storage_ref):
with self.lock:
dict.__setitem__(self, key, storage_ref)
if len(self) > self.limit:
self.free_dead_references()
def free_dead_references(self):
live = 0
for key, storage_ref in list(self.items()):
if storage_ref.expired():
del self[key]
else:
live += 1
self.limit = max(128, live * 2)
# mapping from handles to StorageWeakRef objects
shared_cache = SharedCache()
def rebuild_event(device, handle):
return torch.cuda.Event.from_ipc_handle(device, handle)
def reduce_event(event):
handle = event.ipc_handle()
return (rebuild_event, (event.device, handle))
def rebuild_tensor(cls, storage, metadata):
storage_offset, size, stride, requires_grad = metadata
t = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
if cls == torch.nn.parameter.Parameter:
# we have to pass requires_grad into constructor, rather than set it as an
# attribute later, because it's an important check for Integer Tensors to
# have requires_grad=False (or else they raise an error)
t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
else:
t.requires_grad = requires_grad
return t
def rebuild_cuda_tensor(tensor_cls, tensor_size, tensor_stride, tensor_offset,
storage_cls, dtype, storage_device, storage_handle, storage_size_bytes, storage_offset_bytes,
requires_grad, ref_counter_handle, ref_counter_offset, event_handle, event_sync_required):
# If storage_handle is None, storage points to nullptr.
if storage_handle is None or storage_size_bytes == 0:
storage = storage_cls(0, dtype=dtype, device=storage_device)
else:
storage = storage_from_cache(storage_cls, (storage_handle, storage_offset_bytes))
if storage is None:
torch.cuda._lazy_init()
storage = storage_cls._new_shared_cuda(
storage_device,
storage_handle,
storage_size_bytes,
storage_offset_bytes,
ref_counter_handle,
ref_counter_offset,
event_handle,
event_sync_required)
shared_cache[(storage_handle, storage_offset_bytes)] = StorageWeakRef(storage)
else:
# We already ref counting this Storage, but producer needs new ref-counters to be released.
storage_cls._release_ipc_counter(ref_counter_handle, ref_counter_offset, device=storage_device)
t = torch._utils._rebuild_tensor(
torch.storage.TypedStorage(wrap_storage=storage.untyped(), dtype=dtype),
tensor_offset, tensor_size, tensor_stride)
if tensor_cls == torch.nn.parameter.Parameter:
# It is crucial for integer tensors to receive
# the requires_grad=False as an argument in the constructor
t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
else:
t.requires_grad = requires_grad
return t
def reduce_tensor(tensor):
storage = tensor.storage()
if tensor.requires_grad and not tensor.is_leaf:
raise RuntimeError("Cowardly refusing to serialize non-leaf tensor which requires_grad, "
"since autograd does not support crossing process boundaries. "
"If you just want to transfer the data, call detach() on the tensor "
"before serializing (e.g., putting it on the queue).")
check_serializing_named_tensor(tensor)
torch.utils.hooks.warn_if_has_hooks(tensor)
# Note [CUDA IPC and the caching allocator]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# When you send a CUDA tensor over IPC, you might expect that you will
# get out the same storage from the other end. However, the CUDA caching
# allocator makes it difficult to preserve this invariant. Consider
# the following situation: a tensor of size 0x100 points to offset 0x20 of
# a storage at 0xA100 of size 0x100. (For simplicity, all of these
# sizes are given in bytes). HOWEVER, with the caching allocator, this storage
# might be part of a larger cudaMalloc allocation 0xA000 of size 0x4000.
#
# When we want to send this CUDA tensor over IPC, we must send the
# *entire* cudaMalloc allocation, i.e., the 0xA000 region, not just
# the storage 0xA100 (because that is what CUDA supports). So, on the
# other end, there simply isn't any way to say, "Wait, you gave me
# a bigger region (0xA000) than the one I wanted (0xA100)".
#
# OK, so if you sent the cudaMalloc allocation, can you just wrap that up as
# one storage itself? No, because this cudaMalloc allocation might contain
# storages of mixed types: float, bytes, double... If you make the entire
# allocation a single storage of a type A, we'll hit an error when constructing
# a tensor of type B on the storage.
#
# cudaIpcMemHandle is an identifier to access the sender cudaMalloc allocation on the
# receiver side. However, cudaIpcMemHandles from each device in a given process may
# only be opened by one context per device per other process.
    # If we open and close a memory handle multiple times in a process, CUDA is allowed
    # to give it a different address; similarly, once we close the memory, we're not
    # allowed to access it (or the storage/tensor built on top of it), even if it is
    # still live in the original process. As we cannot turn a cudaMalloc allocation
    # into a single storage in one go, this requires us to cache the device pointer for
    # each cudaIpcMemHandle on the C++ side in order to reconstruct storages of different
    # types, while keeping the old ones alive.
# See [https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html]
#
# This is fine, because all we need to do is to save our position in the allocation,
# and reconstruct storage and tensor from it.
# 0xA000 -> -------CUDA Allocation------
# | |
# | |
# | |
# | |
# 0xA100 -> --------storage1 begin------
# | |
# 0xA120 -> --------tensor1 begin ------
# | |
# | |
# | |
# | |
# | |
# 0xA160 -> --------tensor1 end---------
# | |
# | |
# | |
# 0xA200 -> --------storage1 end--------
# | |
# 0xE000 -> --------CUDA allocation-----
#
    # To send tensor1, the following info is required from sender to receiver for
    # storage reconstruction.
# 1. cudaIpcMemHandle of 0xA000(which can be mapped to a basePtr in receiver process).
# basePtr may not be exactly 0xA000 since it's a different process.
# 2. offset(0xA100) of storage1 in the CUDA allocation.
# 3. size of storage1(0x100).
#
# On receiver side:
# 1. Get the devPtr of the MemHandle to access the memory, reconstruct a storage
# of the same type using (basePtr, offset, size).
# 2. we can reconstruct the tensor on top of the reconstructed storage
# Tensor(size=0x040, offset=0x020, storage=Storage(data=basePtr+0xA100, size=0x0100))
#
# This strategy has a few implications:
#
# 1. When we serialize a CUDA tensor for IPC, we cannot do it all in one
    #    go (non-compositionally), and this requires us to have a global map
# memHandle -> devPtr for each process.
#
# 2. We MUST NOT let the new IPC tensor be resizable. Originally, a resize
# of the storage beyond 0x100 would merely have caused us to do a
# reallocation. You don't really want to do this, but if you did,
# all that would happen is that you would lose IPC sharing. But if
# you do this in the new world, we will happily let you write out of
# bounds of your "allocation", clobbering unrelated data in the cached
# allocator block. BAD!
#
# By the way, in old versions of PyTorch, we supported this situation
# natively using a "storage view", which permitted multiple storages to be
# views on each other. But this was the *only* use of storage views, so we
# eliminated it so that we could just use tensor views to implement the same
# thing.
#
if storage.is_cuda:
(device,
handle,
storage_size_bytes,
storage_offset_bytes,
ref_counter_handle,
ref_counter_offset,
event_handle,
event_sync_required) = storage._share_cuda_()
tensor_offset = tensor.storage_offset()
shared_cache[handle] = StorageWeakRef(storage)
# _backward_hooks purposely omitted here, see
# Note [Don't serialize hooks]
return (rebuild_cuda_tensor,
(type(tensor),
tensor.size(),
tensor.stride(),
tensor_offset, # tensor offset in its storage
type(storage),
tensor.dtype,
device,
handle, # identifier which CUDA allocation is the storage in.
storage_size_bytes, # size(in bytes) of the storage
storage_offset_bytes, # offset(in bytes) of the storage in the CUDA allocation
tensor.requires_grad,
ref_counter_handle,
ref_counter_offset,
event_handle,
event_sync_required))
# _backward_hooks purposely omitted here, see Note [Don't serialize hooks]
metadata = (tensor.storage_offset(), tensor.size(), tensor.stride(), tensor.requires_grad)
return (rebuild_tensor, (
type(tensor),
storage,
metadata))
def fd_id(fd):
# Returns a tuple which uniquely identifies a file descriptor. In Mac OS,
# this doesn't work with shared memory handles, which is why we don't
# support the "file_descriptor" sharing method on that platform.
stat = os.fstat(fd)
return (stat.st_ino, stat.st_dev)
def storage_from_cache(cls, key):
storage_ref = shared_cache.get(key)
if storage_ref is None:
return None
return torch.UntypedStorage._new_with_weak_ptr(storage_ref.cdata)
def rebuild_storage_fd(cls, df, size):
fd = df.detach()
try:
storage = storage_from_cache(cls, fd_id(fd))
if storage is not None:
return storage
storage = cls._new_shared_fd_cpu(fd, size)
shared_cache[fd_id(fd)] = StorageWeakRef(storage)
return storage
finally:
os.close(fd)
def rebuild_storage_filename(cls, manager, handle, size, dtype=None):
storage: Union[torch.TypedStorage, torch.UntypedStorage] = storage_from_cache(cls, handle)
if storage is not None:
return storage._shared_decref()
if dtype is None:
storage = torch.UntypedStorage._new_shared_filename_cpu(manager, handle, size)
else:
byte_size = size * torch._utils._element_size(dtype)
untyped_storage: torch.UntypedStorage = torch.UntypedStorage._new_shared_filename_cpu(manager, handle, byte_size)
storage = torch.TypedStorage(
wrap_storage=untyped_storage,
dtype=dtype)
shared_cache[handle] = StorageWeakRef(storage)
return storage._shared_decref()
def rebuild_storage_empty(cls):
return cls()
def rebuild_typed_storage(storage, dtype):
return torch.storage.TypedStorage(wrap_storage=storage, dtype=dtype)
# Use for torch.storage.TypedStorage
def reduce_typed_storage(storage):
return (rebuild_typed_storage, (storage._storage, storage.dtype))
def rebuild_typed_storage_child(storage, storage_type):
return storage_type(wrap_storage=storage)
# Use for child classes of torch.storage.TypedStorage, like torch.FloatStorage
def reduce_typed_storage_child(storage):
return (rebuild_typed_storage_child, (storage._storage, type(storage)))
def reduce_storage(storage):
from . import get_sharing_strategy
if storage.is_cuda:
raise RuntimeError("Cannot pickle CUDA storage; try pickling a CUDA tensor instead")
elif get_sharing_strategy() == 'file_system':
metadata = storage._share_filename_cpu_()
cache_key = metadata[1]
rebuild = rebuild_storage_filename
if isinstance(storage, torch.TypedStorage):
metadata += (storage.dtype,)
storage._shared_incref()
elif storage.size() == 0:
# This is special cased because Empty tensors
# (with size 0) cannot be mmapped.
return (rebuild_storage_empty, (type(storage),))
else:
fd, size = storage._share_fd_cpu_()
df = multiprocessing.reduction.DupFd(fd)
cache_key = fd_id(fd)
metadata = (df, size)
rebuild = rebuild_storage_fd # type: ignore[assignment]
shared_cache[cache_key] = StorageWeakRef(storage)
return (rebuild, (type(storage),) + metadata)
def init_reductions():
ForkingPickler.register(torch.cuda.Event, reduce_event)
for t in torch._storage_classes:
if t.__name__ == 'UntypedStorage':
ForkingPickler.register(t, reduce_storage)
else:
ForkingPickler.register(t, reduce_typed_storage_child)
ForkingPickler.register(torch.storage.TypedStorage, reduce_typed_storage)
for t in torch._tensor_classes:
ForkingPickler.register(t, reduce_tensor)
# TODO: Maybe this should be in tensor_classes? :)
ForkingPickler.register(torch.Tensor, reduce_tensor)
ForkingPickler.register(torch.nn.parameter.Parameter, reduce_tensor)
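# A minimal end-to-end sketch of the CUDA IPC path documented above: a CUDA
# tensor sent through a torch.multiprocessing queue is pickled by ForkingPickler
# via reduce_tensor on the sender side and rebuilt by rebuild_cuda_tensor in the
# child. It assumes at least one CUDA device; the helper name below is purely
# illustrative, and the "spawn" start method is required for CUDA tensors.
def _ipc_demo_consumer(q):
    t = q.get()                      # rebuilt from (handle, offset, size) here
    print(t.device, t.sum().item())  # views the sender's CUDA allocation

if __name__ == "__main__":
    import torch.multiprocessing as mp
    mp.set_start_method("spawn", force=True)
    queue = mp.Queue()
    src = torch.ones(4, device="cuda")
    child = mp.Process(target=_ipc_demo_consumer, args=(queue,))
    child.start()
    queue.put(src)   # keep `src` alive until the consumer has used it
    child.join()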
| pytorch-master | torch/multiprocessing/reductions.py |
import multiprocessing.pool
import multiprocessing.util as util
from .queue import SimpleQueue
def clean_worker(*args, **kwargs):
import gc
multiprocessing.pool.worker(*args, **kwargs)
# Regular multiprocessing workers don't fully clean up after themselves,
# so we have to explicitly trigger garbage collection to make sure that all
# destructors are called...
gc.collect()
class Pool(multiprocessing.pool.Pool):
"""Pool implementation which uses our version of SimpleQueue.
This lets us pass tensors in shared memory across processes instead of
serializing the underlying data."""
def _setup_queues(self):
self._inqueue = SimpleQueue()
self._outqueue = SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def _repopulate_pool(self):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
# changed worker -> clean_worker
args = (self._inqueue, self._outqueue,
self._initializer,
self._initargs, self._maxtasksperchild)
if hasattr(self, '_wrap_exception'):
args += (self._wrap_exception,)
w = self.Process(target=clean_worker, args=args)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
util.debug('added worker')
| pytorch-master | torch/multiprocessing/pool.py |
import torch
from torch.fx import GraphModule
from torch.nn import Module
from torch.fx.passes.backends.cudagraphs import partition_cudagraphs
from torch.multiprocessing.reductions import StorageWeakRef
from torch.utils._pytree import tree_map
import torchdynamo # type: ignore[import]
from torchdynamo.optimizations.training import AOTAutogradStrategy # type: ignore[import]
import operator
from collections import defaultdict
from typing import Set
# TODO: maybe this should live in torchdynamo instead
__all__ = ['aot_autograd_cudagraphs']
def cloner(t):
if isinstance(t, torch.Tensor):
return t.clone()
else:
return t
class CudaGraphModule(Module):
gm: GraphModule
mutated_inputs: Set[int]
def __init__(self, gm, mutated_inputs):
super().__init__()
self.gm = gm
self.mutated_inputs = mutated_inputs
warmed_up = False
# these are all None or all filled
graph = None
static_inputs = None
static_outputs = None
# NB: we override __call__ as we don't need any nn.Module machinery
# and to reduce overhead
def __call__(self, *args):
# TODO: once we've recorded here, we'd like to replace the __call__
# implementation with compiled bytecode that copies into static, replays
# the cuda graph, then copies out. First condition is the hotpath,
# needs optimizing
if self.graph is not None:
assert len(args) == len(self.static_inputs)
for dst, src in zip(self.static_inputs, args):
dst.copy_(src)
self.graph.replay()
for i in self.mutated_inputs:
args[i].copy_(self.static_inputs[i])
return tree_map(cloner, self.static_outputs)
elif self.warmed_up:
# record
self.static_inputs = [x.clone() for x in args]
self.graph = torch.cuda.CUDAGraph()
with torch.cuda.graph(self.graph):
self.static_outputs = self.gm(*self.static_inputs)
# NB: recording doesn't actually run the operations, so
# now we immediately replay the graph to serve up the result
self.graph.replay()
for i in self.mutated_inputs:
args[i].copy_(self.static_inputs[i])
return tree_map(cloner, self.static_outputs)
else:
# warmup
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
r = self.gm(*args)
torch.cuda.current_stream().wait_stream(stream)
self.warmed_up = True
return r
# Interpreter versions of these passes can be found at
# https://gist.github.com/ezyang/df2d746cac3b2c7d55c181e37c57ef23
def find_input_mutations(g):
FK = 'fake_result'
inputs = defaultdict(set)
input_idx = 0
mutated_inputs = set()
for n in g.nodes:
if n.op == 'placeholder':
inputs[StorageWeakRef(n.meta[FK].storage())].add(input_idx)
input_idx += 1
elif n.op == 'call_function':
if n.target is operator.getitem:
continue
schema = n.target._schema
for i, arg in enumerate(schema.arguments):
if i < len(n.args):
argument = n.args[i]
else:
if arg.name not in n.kwargs:
continue
argument = n.kwargs[arg.name]
mut_arg = False
if arg.alias_info:
if arg.alias_info.is_write:
mut_arg = True
if mut_arg:
# TODO: not correct for args that contain tensors in a struct
# like list
mutated_inputs |= inputs[StorageWeakRef(argument.meta[FK].storage())]
# TODO: error on unrecognized nodes
return mutated_inputs
# Mutates input graph
def apply_cuda_graphs(gm):
for n in gm.graph.nodes:
if n.op == 'call_module':
assert not n.kwargs
submod = gm.get_submodule(n.target)
gm.delete_submodule(n.target)
mutated_inputs = find_input_mutations(submod.graph)
gm.add_submodule(n.target, CudaGraphModule(submod, mutated_inputs))
# NB: we didn't actually change the graph, no need for recompile
def cudagraphs(model, inputs):
model = partition_cudagraphs(model, inputs)
apply_cuda_graphs(model)
return model
def raw_aot_autograd_cudagraphs(model, inputs):
kwargs = {
# these are taken from memory_efficient_fusion()
"fw_compiler": cudagraphs,
"bw_compiler": cudagraphs,
"hasher_type": "StaticShapeHasher",
}
def _wrapped_bw_compiler(*args, **kwargs):
# stop TorchDynamo from trying to compile our generated backwards pass
return torchdynamo.disable(bw_compiler(*args, **kwargs)) # type: ignore[operator]
bw_compiler = kwargs.get("bw_compiler") or kwargs["fw_compiler"]
kwargs["bw_compiler"] = _wrapped_bw_compiler
from functorch.compile import aot_module_simplified # type: ignore[import]
return aot_module_simplified(model, **kwargs)
class AOTAutogradCudaGraphs(AOTAutogradStrategy):
def candidate(self):
return raw_aot_autograd_cudagraphs(self.gm, self.example_inputs)
aot_autograd_cudagraphs = AOTAutogradCudaGraphs.compile_fn
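# A standalone, hedged sketch of the warmup -> capture -> replay pattern that
# CudaGraphModule automates above, written against the public torch.cuda API.
# It assumes a CUDA device is available; shapes and the toy computation are
# illustrative only.
if __name__ == "__main__":
    static_in = torch.randn(8, device="cuda")
    side = torch.cuda.Stream()
    side.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(side):            # warm up outside the capture
        for _ in range(3):
            static_out = static_in * 2 + 1
    torch.cuda.current_stream().wait_stream(side)
    g = torch.cuda.CUDAGraph()
    with torch.cuda.graph(g):                # record, don't execute
        static_out = static_in * 2 + 1
    fresh = torch.randn(8, device="cuda")
    static_in.copy_(fresh)                   # refresh the captured input buffer
    g.replay()                               # re-run the recorded kernels
    print(torch.allclose(static_out, fresh * 2 + 1))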
| pytorch-master | torch/cuda/_dynamo_graphs.py |
import ctypes
import torch
from ._utils import _dummy_type
if not hasattr(torch._C, '_CudaStreamBase'):
# Define dummy base classes
torch._C.__dict__['_CudaStreamBase'] = _dummy_type('_CudaStreamBase')
torch._C.__dict__['_CudaEventBase'] = _dummy_type('_CudaEventBase')
class Stream(torch._C._CudaStreamBase):
r"""Wrapper around a CUDA stream.
A CUDA stream is a linear sequence of execution that belongs to a specific
device, independent from other streams. See :ref:`cuda-semantics` for
details.
Args:
device(torch.device or int, optional): a device on which to allocate
the stream. If :attr:`device` is ``None`` (default) or a negative
integer, this will use the current device.
priority(int, optional): priority of the stream. Can be either
-1 (high priority) or 0 (low priority). By default, streams have
priority 0.
.. note:: Although CUDA versions >= 11 support more than two levels of
priorities, in PyTorch, we only support two levels of priorities.
"""
def __new__(cls, device=None, priority=0, **kwargs):
# setting device manager is expensive, so we avoid it unless necessary
if device is None or "_cdata" in kwargs:
return super(Stream, cls).__new__(cls, priority=priority, **kwargs)
else:
with torch.cuda.device(device):
return super(Stream, cls).__new__(cls, priority=priority, **kwargs)
def wait_event(self, event):
r"""Makes all future work submitted to the stream wait for an event.
Args:
event (torch.cuda.Event): an event to wait for.
.. note:: This is a wrapper around ``cudaStreamWaitEvent()``: see
`CUDA Stream documentation`_ for more info.
This function returns without waiting for :attr:`event`: only future
operations are affected.
.. _CUDA Stream documentation:
https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html
"""
event.wait(self)
def wait_stream(self, stream):
r"""Synchronizes with another stream.
All future work submitted to this stream will wait until all kernels
submitted to the given stream at the time of this call have completed.
Args:
stream (Stream): a stream to synchronize.
.. note:: This function returns without waiting for currently enqueued
kernels in :attr:`stream`: only future operations are affected.
"""
self.wait_event(stream.record_event())
def record_event(self, event=None):
r"""Records an event.
Args:
event (torch.cuda.Event, optional): event to record. If not given, a new one
will be allocated.
Returns:
Recorded event.
"""
if event is None:
event = Event()
event.record(self)
return event
def query(self):
r"""Checks if all the work submitted has been completed.
Returns:
A boolean indicating if all kernels in this stream are completed."""
return super(Stream, self).query()
def synchronize(self):
r"""Wait for all the kernels in this stream to complete.
.. note:: This is a wrapper around ``cudaStreamSynchronize()``: see
`CUDA Stream documentation`_ for more info.
"""
super(Stream, self).synchronize()
@property
def _as_parameter_(self):
return ctypes.c_void_p(self.cuda_stream)
def __eq__(self, o):
if isinstance(o, Stream):
return super(Stream, self).__eq__(o)
return False
def __hash__(self):
return hash((self.cuda_stream, self.device))
def __repr__(self):
return ('<torch.cuda.Stream device={0} cuda_stream={1:#x}>'
.format(self.device, self.cuda_stream))
class ExternalStream(Stream):
r"""Wrapper around an externally allocated CUDA stream.
This class is used to wrap streams allocated in other libraries in order
to facilitate data exchange and multi-library interactions.
.. note:: This class doesn't manage the stream life-cycle; it is the user's
responsibility to keep the referenced stream alive while this class is
being used.
Args:
stream_ptr(int): Integer representation of the `cudaStream_t` value
allocated externally.
device(torch.device or int, optional): the device where the stream
was originally allocated. If the device is specified incorrectly,
subsequent launches using this stream may fail.
"""
def __new__(cls, stream_ptr, device=None, **kwargs):
with torch.cuda.device(device):
return super(ExternalStream, cls).__new__(cls, stream_ptr=stream_ptr, **kwargs)
class Event(torch._C._CudaEventBase):
r"""Wrapper around a CUDA event.
CUDA events are synchronization markers that can be used to monitor the
device's progress, to accurately measure timing, and to synchronize CUDA
streams.
The underlying CUDA events are lazily initialized when the event is first
recorded or exported to another process. After creation, only streams on the
same device may record the event. However, streams on any device can wait on
the event.
Args:
enable_timing (bool, optional): indicates if the event should measure time
(default: ``False``)
blocking (bool, optional): if ``True``, :meth:`wait` will be blocking (default: ``False``)
interprocess (bool): if ``True``, the event can be shared between processes
(default: ``False``)
.. _CUDA Event Documentation:
https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__EVENT.html
"""
def __new__(cls, enable_timing=False, blocking=False, interprocess=False):
return super(Event, cls).__new__(
cls,
enable_timing=enable_timing, blocking=blocking, interprocess=interprocess)
@classmethod
def from_ipc_handle(cls, device, handle):
r"""Reconstruct an event from an IPC handle on the given device."""
return super(Event, cls).from_ipc_handle(device, handle)
def record(self, stream=None):
r"""Records the event in a given stream.
Uses ``torch.cuda.current_stream()`` if no stream is specified. The
stream's device must match the event's device."""
if stream is None:
stream = torch.cuda.current_stream()
super(Event, self).record(stream)
def wait(self, stream=None):
r"""Makes all future work submitted to the given stream wait for this
event.
Uses ``torch.cuda.current_stream()`` if no stream is specified.
.. note:: This is a wrapper around ``cudaStreamWaitEvent()``: see
`CUDA Event documentation`_ for more info.
"""
if stream is None:
stream = torch.cuda.current_stream()
super(Event, self).wait(stream)
def query(self):
r"""Checks if all work currently captured by event has completed.
Returns:
A boolean indicating if all work currently captured by event has
completed.
"""
return super(Event, self).query()
def elapsed_time(self, end_event):
r"""Returns the time elapsed in milliseconds after the event was
recorded and before the end_event was recorded.
"""
return super(Event, self).elapsed_time(end_event)
def synchronize(self):
r"""Waits for the event to complete.
Waits until the completion of all work currently captured in this event.
This prevents the CPU thread from proceeding until the event completes.
.. note:: This is a wrapper around ``cudaEventSynchronize()``: see
`CUDA Event documentation`_ for more info.
"""
super(Event, self).synchronize()
def ipc_handle(self):
r"""Returns an IPC handle of this event. If not recorded yet, the event
will use the current device. """
return super(Event, self).ipc_handle()
@property
def _as_parameter_(self):
return ctypes.c_void_p(self.cuda_event)
def __repr__(self):
if self.cuda_event:
return '<torch.cuda.Event {0:#x}>'.format(self._as_parameter_.value)
else:
return '<torch.cuda.Event uninitialized>'
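# A hedged usage sketch for the wrappers above (exposed publicly as
# torch.cuda.Stream and torch.cuda.Event): run a matmul on a side stream and
# time it with events. Assumes a CUDA device is available; sizes are arbitrary.
if __name__ == "__main__":
    side = torch.cuda.Stream()
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    x = torch.randn(1024, 1024, device="cuda")
    with torch.cuda.stream(side):
        start.record()
        y = x @ x
        end.record()
    torch.cuda.current_stream().wait_stream(side)  # order later work after `side`
    end.synchronize()
    print(f"matmul took {start.elapsed_time(end):.3f} ms")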
| pytorch-master | torch/cuda/streams.py |
| pytorch-master | torch/cuda/error.py |
# The functions here have been moved to torch.nn.parallel.comm
from torch.nn.parallel.comm import broadcast, broadcast_coalesced, reduce_add, \
reduce_add_coalesced, scatter, gather
__all__ = ['broadcast', 'broadcast_coalesced', 'reduce_add', 'reduce_add_coalesced', 'scatter', 'gather']
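# A minimal hedged sketch of the re-exported helpers; it assumes at least one
# CUDA device, and with several GPUs the `devices` list would simply grow.
if __name__ == "__main__":
    import torch
    t = torch.arange(4, dtype=torch.float32, device="cuda:0")
    copies = broadcast(t, devices=[0])             # one copy per listed device
    gathered = gather(list(copies), destination=0)
    print([c.device for c in copies], tuple(gathered.shape))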
| pytorch-master | torch/cuda/comm.py |
import collections
import contextlib
import warnings
from typing import Any, Dict, Union, Tuple
import torch
from . import is_initialized, _get_device_index, _lazy_init
from ._memory_viz import segments as _segments, memory as _memory
from torch.types import Device
from torch import _C
__all__ = ["caching_allocator_alloc", "caching_allocator_delete", "set_per_process_memory_fraction",
"empty_cache", "memory_stats", "memory_stats_as_nested_dict", "reset_accumulated_memory_stats",
"reset_peak_memory_stats", "reset_max_memory_allocated", "reset_max_memory_cached",
"memory_allocated", "max_memory_allocated", "memory_reserved", "max_memory_reserved",
"memory_cached", "max_memory_cached", "memory_snapshot", "memory_summary", "list_gpu_processes",
"mem_get_info"]
def _host_allocator():
_lazy_init()
return torch._C._cuda_cudaHostAllocator()
@contextlib.contextmanager
def _free_mutex():
torch._C._cuda_lock_mutex()
try:
yield
finally:
torch._C._cuda_unlock_mutex()
def caching_allocator_alloc(size, device: Union[Device, int] = None, stream=None):
r"""Performs a memory allocation using the CUDA memory allocator.
Memory is allocated for a given device and a stream; this
function is intended to be used for interoperability with other
frameworks. Allocated memory is released through
:func:`~torch.cuda.caching_allocator_delete`.
Args:
size (int): number of bytes to be allocated.
device (torch.device or int, optional): selected device. If it is
``None`` the default CUDA device is used.
stream (torch.cuda.Stream or int, optional): selected stream. If is ``None`` then
the default stream for the selected device is used.
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
if device is None:
device = torch.cuda.current_device()
device = _get_device_index(device)
if stream is None:
stream = torch.cuda.current_stream(device)
if isinstance(stream, torch.cuda.streams.Stream):
stream = stream.cuda_stream
if not isinstance(stream, int):
raise TypeError('Invalid type for stream argument, must be '
'`torch.cuda.Stream` or `int` representing a pointer '
'to an existing stream')
with torch.cuda.device(device):
return torch._C._cuda_cudaCachingAllocator_raw_alloc(size, stream)
def caching_allocator_delete(mem_ptr):
r"""Deletes memory allocated using the CUDA memory allocator.
Memory allocated with :func:`~torch.cuda.caching_allocator_alloc`
is freed here. The associated device and stream are tracked inside
the allocator.
Args:
mem_ptr (int): memory address to be freed by the allocator.
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
torch._C._cuda_cudaCachingAllocator_raw_delete(mem_ptr)
def set_per_process_memory_fraction(fraction, device: Union[Device, int] = None) -> None:
r"""Set memory fraction for a process.
The fraction is used to limit the memory a caching allocator may allocate on a CUDA device.
The allowed value equals the total visible memory multiplied by the fraction.
If a process tries to allocate more than the allowed value, the allocator
will raise an out-of-memory error.
Args:
fraction(float): Range: 0~1. Allowed memory equals total_memory * fraction.
device (torch.device or int, optional): selected device. If it is
``None`` the default CUDA device is used.
.. note::
In general, the total available free memory is less than the total capacity.
"""
_lazy_init()
if device is None:
device = torch.cuda.current_device()
device = _get_device_index(device)
if not isinstance(fraction, float):
raise TypeError('Invalid type for fraction argument, must be `float`')
if fraction < 0 or fraction > 1:
raise ValueError('Invalid fraction value: {}. '
'Allowed range: 0~1'.format(fraction))
torch._C._cuda_setMemoryFraction(fraction, device)
def empty_cache() -> None:
r"""Releases all unoccupied cached memory currently held by the caching
allocator so that it can be used by other GPU applications and becomes
visible in `nvidia-smi`.
.. note::
:func:`~torch.cuda.empty_cache` doesn't increase the amount of GPU
memory available for PyTorch. However, it may help reduce fragmentation
of GPU memory in certain cases. See :ref:`cuda-memory-management` for
more details about GPU memory management.
"""
if is_initialized():
torch._C._cuda_emptyCache()
def memory_stats(device: Union[Device, int] = None) -> Dict[str, Any]:
r"""Returns a dictionary of CUDA memory allocator statistics for a
given device.
The return value of this function is a dictionary of statistics, each of
which is a non-negative integer.
Core statistics:
- ``"allocated.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
number of allocation requests received by the memory allocator.
- ``"allocated_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
amount of allocated memory.
- ``"segment.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
number of reserved segments from ``cudaMalloc()``.
- ``"reserved_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
amount of reserved memory.
- ``"active.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
number of active memory blocks.
- ``"active_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
amount of active memory.
- ``"inactive_split.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
number of inactive, non-releasable memory blocks.
- ``"inactive_split_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
amount of inactive, non-releasable memory.
For these core statistics, values are broken down as follows.
Pool type:
- ``all``: combined statistics across all memory pools.
- ``large_pool``: statistics for the large allocation pool
(as of October 2019, for size >= 1MB allocations).
- ``small_pool``: statistics for the small allocation pool
(as of October 2019, for size < 1MB allocations).
Metric type:
- ``current``: current value of this metric.
- ``peak``: maximum value of this metric.
- ``allocated``: historical total increase in this metric.
- ``freed``: historical total decrease in this metric.
In addition to the core statistics, we also provide some simple event
counters:
- ``"num_alloc_retries"``: number of failed ``cudaMalloc`` calls that
result in a cache flush and retry.
- ``"num_ooms"``: number of out-of-memory errors thrown.
The caching allocator can be configured via ENV to not split blocks larger than a
defined size (see Memory Management section of the Cuda Semantics documentation).
This helps avoid memory fragmentation but may have a performance
penalty. Additional outputs to assist with tuning and evaluating impact:
- ``"max_split_size"``: blocks above this size will not be split.
- ``"oversize_allocations.{current,peak,allocated,freed}"``:
number of over-size allocation requests received by the memory allocator.
- ``"oversize_segments.{current,peak,allocated,freed}"``:
number of over-size reserved segments from ``cudaMalloc()``.
Args:
device (torch.device or int, optional): selected device. Returns
statistics for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
result = []
def _recurse_add_to_result(prefix, obj):
if isinstance(obj, dict):
if len(prefix) > 0:
prefix += "."
for k, v in obj.items():
_recurse_add_to_result(prefix + k, v)
else:
result.append((prefix, obj))
stats = memory_stats_as_nested_dict(device=device)
_recurse_add_to_result("", stats)
result.sort()
return collections.OrderedDict(result)
def memory_stats_as_nested_dict(device: Union[Device, int] = None) -> Dict[str, Any]:
r"""Returns the result of :func:`~torch.cuda.memory_stats` as a nested dictionary."""
if not is_initialized():
return {}
device = _get_device_index(device, optional=True)
return torch._C._cuda_memoryStats(device)
def reset_accumulated_memory_stats(device: Union[Device, int] = None) -> None:
r"""Resets the "accumulated" (historical) stats tracked by the CUDA memory allocator.
See :func:`~torch.cuda.memory_stats` for details. Accumulated stats correspond to
the `"allocated"` and `"freed"` keys in each individual stat dict, as well as
`"num_alloc_retries"` and `"num_ooms"`.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
device = _get_device_index(device, optional=True)
return torch._C._cuda_resetAccumulatedMemoryStats(device)
def reset_peak_memory_stats(device: Union[Device, int] = None) -> None:
r"""Resets the "peak" stats tracked by the CUDA memory allocator.
See :func:`~torch.cuda.memory_stats` for details. Peak stats correspond to the
`"peak"` key in each individual stat dict.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
device = _get_device_index(device, optional=True)
return torch._C._cuda_resetPeakMemoryStats(device)
def reset_max_memory_allocated(device: Union[Device, int] = None) -> None:
r"""Resets the starting point in tracking maximum GPU memory occupied by
tensors for a given device.
See :func:`~torch.cuda.max_memory_allocated` for details.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. warning::
This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets
/all/ peak memory stats.
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
warnings.warn(
"torch.cuda.reset_max_memory_allocated now calls torch.cuda.reset_peak_memory_stats, "
"which resets /all/ peak memory stats.",
FutureWarning)
return reset_peak_memory_stats(device=device)
def reset_max_memory_cached(device: Union[Device, int] = None) -> None:
r"""Resets the starting point in tracking maximum GPU memory managed by the
caching allocator for a given device.
See :func:`~torch.cuda.max_memory_cached` for details.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. warning::
This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets
/all/ peak memory stats.
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
warnings.warn(
"torch.cuda.reset_max_memory_cached now calls torch.cuda.reset_peak_memory_stats, "
"which resets /all/ peak memory stats.",
FutureWarning)
return reset_peak_memory_stats(device=device)
def memory_allocated(device: Union[Device, int] = None) -> int:
r"""Returns the current GPU memory occupied by tensors in bytes for a given
device.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
This is likely less than the amount shown in `nvidia-smi` since some
unused memory can be held by the caching allocator and some context
needs to be created on GPU. See :ref:`cuda-memory-management` for more
details about GPU memory management.
"""
return memory_stats(device=device).get("allocated_bytes.all.current", 0)
def max_memory_allocated(device: Union[Device, int] = None) -> int:
r"""Returns the maximum GPU memory occupied by tensors in bytes for a given
device.
By default, this returns the peak allocated memory since the beginning of
this program. :func:`~torch.cuda.reset_peak_memory_stats` can be used to
reset the starting point in tracking this metric. For example, these two
functions can measure the peak allocated memory usage of each iteration in a
training loop.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
return memory_stats(device=device).get("allocated_bytes.all.peak", 0)
def memory_reserved(device: Union[Device, int] = None) -> int:
r"""Returns the current GPU memory managed by the caching allocator in bytes
for a given device.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
return memory_stats(device=device).get("reserved_bytes.all.current", 0)
def max_memory_reserved(device: Union[Device, int] = None) -> int:
r"""Returns the maximum GPU memory managed by the caching allocator in bytes
for a given device.
By default, this returns the peak cached memory since the beginning of this
program. :func:`~torch.cuda.reset_peak_memory_stats` can be used to reset
the starting point in tracking this metric. For example, these two functions
can measure the peak cached memory amount of each iteration in a training
loop.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
return memory_stats(device=device).get("reserved_bytes.all.peak", 0)
def memory_cached(device: Union[Device, int] = None) -> int:
r"""Deprecated; see :func:`~torch.cuda.memory_reserved`."""
warnings.warn(
"torch.cuda.memory_cached has been renamed to torch.cuda.memory_reserved",
FutureWarning)
return memory_reserved(device=device)
def max_memory_cached(device: Union[Device, int] = None) -> int:
r"""Deprecated; see :func:`~torch.cuda.max_memory_reserved`."""
warnings.warn(
"torch.cuda.max_memory_cached has been renamed to torch.cuda.max_memory_reserved",
FutureWarning)
return max_memory_reserved(device=device)
def memory_snapshot():
r"""Returns a snapshot of the CUDA memory allocator state across all devices.
Interpreting the output of this function requires familiarity with the
memory allocator internals.
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
return torch._C._cuda_memorySnapshot()
def memory_summary(device: Union[Device, int] = None, abbreviated: bool = False) -> str:
r"""Returns a human-readable printout of the current memory allocator
statistics for a given device.
This can be useful to display periodically during training, or when
handling out-of-memory exceptions.
Args:
device (torch.device or int, optional): selected device. Returns
printout for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
abbreviated (bool, optional): whether to return an abbreviated summary
(default: False).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
device = _get_device_index(device, optional=True)
stats = memory_stats(device=device)
def _format_size(sz, pref_sz):
prefixes = ["B ", "KB", "MB", "GB", "TB", "PB"]
prefix = prefixes[0]
for new_prefix in prefixes[1:]:
if pref_sz < 768 * 1024:
break
prefix = new_prefix
sz //= 1024
pref_sz /= 1024
return "{:7d} {}".format(sz, prefix)
def _format_count(cnt, pref_cnt):
prefixes = [" ", "K", "M"]
prefix = prefixes[0]
for new_prefix in prefixes[1:]:
if pref_cnt < 750 * 1000:
break
prefix = new_prefix
cnt //= 1000
pref_cnt /= 1000
return "{:7d} {} ".format(cnt, prefix)
metrics_to_display = [
("allocated_bytes", "Allocated memory", _format_size),
("active_bytes", "Active memory", _format_size),
("reserved_bytes", "GPU reserved memory", _format_size),
("inactive_split_bytes", "Non-releasable memory", _format_size),
("allocation", "Allocations", _format_count),
("active", "Active allocs", _format_count),
("segment", "GPU reserved segments", _format_count),
("inactive_split", "Non-releasable allocs", _format_count),
]
lines = []
lines.append("=" * 75)
lines.append(" {_:16} PyTorch CUDA memory summary, device ID {device:<17d} ")
lines.append("-" * 75)
lines.append(" {_:9} CUDA OOMs: {num_ooms:<12d} | {_:6} cudaMalloc retries: {num_alloc_retries:<8d} ")
lines.append("=" * 75)
lines.append(" Metric | Cur Usage | Peak Usage | Tot Alloc | Tot Freed ")
for metric_key, metric_name, formatter in metrics_to_display:
lines.append("-" * 75)
submetrics = [("all", metric_name)]
if not abbreviated:
submetrics.append(("large_pool", " from large pool"))
submetrics.append(("small_pool", " from small pool"))
current_prefval, peak_prefval, allocated_prefval, freed_prefval = None, None, None, None
for submetric_key, submetric_name in submetrics:
prefix = metric_key + "." + submetric_key + "."
current = stats[prefix + "current"]
peak = stats[prefix + "peak"]
allocated = stats[prefix + "allocated"]
freed = stats[prefix + "freed"]
if current_prefval is None:
current_prefval = current
peak_prefval = peak
allocated_prefval = allocated
freed_prefval = freed
lines.append(" {:<21} | {} | {} | {} | {} ".format(
submetric_name,
formatter(current, current_prefval),
formatter(peak, peak_prefval),
formatter(allocated, allocated_prefval),
formatter(freed, freed_prefval)),
)
metrics_to_display = [
("oversize_allocations", "Oversize allocations", _format_count),
("oversize_segments", "Oversize GPU segments", _format_count),
]
for metric_key, metric_name, formatter in metrics_to_display:
lines.append("-" * 75)
prefix = metric_key + "."
current = stats[prefix + "current"]
peak = stats[prefix + "peak"]
allocated = stats[prefix + "allocated"]
freed = stats[prefix + "freed"]
lines.append(" {:<21} | {} | {} | {} | {} ".format(
metric_name,
formatter(current, current),
formatter(peak, peak),
formatter(allocated, allocated),
formatter(freed, freed)),
)
lines.append("=" * 75)
fmt_dict = {"_": "", "device": device}
for k, v in stats.items():
fmt_dict[k.replace(".", "-")] = v
return "|" + "|\n|".join(lines).format(**fmt_dict) + "|\n"
def list_gpu_processes(device: Union[Device, int] = None) -> str:
r"""Returns a human-readable printout of the running processes
and their GPU memory use for a given device.
This can be useful to display periodically during training, or when
handling out-of-memory exceptions.
Args:
device (torch.device or int, optional): selected device. Returns
printout for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
"""
try:
import pynvml # type: ignore[import]
except ModuleNotFoundError:
return("pynvml module not found, please install pynvml")
from pynvml import NVMLError_DriverNotLoaded
try:
pynvml.nvmlInit()
except NVMLError_DriverNotLoaded:
return ("cuda driver can't be loaded, is cuda enabled?")
device = _get_device_index(device, optional=True)
handle = pynvml.nvmlDeviceGetHandleByIndex(device)
procs = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
lines = []
lines.append(f"GPU:{device}")
if len(procs) == 0:
lines.append("no processes are running")
for p in procs:
mem = p.usedGpuMemory / (1024 * 1024)
lines.append(f"process {p.pid:>10d} uses {mem:>12.3f} MB GPU memory")
return "\n".join(lines)
def mem_get_info(device: Union[Device, int] = None) -> Tuple[int, int]:
r"""Returns the global free and total GPU memory occupied for a given
device using cudaMemGetInfo.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more
details about GPU memory management.
"""
if device is None:
device = torch.cuda.current_device()
device = _get_device_index(device)
return torch.cuda.cudart().cudaMemGetInfo(device)
def _record_memory_history(enabled: bool, device: Union[Device, int] = None):
with torch.cuda.device(device):
_C._cuda_recordMemoryHistory(enabled)
def _snapshot(device: Union[Device, int] = None):
with torch.cuda.device(device):
return _C._cuda_memorySnapshot()
def _save_segment_usage(filename='output.svg', snapshot=None):
if snapshot is None:
snapshot = memory_snapshot()
with open(filename, 'w') as f:
f.write(_segments(snapshot))
def _save_memory_usage(filename='output.svg', snapshot=None):
if snapshot is None:
snapshot = memory_snapshot()
with open(filename, 'w') as f:
f.write(_memory(snapshot))
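# A hedged sketch of the introspection helpers above (re-exported under the
# public torch.cuda namespace); assumes at least one CUDA device is available.
if __name__ == "__main__":
    x = torch.empty(1024, 1024, device="cuda")           # ~4 MB of float32
    print("allocated:", torch.cuda.memory_allocated())   # bytes backing live tensors
    print("reserved: ", torch.cuda.memory_reserved())    # bytes held by the caching allocator
    del x
    torch.cuda.empty_cache()                             # return cached blocks to the driver
    torch.cuda.reset_peak_memory_stats()                 # restart peak tracking from here
    free_b, total_b = torch.cuda.mem_get_info()
    print(f"free {free_b} / total {total_b} bytes")
    print(torch.cuda.memory_summary(abbreviated=True))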
| pytorch-master | torch/cuda/memory.py |
import torch
from torch import Tensor
from typing import Callable, List
import re
__all__ : List[str] = []
class _CodeParser:
def __init__(self, code_string: str):
optional_ws = r"\s*"
required_ws = r"\s+"
template_params = r"(?P<template_params>\<.+\>)"
return_type = r"(?P<return_type>\w+)"
function_name = r"(?P<function_name>\w+)"
function_params = r"(?P<function_params>\(.+\))"
function_body = r"(?P<function_body>\{.+\})"
pattern = \
optional_ws \
+ "template" \
+ optional_ws + template_params \
+ optional_ws + return_type \
+ required_ws + function_name \
+ optional_ws + function_params \
+ optional_ws + function_body \
+ optional_ws
result = re.match(pattern, code_string, re.DOTALL) # DOTALL for matching multiline
if result is None:
raise Exception(f"Couldn't parse code, please check correctness:\n {code_string}")
self.template_params = result["template_params"]
self.return_type = result["return_type"]
self.function_name = result["function_name"]
self.function_params = result["function_params"]
self.function_body = result["function_body"]
class _JittedFunction:
def __init__(self, code_string: str, return_by_ref: bool, num_outputs: int, **kwargs):
self.code_string = code_string
assert return_by_ref or num_outputs == 1, "Return by value only works for single output. "
self.return_by_ref = return_by_ref
self.num_outputs = num_outputs
parsed_code = _CodeParser(code_string)
self.kernel_name = parsed_code.function_name
self.kwargs_dict = kwargs
self.is_cuda_available = torch.cuda.is_available()
def __call__(self, *tensors: Tensor, **kwargs):
# Jiterator follow torch.cuda's lazy initialization behavior
# Defer checking cuda's availability at the function invocation time
assert self.is_cuda_available, "Jiterator is only supported on CUDA and ROCm GPUs, none are available."
assert len(tensors) <= 8, "jiterator only supports up to 8 tensor inputs."
expanded_kwargs = self.kwargs_dict.copy()
for key, value in kwargs.items():
if key in self.kwargs_dict:
expanded_kwargs[key] = value
else:
raise KeyError(f"{key} is not declared in function definition")
return torch._C._cuda_jiterator_compile_and_launch_kernel(
self.code_string,
self.kernel_name,
self.return_by_ref,
self.num_outputs,
tensors,
expanded_kwargs)
def _create_jit_fn(code_string: str, **kwargs) -> Callable:
"""
Create a jiterator-generated cuda kernel for an elementwise op.
The code string has to be a valid CUDA function that describes the computation for a single element. The code
string has to follow the C++ template pattern, as shown in the example below. This function will be inlined
into an elementwise kernel template and compiled on the fly. The compiled kernel will be cached in memory, as well as
in a local temp dir.
Jiterator-generated kernels accept noncontiguous tensors, and support broadcasting and type promotion.
Args:
code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return by value.
kwargs (Dict, optional): Keyword arguments for generated function
Example::
code_string = "template <typename T> T my_kernel(T x, T y, T alpha) { return -x + alpha * y; }"
jitted_fn = create_jit_fn(code_string, alpha=1.0)
a = torch.rand(3, device='cuda')
b = torch.rand(3, device='cuda')
# invoke jitted function like a regular python function
result = jitted_fn(a, b, alpha=3.14)
code_string also allows multiple function definitions, and the last function will be treated as the entry function.
Example::
code_string = "template <typename T> T util_fn(T x, T y) { return ::sin(x) + ::cos(y); }"
code_string += "template <typename T> T my_kernel(T x, T y, T val) { return ::min(val, util_fn(x, y)); }"
jitted_fn = create_jit_fn(code_string, val=0.0)
a = torch.rand(3, device='cuda')
b = torch.rand(3, device='cuda')
# invoke jitted function like a regular python function
result = jitted_fn(a, b) # using default val=0.0
Jiterator can be used together with python registration to override an operator's cuda kernel.
The following example overrides gelu's cuda kernel with relu.
Example::
code_string = "template <typename T> T my_gelu(T a) { return a > 0 ? a : 0; }"
my_gelu = create_jit_fn(code_string)
my_lib = torch.library.Library("aten", "IMPL")
my_lib.impl('aten::gelu', my_gelu, "CUDA")
# torch.nn.GELU and torch.nn.function.gelu are now overridden
a = torch.rand(3, device='cuda')
torch.allclose(torch.nn.functional.gelu(a), torch.nn.functional.relu(a))
.. warning::
This API is in beta and may change in future releases.
.. warning::
This API only supports up to 8 inputs and 1 output
.. warning::
All input tensors must live on a CUDA device
"""
return _JittedFunction(code_string, return_by_ref=False, num_outputs=1, **kwargs)
def _create_multi_output_jit_fn(code_string: str, num_outputs: int, **kwargs) -> Callable:
"""
Create a jiterator-generated cuda kernel for an elementwise op that supports returning one or more outputs.
Args:
code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return value by reference.
num_outputs(int): number of outputs returned by the kernel
kwargs (Dict, optional): Keyword arguments for generated function
Example::
code_string = "template <typename T> void my_kernel(T x, T y, T alpha, T& out) { out = -x + alpha * y; }"
jitted_fn = create_multi_output_jit_fn(code_string, num_outputs=1, alpha=1.0)
a = torch.rand(3, device='cuda')
b = torch.rand(3, device='cuda')
# invoke jitted function like a regular python function
result = jitted_fn(a, b, alpha=3.14)
.. warning::
This API is in beta and may change in future releases.
.. warning::
This API only supports up to 8 inputs and 8 outputs
"""
return _JittedFunction(code_string, return_by_ref=True, num_outputs=num_outputs, **kwargs)
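# A small hedged illustration of what _CodeParser extracts from a jiterator code
# string; "my_op" is just a made-up kernel name. Outside this file the class is
# reachable (as a private helper) via torch.cuda.jiterator._CodeParser.
if __name__ == "__main__":
    parsed = _CodeParser(
        "template <typename T> T my_op(T x, T y) { return x + y; }")
    print(parsed.function_name)     # my_op
    print(parsed.template_params)   # <typename T>
    print(parsed.function_params)   # (T x, T y)
    print(parsed.return_type)       # T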
| pytorch-master | torch/cuda/jiterator.py |
import collections
import warnings
import torch.cuda
from typing import Optional, Sequence, Union
__all__ = ['all_reduce', 'reduce', 'broadcast', 'all_gather', 'reduce_scatter']
SUM = 0 # ncclRedOp_t
def is_available(tensors):
if not hasattr(torch._C, '_nccl_all_reduce'):
warnings.warn('PyTorch is not compiled with NCCL support')
return False
devices = set()
for tensor in tensors:
if tensor.is_sparse:
return False
if not tensor.is_contiguous():
return False
if not tensor.is_cuda:
return False
device = tensor.get_device()
if device in devices:
return False
devices.add(device)
return True
def version():
ver = torch._C._nccl_version()
major = ver >> 32
minor = (ver >> 16) & 65535
patch = ver & 65535
return (major, minor, patch)
def unique_id():
return torch._C._nccl_unique_id()
def init_rank(num_ranks, uid, rank):
return torch._C._nccl_init_rank(num_ranks, uid, rank)
def _check_sequence_type(inputs: Union[torch.Tensor, Sequence[torch.Tensor]]) -> None:
if not isinstance(inputs, collections.abc.Container) or isinstance(inputs, torch.Tensor):
raise TypeError("Inputs should be a collection of tensors")
def all_reduce(inputs, outputs=None, op=SUM, streams=None, comms=None):
_check_sequence_type(inputs)
if outputs is None:
outputs = inputs
_check_sequence_type(outputs)
torch._C._nccl_all_reduce(inputs, outputs, op, streams, comms)
# `output` used to be `outputs`, taking in a list of tensors. So we have two
# arguments for BC reasons.
def reduce(inputs: Sequence[torch.Tensor],
output: Optional[Union[torch.Tensor, Sequence[torch.Tensor]]] = None,
root: int = 0,
op: int = SUM,
streams: Optional[Sequence[torch.cuda.Stream]] = None,
comms=None, *,
outputs: Optional[Sequence[torch.Tensor]] = None) -> None:
_check_sequence_type(inputs)
_output: torch.Tensor
if outputs is not None:
if output is not None:
raise ValueError(
"'output' and 'outputs' can not be both specified. 'outputs' is deprecated in "
"favor of 'output', taking in a single output tensor. The signature of reduce is: "
"reduce(inputs, output=None, root=0, op=SUM, streams=None, comms=None).")
else:
warnings.warn(
"nccl.reduce with an output tensor list is deprecated. "
"Please specify a single output tensor with argument 'output' instead instead.")
_output = outputs[root]
elif not isinstance(output, torch.Tensor) and isinstance(output, collections.abc.Sequence):
# User called old API with positional arguments of list of output tensors.
warnings.warn(
"nccl.reduce with an output tensor list is deprecated. "
"Please specify a single output tensor.")
_output = output[root]
else:
_output = inputs[root] if output is None else output
torch._C._nccl_reduce(inputs, _output, root, op, streams, comms)
def broadcast(inputs: Sequence[torch.Tensor], root: int = 0, streams=None, comms=None) -> None:
_check_sequence_type(inputs)
torch._C._nccl_broadcast(inputs, root, streams, comms)
def all_gather(inputs: Sequence[torch.Tensor], outputs: Sequence[torch.Tensor], streams=None, comms=None) -> None:
_check_sequence_type(inputs)
_check_sequence_type(outputs)
torch._C._nccl_all_gather(inputs, outputs, streams, comms)
def reduce_scatter(inputs: Sequence[torch.Tensor],
outputs: Sequence[torch.Tensor],
op: int = SUM,
streams=None, comms=None) -> None:
_check_sequence_type(inputs)
_check_sequence_type(outputs)
torch._C._nccl_reduce_scatter(inputs, outputs, op, streams, comms)
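# A hedged sketch of the raw collective helpers above. It needs a CUDA build
# with NCCL and at least two GPUs; with fewer devices it simply skips the call.
if __name__ == "__main__":
    tensors = [torch.ones(4, device=f"cuda:{i}")
               for i in range(torch.cuda.device_count())]
    if len(tensors) >= 2 and is_available(tensors):
        print("NCCL version:", version())
        all_reduce(tensors)   # in-place element-wise sum across all GPUs
        print(tensors[0])     # every entry now equals the number of GPUs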
| pytorch-master | torch/cuda/nccl.py |
r"""
This package adds support for CUDA tensor types, which implement the same
functions as CPU tensors but utilize GPUs for computation.
It is lazily initialized, so you can always import it, and use
:func:`is_available()` to determine if your system supports CUDA.
:ref:`cuda-semantics` has more details about working with CUDA.
"""
import contextlib
import os
import torch
from torch.types import Device
import traceback
import warnings
import threading
from typing import List, Optional, Tuple, Union, Any
from ._utils import _get_device_index, _dummy_type
from .._utils import classproperty
from .graphs import CUDAGraph, graph_pool_handle, graph, \
make_graphed_callables, is_current_stream_capturing
from .streams import ExternalStream, Stream, Event
from .. import device as _device
import torch._C
try:
from torch._C import _cudart # type: ignore[attr-defined]
except ImportError:
_cudart = None
_initialized = False
_tls = threading.local()
_initialization_lock = threading.Lock()
_queued_calls = [] # don't invoke these until initialization occurs
_is_in_bad_fork = getattr(torch._C, "_cuda_isInBadFork", lambda: False)
_device_t = Union[_device, str, int, None]
class _LazySeedTracker:
# Since seeding is memory-less, only track the latest seed.
# Note: `manual_seed_all` followed by `manual_seed` overwrites
# the seed on the current device. We track the order of the **latest**
# calls between these two APIs.
def __init__(self):
self.manual_seed_all_cb = None
self.manual_seed_cb = None
self.call_order = []
def queue_seed_all(self, cb, traceback):
self.manual_seed_all_cb = (cb, traceback)
# update seed_all to be latest
self.call_order = [self.manual_seed_cb, self.manual_seed_all_cb]
def queue_seed(self, cb, traceback):
self.manual_seed_cb = (cb, traceback)
# update seed to be latest
self.call_order = [self.manual_seed_all_cb, self.manual_seed_cb]
def get_calls(self) -> List:
return self.call_order
_lazy_seed_tracker = _LazySeedTracker()
# Define dummy _CudaDeviceProperties type if PyTorch was compiled without CUDA
if hasattr(torch._C, '_CudaDeviceProperties'):
_CudaDeviceProperties = torch._C._CudaDeviceProperties
else:
_CudaDeviceProperties = _dummy_type('_CudaDeviceProperties') # type: ignore[assignment, misc]
# Global variables dynamically populated by native code
has_magma: bool = False
has_half: bool = False
default_generators: Tuple[torch._C.Generator] = () # type: ignore[assignment]
def is_available() -> bool:
r"""Returns a bool indicating if CUDA is currently available."""
if not hasattr(torch._C, '_cuda_getDeviceCount'):
return False
# This function never throws and returns 0 if driver is missing or can't
# be initialized
return torch._C._cuda_getDeviceCount() > 0
def is_bf16_supported():
r"""Returns a bool indicating if the current CUDA/ROCm device supports dtype bfloat16"""
# Check for ROCm, if true return true, no ROCM_VERSION check required,
# since it is supported on AMD GPU archs.
if torch.version.hip:
return True
cu_vers = torch.version.cuda
if cu_vers is not None:
cuda_maj_decide = int(cu_vers.split('.')[0]) >= 11
else:
cuda_maj_decide = False
return torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 8 and cuda_maj_decide
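# A hedged sketch of the availability/capability queries defined in this module,
# written against the public torch.cuda namespace; it only runs when this file
# is executed directly.
if __name__ == "__main__":
    if torch.cuda.is_available():
        idx = torch.cuda.current_device()
        print(torch.cuda.get_device_name(idx), torch.cuda.get_device_capability(idx))
        print("bf16 supported:", torch.cuda.is_bf16_supported())
    else:
        print("CUDA is not available")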
def _sleep(cycles):
torch._C._cuda_sleep(cycles)
def _check_capability():
incorrect_binary_warn = """
Found GPU%d %s which requires CUDA_VERSION >= %d to
work properly, but your PyTorch was compiled
with CUDA_VERSION %d. Please install the correct PyTorch binary
using instructions from https://pytorch.org
"""
old_gpu_warn = """
Found GPU%d %s which is of cuda capability %d.%d.
PyTorch no longer supports this GPU because it is too old.
The minimum cuda capability supported by this library is %d.%d.
"""
if torch.version.cuda is not None: # on ROCm we don't want this check
CUDA_VERSION = torch._C._cuda_getCompiledVersion()
for d in range(device_count()):
capability = get_device_capability(d)
major = capability[0]
minor = capability[1]
name = get_device_name(d)
current_arch = major * 10 + minor
min_arch = min((int(arch.split("_")[1]) for arch in torch.cuda.get_arch_list()), default=35)
if current_arch < min_arch:
warnings.warn(old_gpu_warn % (d, name, major, minor, min_arch // 10, min_arch % 10))
elif CUDA_VERSION <= 9000 and major >= 7 and minor >= 5:
warnings.warn(incorrect_binary_warn % (d, name, 10000, CUDA_VERSION))
def _check_cubins():
incompatible_device_warn = """
{} with CUDA capability sm_{} is not compatible with the current PyTorch installation.
The current PyTorch install supports CUDA capabilities {}.
If you want to use the {} GPU with PyTorch, please check the instructions at https://pytorch.org/get-started/locally/
"""
if torch.version.cuda is None: # on ROCm we don't want this check
return
arch_list = get_arch_list()
if len(arch_list) == 0:
return
supported_sm = [int(arch.split('_')[1]) for arch in arch_list if 'sm_' in arch]
for idx in range(device_count()):
cap_major, cap_minor = get_device_capability(idx)
# NVIDIA GPU compute architectures are backward compatible within major version
supported = any([sm // 10 == cap_major for sm in supported_sm])
if not supported:
device_name = get_device_name(idx)
capability = cap_major * 10 + cap_minor
warnings.warn(incompatible_device_warn.format(device_name, capability, " ".join(arch_list), device_name))
def is_initialized():
r"""Returns whether PyTorch's CUDA state has been initialized."""
return _initialized and not _is_in_bad_fork()
def _lazy_call(callable, **kwargs):
if is_initialized():
callable()
else:
# TODO(torch_deploy): this accesses linecache, which attempts to read the
# file system to get traceback info. Patch linecache or do something
# else here if this ends up being important.
global _lazy_seed_tracker
if kwargs.get("seed_all", False):
_lazy_seed_tracker.queue_seed_all(callable, traceback.format_stack())
elif kwargs.get("seed", False):
_lazy_seed_tracker.queue_seed(callable, traceback.format_stack())
else:
# Don't store the actual traceback to avoid memory cycle
_queued_calls.append((callable, traceback.format_stack()))
_lazy_call(_check_capability)
_lazy_call(_check_cubins)
class DeferredCudaCallError(Exception):
pass
OutOfMemoryError = torch._C._OutOfMemoryError
def init():
r"""Initialize PyTorch's CUDA state. You may need to call
this explicitly if you are interacting with PyTorch via
its C API, as Python bindings for CUDA functionality will not
be available until this initialization takes place. Ordinary users
should not need this, as all of PyTorch's CUDA methods
automatically initialize CUDA state on-demand.
Does nothing if the CUDA state is already initialized.
"""
_lazy_init()
def _lazy_init():
global _initialized, _queued_calls
if is_initialized() or hasattr(_tls, 'is_initializing'):
return
with _initialization_lock:
# We be double-checked locking, boys! This is OK because
# the above test was GIL protected anyway. The inner test
# is for when a thread blocked on some other thread which was
# doing the initialization; when they get the lock, they will
# find there is nothing left to do.
if is_initialized():
return
# It is important to prevent other threads from entering _lazy_init
# immediately, while we are still guaranteed to have the GIL, because some
# of the C calls we make below will release the GIL
if _is_in_bad_fork():
raise RuntimeError(
"Cannot re-initialize CUDA in forked subprocess. To use CUDA with "
"multiprocessing, you must use the 'spawn' start method")
if not hasattr(torch._C, '_cuda_getDeviceCount'):
raise AssertionError("Torch not compiled with CUDA enabled")
if _cudart is None:
raise AssertionError(
"libcudart functions unavailable. It looks like you have a broken build?")
# This function throws if there's a driver initialization error, no GPUs
# are found or any other error occurs
torch._C._cuda_init()
# Some of the queued calls may reentrantly call _lazy_init();
# we need to just return without initializing in that case.
# However, we must not let any *other* threads in!
_tls.is_initializing = True
for calls in _lazy_seed_tracker.get_calls():
if calls:
_queued_calls.append(calls)
try:
for queued_call, orig_traceback in _queued_calls:
try:
queued_call()
except Exception as e:
msg = (f"CUDA call failed lazily at initialization with error: {str(e)}\n\n"
f"CUDA call was originally invoked at:\n\n{orig_traceback}")
raise DeferredCudaCallError(msg) from e
finally:
delattr(_tls, 'is_initializing')
_initialized = True
def cudart():
_lazy_init()
return _cudart
class cudaStatus(object):
SUCCESS: int = 0
ERROR_NOT_READY: int = 34
class CudaError(RuntimeError):
def __init__(self, code: int) -> None:
msg = _cudart.cudaGetErrorString(_cudart.cudaError(code))
super(CudaError, self).__init__('{0} ({1})'.format(msg, code))
def check_error(res: int) -> None:
if res != _cudart.cudaError.success:
raise CudaError(res)
class device(object):
r"""Context-manager that changes the selected device.
Args:
device (torch.device or int): device index to select. It's a no-op if
this argument is a negative integer or ``None``.
"""
def __init__(self, device: Any):
self.idx = _get_device_index(device, optional=True)
self.prev_idx = -1
def __enter__(self):
if self.idx == -1:
return
self.prev_idx = torch.cuda.current_device()
if self.prev_idx != self.idx:
torch.cuda.set_device(self.idx)
if not torch.jit.is_scripting():
_lazy_init()
def __exit__(self, type: Any, value: Any, traceback: Any):
if self.prev_idx != self.idx:
torch.cuda.set_device(self.prev_idx)
return False
class device_of(device):
r"""Context-manager that changes the current device to that of given object.
You can use both tensors and storages as arguments. If a given object is
not allocated on a GPU, this is a no-op.
Args:
obj (Tensor or Storage): object allocated on the selected device.
"""
def __init__(self, obj):
idx = obj.get_device() if obj.is_cuda else -1
super(device_of, self).__init__(idx)
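# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# A minimal, hedged example of the `device` and `device_of` context managers above.
# It assumes at least one CUDA device is available; the helper name is hypothetical.
def _example_device_contexts():
    x = torch.empty(2, 2, device='cuda:0')
    with device_of(x):
        # 'cuda' with no index now resolves to x's device.
        y = torch.ones(2, 2, device='cuda')
    with device(0):
        # Explicitly select device 0; the previous device is restored on exit.
        z = torch.zeros(1, device='cuda')
    return y, z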
def set_device(device: _device_t) -> None:
r"""Sets the current device.
    Usage of this function is discouraged in favor of :any:`device`. In most
    cases it's better to use the ``CUDA_VISIBLE_DEVICES`` environment variable.
Args:
device (torch.device or int): selected device. This function is a no-op
if this argument is negative.
"""
device = _get_device_index(device)
if device >= 0:
torch._C._cuda_setDevice(device)
def get_device_name(device: Optional[_device_t] = None) -> str:
r"""Gets the name of a device.
Args:
device (torch.device or int, optional): device for which to return the
name. This function is a no-op if this argument is a negative
integer. It uses the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
Returns:
str: the name of the device
"""
return get_device_properties(device).name
def get_device_capability(device: Optional[_device_t] = None) -> Tuple[int, int]:
r"""Gets the cuda capability of a device.
Args:
device (torch.device or int, optional): device for which to return the
device capability. This function is a no-op if this argument is
a negative integer. It uses the current device, given by
:func:`~torch.cuda.current_device`, if :attr:`device` is ``None``
(default).
Returns:
tuple(int, int): the major and minor cuda capability of the device
"""
prop = get_device_properties(device)
return prop.major, prop.minor
def get_device_properties(device: _device_t) -> _CudaDeviceProperties:
r"""Gets the properties of a device.
Args:
device (torch.device or int or str): device for which to return the
properties of the device.
Returns:
_CudaDeviceProperties: the properties of the device
"""
_lazy_init() # will define _get_device_properties
device = _get_device_index(device, optional=True)
if device < 0 or device >= device_count():
raise AssertionError("Invalid device id")
return _get_device_properties(device) # type: ignore[name-defined]
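# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# A hedged example of querying device metadata with the helpers above; it guards for
# the zero-GPU case. The helper name is hypothetical.
def _example_enumerate_devices():
    if not is_available():
        return []
    info = []
    for idx in range(device_count()):
        props = get_device_properties(idx)
        # name / compute capability / total memory in bytes
        info.append((get_device_name(idx), get_device_capability(idx), props.total_memory))
    return info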
def can_device_access_peer(device: _device_t, peer_device: _device_t) -> bool:
r"""Checks if peer access between two devices is possible.
"""
_lazy_init()
device = _get_device_index(device, optional=True)
peer_device = _get_device_index(peer_device)
if device < 0 or device >= device_count():
raise AssertionError("Invalid device id")
if peer_device < 0 or peer_device >= device_count():
raise AssertionError("Invalid peer device id")
return torch._C._cuda_canDeviceAccessPeer(device, peer_device)
class StreamContext(object):
r"""Context-manager that selects a given stream.
All CUDA kernels queued within its context will be enqueued on a selected
stream.
Args:
        stream (Stream): selected stream. This manager is a no-op if it's
            ``None``.
.. note:: Streams are per-device.
"""
cur_stream : Optional['torch.cuda.Stream']
def __init__(self, stream: Optional['torch.cuda.Stream']):
self.stream = stream
self.idx = _get_device_index(None, True)
if not torch.jit.is_scripting():
if self.idx is None:
self.idx = -1
self.src_prev_stream = None if not torch.jit.is_scripting() else torch.cuda.default_stream(None)
self.dst_prev_stream = None if not torch.jit.is_scripting() else torch.cuda.default_stream(None)
def __enter__(self):
# Local cur_stream variable for type refinement
cur_stream = self.stream
# Return if stream is None or CUDA device not available
if cur_stream is None or self.idx == -1:
return
self.src_prev_stream = torch.cuda.current_stream(None)
# If the stream is not on the current device, then
# set the current stream on the device
if self.src_prev_stream.device != cur_stream.device:
with device(cur_stream.device):
self.dst_prev_stream = torch.cuda.current_stream(cur_stream.device)
torch.cuda.set_stream(cur_stream)
def __exit__(self, type: Any, value: Any, traceback: Any):
# Local cur_stream variable for type refinement
cur_stream = self.stream
# If stream is None or no CUDA device available, return
if cur_stream is None or self.idx == -1:
return
# Reset the stream on the original device
# and destination device
if self.src_prev_stream.device != cur_stream.device: # type: ignore[union-attr]
torch.cuda.set_stream(self.dst_prev_stream) # type: ignore[arg-type]
torch.cuda.set_stream(self.src_prev_stream) # type: ignore[arg-type]
def stream(stream: Optional['torch.cuda.Stream']) -> StreamContext:
r"""Wrapper around the Context-manager StreamContext that
selects a given stream.
Arguments:
stream (Stream): selected stream. This manager is a no-op if it's
``None``.
    .. note:: In eager mode stream is of type Stream class while in JIT it is
        an object of the custom class ``torch.classes.cuda.Stream``.
"""
return StreamContext(stream)
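# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# A minimal example of enqueueing work on a side stream via the `stream` wrapper above,
# then synchronizing with the current stream; assumes a CUDA device. Name is hypothetical.
def _example_side_stream():
    s = torch.cuda.Stream()
    a = torch.randn(1024, 1024, device='cuda')
    with stream(s):
        # Kernels launched here are enqueued on `s` instead of the current stream.
        b = a @ a
    # Make the current stream wait for the side stream before consuming `b`.
    current_stream().wait_stream(s)
    return b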
def set_stream(stream: Stream):
r"""Sets the current stream.This is a wrapper API to set the stream.
Usage of this function is discouraged in favor of the ``stream``
context manager.
Args:
stream (Stream): selected stream. This function is a no-op
if this argument is ``None``.
"""
if stream is None:
return
torch._C._cuda_setStream(stream._cdata)
def device_count() -> int:
r"""Returns the number of GPUs available."""
if is_available():
return torch._C._cuda_getDeviceCount()
else:
return 0
def get_arch_list() -> List[str]:
r"""Returns list CUDA architectures this library was compiled for."""
if not is_available():
return []
arch_flags = torch._C._cuda_getArchFlags()
if arch_flags is None:
return []
return arch_flags.split()
def get_gencode_flags() -> str:
r"""Returns NVCC gencode flags this library was compiled with."""
arch_list = get_arch_list()
if len(arch_list) == 0:
return ""
arch_list_ = [arch.split("_") for arch in arch_list]
return " ".join([f"-gencode compute=compute_{arch},code={kind}_{arch}" for (kind, arch) in arch_list_])
def current_device() -> int:
r"""Returns the index of a currently selected device."""
_lazy_init()
return torch._C._cuda_getDevice()
def synchronize(device: _device_t = None) -> None:
r"""Waits for all kernels in all streams on a CUDA device to complete.
Args:
device (torch.device or int, optional): device for which to synchronize.
It uses the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
"""
_lazy_init()
with torch.cuda.device(device):
return torch._C._cuda_synchronize()
def ipc_collect():
r"""Force collects GPU memory after it has been released by CUDA IPC.
.. note::
        Checks if any sent CUDA tensors could be cleaned from memory. Force
        closes the shared memory file used for reference counting if there are no
        active counters. Useful when the producer process has stopped actively sending
        tensors and wants to release unused memory.
"""
_lazy_init()
return torch._C._cuda_ipc_collect()
def current_stream(device: Optional[_device_t] = None) -> Stream:
r"""Returns the currently selected :class:`Stream` for a given device.
Args:
device (torch.device or int, optional): selected device. Returns
the currently selected :class:`Stream` for the current device, given
by :func:`~torch.cuda.current_device`, if :attr:`device` is ``None``
(default).
"""
_lazy_init()
return Stream(_cdata=torch._C._cuda_getCurrentStream(
_get_device_index(device, optional=True)))
def default_stream(device: Optional[_device_t] = None) -> Stream:
r"""Returns the default :class:`Stream` for a given device.
Args:
device (torch.device or int, optional): selected device. Returns
the default :class:`Stream` for the current device, given by
:func:`~torch.cuda.current_device`, if :attr:`device` is ``None``
(default).
"""
_lazy_init()
return Stream(_cdata=torch._C._cuda_getDefaultStream(
_get_device_index(device, optional=True)))
def current_blas_handle():
r"""Returns cublasHandle_t pointer to current cuBLAS handle"""
_lazy_init()
return torch._C._cuda_getCurrentBlasHandle()
def set_sync_debug_mode(debug_mode: Union[int, str]) -> None:
r"""Sets the debug mode for cuda synchronizing operations.
Args:
        debug_mode (str or int): if "default" or 0, don't error or warn on synchronizing operations,
            if "warn" or 1, warn on synchronizing operations, if "error" or 2, error out on synchronizing operations.
Warning:
This is an experimental feature, and not all synchronizing operations will trigger warning or error. In
particular, operations in torch.distributed and torch.sparse namespaces are not covered yet.
"""
_lazy_init()
if isinstance(debug_mode, str):
if debug_mode == "default":
debug_mode = 0
elif debug_mode == "warn":
debug_mode = 1
elif debug_mode == "error":
debug_mode = 2
else:
raise RuntimeError("invalid value of debug_mode, expected one of `default`, `warn`, `error`")
torch._C._cuda_set_sync_debug_mode(debug_mode)
def get_sync_debug_mode() -> int:
r"""Returns current value of debug mode for cuda synchronizing operations."""
_lazy_init()
return torch._C._cuda_get_sync_debug_mode()
def memory_usage(device: Optional[Union[Device, int]] = None) -> int:
r"""Returns the percent of time over the past sample period during which global (device)
memory was being read or written. as given by `nvidia-smi`.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
Warning: Each sample period may be between 1 second and 1/6 second,
depending on the product being queried.
"""
try:
import pynvml # type: ignore[import]
except ModuleNotFoundError:
raise ModuleNotFoundError("pynvml module not found, please install pynvml")
from pynvml import NVMLError_DriverNotLoaded
try:
pynvml.nvmlInit()
except NVMLError_DriverNotLoaded:
raise RuntimeError("cuda driver can't be loaded, is cuda enabled?")
device = _get_device_index(device, optional=True)
handle = pynvml.nvmlDeviceGetHandleByIndex(device)
return pynvml.nvmlDeviceGetUtilizationRates(handle).memory
def utilization(device: Optional[Union[Device, int]] = None) -> int:
r"""Returns the percent of time over the past sample period during which one or
more kernels was executing on the GPU as given by `nvidia-smi`.
Args:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
Warning: Each sample period may be between 1 second and 1/6 second,
depending on the product being queried.
"""
try:
import pynvml # type: ignore[import]
except ModuleNotFoundError:
raise ModuleNotFoundError("pynvml module not found, please install pynvml")
from pynvml import NVMLError_DriverNotLoaded
try:
pynvml.nvmlInit()
except NVMLError_DriverNotLoaded:
raise RuntimeError("cuda driver can't be loaded, is cuda enabled?")
device = _get_device_index(device, optional=True)
handle = pynvml.nvmlDeviceGetHandleByIndex(device)
return pynvml.nvmlDeviceGetUtilizationRates(handle).gpu
from .memory import * # noqa: F403
from .random import * # noqa: F403
################################################################################
# Define Storage and Tensor classes
################################################################################
@staticmethod # type: ignore[misc]
def _lazy_new(cls, *args, **kwargs):
_lazy_init()
# We may need to call lazy init again if we are a forked child
# del _CudaBase.__new__
return super(_CudaBase, cls).__new__(cls, *args, **kwargs)
class _CudaBase(object):
is_cuda = True
is_sparse = False
def type(self, *args, **kwargs):
# We could use a Protocol here to tell mypy that self has `get_device` method
# but it is only available in the typing module on Python >= 3.8
# or on typing_extensions module on Python >= 3.6
with device(self.get_device()): # type: ignore[attr-defined]
return super(_CudaBase, self).type(*args, **kwargs) # type: ignore[misc]
__new__ = _lazy_new
from torch.storage import _LegacyStorage
class _CudaLegacyStorage(_LegacyStorage):
@classmethod
def from_buffer(cls, *args, **kwargs):
raise RuntimeError('from_buffer: Not available for CUDA storage')
@classmethod
def _new_with_weak_ptr(cls, *args, **kwargs):
raise RuntimeError('_new_with_weak_ptr: Not available for CUDA storage')
@classmethod
def _new_shared_filename(cls, manager, obj, size, *, device=None, dtype=None):
raise RuntimeError('_new_shared_filename: Not available for CUDA storage')
class ByteStorage(_CudaLegacyStorage):
@classproperty
def dtype(self):
return torch.uint8
class DoubleStorage(_CudaLegacyStorage):
@classproperty
def dtype(self):
return torch.double
class FloatStorage(_CudaLegacyStorage):
@classproperty
def dtype(self):
return torch.float
class HalfStorage(_CudaLegacyStorage):
@classproperty
def dtype(self):
return torch.half
class LongStorage(_CudaLegacyStorage):
@classproperty
def dtype(self):
return torch.long
class IntStorage(_CudaLegacyStorage):
@classproperty
def dtype(self):
return torch.int
class ShortStorage(_CudaLegacyStorage):
@classproperty
def dtype(self):
return torch.short
class CharStorage(_CudaLegacyStorage):
@classproperty
def dtype(self):
return torch.int8
class BoolStorage(_CudaLegacyStorage):
@classproperty
def dtype(self):
return torch.bool
class BFloat16Storage(_CudaLegacyStorage):
@classproperty
def dtype(self):
return torch.bfloat16
class ComplexDoubleStorage(_CudaLegacyStorage):
@classproperty
def dtype(self):
return torch.cdouble
class ComplexFloatStorage(_CudaLegacyStorage):
@classproperty
def dtype(self):
return torch.cfloat
del _LegacyStorage
del _CudaLegacyStorage
torch._storage_classes.add(DoubleStorage)
torch._storage_classes.add(FloatStorage)
torch._storage_classes.add(LongStorage)
torch._storage_classes.add(IntStorage)
torch._storage_classes.add(ShortStorage)
torch._storage_classes.add(CharStorage)
torch._storage_classes.add(ByteStorage)
torch._storage_classes.add(HalfStorage)
torch._storage_classes.add(BoolStorage)
torch._storage_classes.add(BFloat16Storage)
torch._storage_classes.add(ComplexDoubleStorage)
torch._storage_classes.add(ComplexFloatStorage)
from . import sparse
from . import profiler
from . import nvtx
from . import amp
from . import jiterator
| pytorch-master | torch/cuda/__init__.py |
import torch
from typing import cast, Iterable, List, Union
from . import _lazy_init, _lazy_call, device_count, current_device
from .. import Tensor
__all__ = ['get_rng_state', 'get_rng_state_all',
'set_rng_state', 'set_rng_state_all',
'manual_seed', 'manual_seed_all',
'seed', 'seed_all', 'initial_seed']
def get_rng_state(device: Union[int, str, torch.device] = 'cuda') -> Tensor:
r"""Returns the random number generator state of the specified GPU as a ByteTensor.
Args:
device (torch.device or int, optional): The device to return the RNG state of.
Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device).
.. warning::
This function eagerly initializes CUDA.
"""
_lazy_init()
if isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device('cuda', device)
idx = device.index
if idx is None:
idx = current_device()
default_generator = torch.cuda.default_generators[idx]
return default_generator.get_state()
def get_rng_state_all() -> List[Tensor]:
r"""Returns a list of ByteTensor representing the random number states of all devices."""
results = []
for i in range(device_count()):
results.append(get_rng_state(i))
return results
def set_rng_state(new_state: Tensor, device: Union[int, str, torch.device] = 'cuda') -> None:
r"""Sets the random number generator state of the specified GPU.
Args:
new_state (torch.ByteTensor): The desired state
device (torch.device or int, optional): The device to set the RNG state.
Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device).
"""
new_state_copy = new_state.clone(memory_format=torch.contiguous_format)
if isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device('cuda', device)
def cb():
idx = cast(torch.device, device).index
if idx is None:
idx = current_device()
default_generator = torch.cuda.default_generators[idx]
default_generator.set_state(new_state_copy)
_lazy_call(cb)
def set_rng_state_all(new_states: Iterable[Tensor]) -> None:
r"""Sets the random number generator state of all devices.
Args:
new_states (Iterable of torch.ByteTensor): The desired state for each device"""
for i, state in enumerate(new_states):
set_rng_state(state, i)
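# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# A hedged example of checkpointing and restoring CUDA RNG state with the helpers above;
# assumes at least one CUDA device. The helper name is hypothetical.
def _example_rng_roundtrip():
    states = get_rng_state_all()        # snapshot the RNG state of every device
    first = torch.randn(3, device='cuda')
    set_rng_state_all(states)           # restore the snapshot
    second = torch.randn(3, device='cuda')
    assert torch.equal(first, second)   # identical draws after the restore
    return first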
def manual_seed(seed: int) -> None:
r"""Sets the seed for generating random numbers for the current GPU.
It's safe to call this function if CUDA is not available; in that
case, it is silently ignored.
Args:
seed (int): The desired seed.
.. warning::
If you are working with a multi-GPU model, this function is insufficient
to get determinism. To seed all GPUs, use :func:`manual_seed_all`.
"""
seed = int(seed)
def cb():
idx = current_device()
default_generator = torch.cuda.default_generators[idx]
default_generator.manual_seed(seed)
_lazy_call(cb, seed=True)
def manual_seed_all(seed: int) -> None:
r"""Sets the seed for generating random numbers on all GPUs.
It's safe to call this function if CUDA is not available; in that
case, it is silently ignored.
Args:
seed (int): The desired seed.
"""
seed = int(seed)
def cb():
for i in range(device_count()):
default_generator = torch.cuda.default_generators[i]
default_generator.manual_seed(seed)
_lazy_call(cb, seed_all=True)
def seed() -> None:
r"""Sets the seed for generating random numbers to a random number for the current GPU.
It's safe to call this function if CUDA is not available; in that
case, it is silently ignored.
.. warning::
If you are working with a multi-GPU model, this function will only initialize
the seed on one GPU. To initialize all GPUs, use :func:`seed_all`.
"""
def cb():
idx = current_device()
default_generator = torch.cuda.default_generators[idx]
default_generator.seed()
_lazy_call(cb)
def seed_all() -> None:
r"""Sets the seed for generating random numbers to a random number on all GPUs.
It's safe to call this function if CUDA is not available; in that
case, it is silently ignored.
"""
def cb():
random_seed = 0
seeded = False
for i in range(device_count()):
default_generator = torch.cuda.default_generators[i]
if not seeded:
default_generator.seed()
random_seed = default_generator.initial_seed()
seeded = True
else:
default_generator.manual_seed(random_seed)
_lazy_call(cb)
def initial_seed() -> int:
r"""Returns the current random seed of the current GPU.
.. warning::
This function eagerly initializes CUDA.
"""
_lazy_init()
idx = current_device()
default_generator = torch.cuda.default_generators[idx]
return default_generator.initial_seed()
| pytorch-master | torch/cuda/random.py |
import pickle
import sys
import os
import io
import subprocess
from typing import Dict, Any
__all__ = ["format_flamegraph", "segments", "memory", "compare", "stats", "Bytes"]
def _frame_fmt(f):
i = f['line']
fname = f['filename'].split('/')[-1]
func = f['name']
return f'{fname}:{i}:{func}'
def format_flamegraph(flamegraph_lines, flamegraph_script=None):
if flamegraph_script is None:
flamegraph_script = f'/tmp/{os.getuid()}_flamegraph.pl'
if not os.path.exists(flamegraph_script):
import urllib.request
print(f"Downloading flamegraph.pl to: {flamegraph_script}")
urllib.request.urlretrieve(
'https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl', flamegraph_script)
subprocess.run(['chmod', '+x', flamegraph_script])
args = [flamegraph_script, '--countname', 'bytes']
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, encoding='utf-8')
assert p.stdin is not None
assert p.stdout is not None
p.stdin.write(flamegraph_lines)
p.stdin.close()
result = p.stdout.read()
p.stdout.close()
    assert p.wait() == 0
return result
def _write_blocks(f, prefix, blocks):
for b in blocks:
if 'history' not in b:
f.write(f'{prefix};{b["state"]} {b["size"]}\n')
continue
accounted_for_size = 0
for h in b['history']:
sz = h['real_size']
accounted_for_size += sz
frames = h['frames']
if frames:
frame_s = ';'.join([_frame_fmt(f) for f in reversed(frames)])
else:
frame_s = "<non-python>"
f.write(f'{prefix};{b["state"]};{frame_s} {sz}\n')
gaps = b['size'] - accounted_for_size
if gaps:
f.write(f'{prefix};{b["state"]};<gaps> {gaps}\n')
def segments(snapshot, format_flamegraph=format_flamegraph):
f = io.StringIO()
for seg in snapshot:
prefix = f'stream_{seg["stream"]};seg_{seg["address"]}'
_write_blocks(f, prefix, seg['blocks'])
return format_flamegraph(f.getvalue())
def memory(snapshot, format_flamegraph=format_flamegraph):
f = io.StringIO()
for seg in snapshot:
prefix = f'stream_{seg["stream"]}'
_write_blocks(f, prefix, seg['blocks'])
return format_flamegraph(f.getvalue())
def compare(before, after, format_flamegraph=format_flamegraph):
def _seg_key(seg):
return (seg['address'], seg['total_size'])
def _seg_info(seg):
return f'stream_{seg["stream"]};seg_{seg["address"]}'
f = io.StringIO()
before_segs = set(_seg_key(seg) for seg in before)
after_segs = set(_seg_key(seg) for seg in after)
print(f'only_before = {list(a for a,_ in (before_segs - after_segs))}')
print(f'only_after = {list(a for a,_ in (after_segs - before_segs))}')
for seg in before:
if _seg_key(seg) not in after_segs:
_write_blocks(f, f'only_before;{_seg_info(seg)}', seg['blocks'])
for seg in after:
if _seg_key(seg) not in before_segs:
_write_blocks(f, f'only_after;{_seg_info(seg)}', seg['blocks'])
return format_flamegraph(f.getvalue())
class Bytes:
def __init__(self, value):
self.value = value
def __add__(self, rhs):
return Bytes(self.value + rhs)
def __repr__(self):
num = self.value
# https://stackoverflow.com/questions/1094841/get-human-readable-version-of-file-size
for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
if abs(num) < 1024.0:
return f"{num:3.1f}{unit}B"
num /= 1024.0
return f"{num:.1f}YiB"
def stats(snapshot):
result : Dict[str, Any] = {}
result['segments'] = len(snapshot)
result['total_size'] = Bytes(0)
for seg in snapshot:
total_size = 0
for b in seg['blocks']:
if b['state'] not in result:
result[b['state']] = Bytes(0)
total_size += b['size']
result[b['state']] += b['size']
assert seg['total_size'] == total_size
result['total_size'] += total_size
return result
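# --- Illustrative usage sketch (added for exposition; not part of the original file) ---
# A tiny synthetic snapshot shaped like the structure `stats` and `_write_blocks` expect
# above (segments with 'stream', 'address', 'total_size' and 'blocks' carrying
# 'state'/'size'); it exercises `stats` without a GPU. The helper name is hypothetical.
def _example_stats():
    snapshot = [{
        'stream': 0,
        'address': 0x7f0000000000,
        'total_size': 3072,
        'blocks': [
            {'state': 'active_allocated', 'size': 2048},
            {'state': 'inactive', 'size': 1024},
        ],
    }]
    # e.g. {'segments': 1, 'total_size': 3.0KiB, 'active_allocated': 2.0KiB, 'inactive': 1.0KiB}
    return stats(snapshot)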
if __name__ == "__main__":
import os.path
thedir = os.path.realpath(os.path.dirname(__file__))
if thedir in sys.path:
# otherwise we find cuda/random.py as random...
sys.path.remove(thedir)
from pprint import pprint
import argparse
fn_name = 'torch.cuda.memory_dbg.snapshot()'
pickled = f'pickled memory statistics from {fn_name}'
parser = argparse.ArgumentParser(description=f'Visualize memory dumps produced by {fn_name}')
subparsers = parser.add_subparsers(dest='action')
def _output(p):
p.add_argument('-o', '--output', default='output.svg', help='flamegraph svg (default: output.svg)')
stats_a = subparsers.add_parser('stats', description='Prints overall allocation statistics')
stats_a.add_argument('input', help=pickled)
description = 'Generate a flamegraph that visualizes what memory is stored in each allocator segment (aka block)'
segments_a = subparsers.add_parser('segments', description=description)
segments_a.add_argument('input', help=pickled)
_output(segments_a)
description = "Generate a flamegraph the program locations contributing to CUDA memory usage."
memory_a = subparsers.add_parser('memory', description=description)
memory_a.add_argument('input', help=pickled)
_output(memory_a)
    description = 'Generate a flamegraph that shows segments (aka blocks) that have been added ' \
                  'or removed between two different memory snapshots.'
compare_a = subparsers.add_parser('compare', description=description)
compare_a.add_argument('before', help=pickled)
compare_a.add_argument('after', help=pickled)
_output(compare_a)
args = parser.parse_args()
def _read(name):
if name == '-':
return sys.stdin.buffer
else:
return open(name, 'rb')
def _write(name, data):
with open(name, 'w') as f:
f.write(data)
if args.action == 'segments':
data = pickle.load(_read(args.input))
_write(args.output, segments(data))
elif args.action == 'memory':
data = pickle.load(_read(args.input))
_write(args.output, memory(data))
elif args.action == 'stats':
data = pickle.load(_read(args.input))
pprint(stats(data))
elif args.action == 'compare':
before = pickle.load(_read(args.before))
after = pickle.load(_read(args.after))
_write(args.output, compare(before, after))
| pytorch-master | torch/cuda/_memory_viz.py |
# The Tensor classes are added to this module by python_tensor.cpp
| pytorch-master | torch/cuda/sparse.py |
from contextlib import contextmanager
try:
from torch._C import _nvtx
except ImportError:
class _NVTXStub(object):
@staticmethod
def _fail(*args, **kwargs):
raise RuntimeError("NVTX functions not installed. Are you sure you have a CUDA build?")
        # Stub out every NVTX entry point used below so that calls on a
        # non-CUDA build raise the informative error above.
        rangePushA = _fail
        rangePop = _fail
        rangeStartA = _fail
        rangeEnd = _fail
        markA = _fail
_nvtx = _NVTXStub() # type: ignore[assignment]
__all__ = ["range_push", "range_pop", "range_start", "range_end", "mark", "range"]
def range_push(msg):
"""
    Pushes a range onto a stack of nested range spans. Returns the zero-based
    depth of the range that is started.
Args:
msg (str): ASCII message to associate with range
"""
return _nvtx.rangePushA(msg)
def range_pop():
"""
Pops a range off of a stack of nested range spans. Returns the
zero-based depth of the range that is ended.
"""
return _nvtx.rangePop()
def range_start(msg) -> int:
"""
    Mark the start of a range with a string message. It returns a unique handle
    for this range to pass to the corresponding call to range_end().
A key difference between this and range_push/range_pop is that the
range_start/range_end version supports range across threads (start on one
thread and end on another thread).
Returns: A range handle (uint64_t) that can be passed to range_end().
Args:
msg (str): ASCII message to associate with the range.
"""
return _nvtx.rangeStartA(msg)
def range_end(range_id) -> None:
"""
Mark the end of a range for a given range_id.
Args:
        range_id (int): a unique handle for the start range.
"""
_nvtx.rangeEnd(range_id)
def mark(msg):
"""
Describe an instantaneous event that occurred at some point.
Args:
msg (str): ASCII message to associate with the event.
"""
return _nvtx.markA(msg)
@contextmanager
def range(msg, *args, **kwargs):
"""
Context manager / decorator that pushes an NVTX range at the beginning
of its scope, and pops it at the end. If extra arguments are given,
they are passed as arguments to msg.format().
Args:
msg (str): message to associate with the range
"""
range_push(msg.format(*args, **kwargs))
yield
range_pop()
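# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# A hedged example of bracketing work with the NVTX helpers above so it shows up as
# named ranges in an external profiler timeline; requires a CUDA build. Name is hypothetical.
def _example_nvtx_ranges():
    with range("batch {}", 0):          # formatted message, popped automatically on exit
        mark("inside the range")        # instantaneous event
    handle = range_start("cross-thread range")
    range_end(handle)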
| pytorch-master | torch/cuda/nvtx.py |
import gc
import torch
from ._utils import _dummy_type
if not hasattr(torch._C, '_CudaStreamBase'):
# Define dummy base classes
torch._C.__dict__['_CUDAGraph'] = _dummy_type('_CUDAGraph')
torch._C.__dict__['_graph_pool_handle'] = _dummy_type('_graph_pool_handle')
torch._C.__dict__['_cuda_isCurrentStreamCapturing'] = _dummy_type('_cuda_isCurrentStreamCapturing')
from torch._C import _CUDAGraph # noqa: F401
from torch._C import _graph_pool_handle
from torch._C import _cuda_isCurrentStreamCapturing
def is_current_stream_capturing():
r"""
Returns True if CUDA graph capture is underway on the current CUDA stream, False otherwise.
If a CUDA context does not exist on the current device, returns False without initializing the context.
"""
return _cuda_isCurrentStreamCapturing()
# Python shim helps Sphinx process docstrings more reliably.
def graph_pool_handle():
r"""
Returns an opaque token representing the id of a graph memory pool.
See :ref:`Graph memory management<graph-memory-management>`.
.. warning::
This API is in beta and may change in future releases.
"""
return _graph_pool_handle()
# Python shim helps Sphinx process docstrings more reliably.
class CUDAGraph(torch._C._CUDAGraph):
r"""
Wrapper around a CUDA graph.
.. warning::
This API is in beta and may change in future releases.
"""
def __new__(cls):
return super(CUDAGraph, cls).__new__(cls)
def __init__(self):
super(CUDAGraph, self).__init__()
def capture_begin(self, pool=None):
r"""
Begins capturing CUDA work on the current stream.
Typically, you shouldn't call ``capture_begin`` yourself.
Use :class:`~torch.cuda.graph` or :func:`~torch.cuda.make_graphed_callables`,
which call ``capture_begin`` internally.
Arguments:
pool (optional): Token (returned by :func:`~torch.cuda.graph_pool_handle` or
:meth:`other_Graph_instance.pool()<torch.cuda.CUDAGraph.pool>`) that hints this graph may share memory
with the indicated pool. See :ref:`Graph memory management<graph-memory-management>`.
"""
# I'm not sure if pybind11 converts a None arg to the default defined on the C++ side,
# so I'm not taking any chances.
if pool is None:
super(CUDAGraph, self).capture_begin()
else:
super(CUDAGraph, self).capture_begin(pool)
def capture_end(self):
r"""
Ends CUDA graph capture on the current stream.
After ``capture_end``, ``replay`` may be called on this instance.
Typically, you shouldn't call ``capture_end`` yourself.
Use :class:`~torch.cuda.graph` or :func:`~torch.cuda.make_graphed_callables`,
which call ``capture_end`` internally.
"""
super(CUDAGraph, self).capture_end()
def replay(self):
r"""
Replays the CUDA work captured by this graph.
"""
super(CUDAGraph, self).replay()
def reset(self):
r"""
Deletes the graph currently held by this instance.
"""
super(CUDAGraph, self).reset()
def pool(self):
r"""
Returns an opaque token representing the id of this graph's memory pool.
This id can optionally be passed to another graph's ``capture_begin``,
which hints the other graph may share the same memory pool.
"""
return super(CUDAGraph, self).pool()
class graph(object):
r"""
Context-manager that captures CUDA work into a :class:`torch.cuda.CUDAGraph`
object for later replay.
See :ref:`CUDA Graphs <cuda-graph-semantics>` for a general introduction,
detailed use, and constraints.
Arguments:
cuda_graph (torch.cuda.CUDAGraph): Graph object used for capture.
pool (optional): Opaque token (returned by a call to :func:`~torch.cuda.graph_pool_handle()` or
:meth:`other_Graph_instance.pool()<torch.cuda.CUDAGraph.pool>`) hinting this graph's capture
may share memory from the specified pool. See :ref:`Graph memory management<graph-memory-management>`.
stream (torch.cuda.Stream, optional): If supplied, will be set as the current stream in the context.
If not supplied, ``graph`` sets its own internal side stream as the current stream in the context.
.. note::
For effective memory sharing, if you pass a ``pool`` used by a previous capture and the previous capture
used an explicit ``stream`` argument, you should pass the same ``stream`` argument to this capture.
.. warning::
This API is in beta and may change in future releases.
"""
default_capture_stream = None
def __init__(self,
cuda_graph,
pool=None,
stream=None):
# Lazy-init of default_capture_stream helps avoid circular-import errors.
# Not thread safe, but graphs already have the general (explicitly documented)
# restriction that only one capture may be underway at a time in the process.
if self.__class__.default_capture_stream is None:
self.__class__.default_capture_stream = torch.cuda.Stream()
self.pool = () if pool is None else (pool,)
self.capture_stream = stream if stream is not None else self.__class__.default_capture_stream
assert self.capture_stream is not None
self.stream_ctx = torch.cuda.stream(self.capture_stream)
self.cuda_graph = cuda_graph
def __enter__(self):
# Free as much memory as we can for the graph
torch.cuda.synchronize()
gc.collect()
torch.cuda.empty_cache()
# Stackoverflow seems comfortable with this pattern
# https://stackoverflow.com/questions/26635684/calling-enter-and-exit-manually#39172487
self.stream_ctx.__enter__()
self.cuda_graph.capture_begin(*self.pool)
def __exit__(self, exc_type, exc_value, traceback):
self.cuda_graph.capture_end()
self.stream_ctx.__exit__(exc_type, exc_value, traceback)
# returning None should propagate exceptions from either capture_end or stream_ctx.__exit__()
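# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# A minimal capture/replay loop using the `graph` context manager above, following the
# static-input/static-output pattern; assumes a CUDA device. The helper name is hypothetical.
def _example_graph_capture():
    static_in = torch.zeros(64, device='cuda')
    # Warm up on a side stream so lazy initialization doesn't leak into the capture.
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        static_out = static_in * 2
    torch.cuda.current_stream().wait_stream(s)
    g = torch.cuda.CUDAGraph()
    with graph(g):
        static_out = static_in * 2
    # Replays reuse the captured buffers: refill the static input, then replay.
    static_in.copy_(torch.randn(64, device='cuda'))
    g.replay()
    return static_out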
def make_graphed_callables(callables, sample_args, num_warmup_iters=3):
r"""
Accepts callables (functions or :class:`nn.Module<torch.nn.Module>`\ s)
and returns graphed versions.
Each graphed callable's forward pass runs its source callable's
forward CUDA work as a CUDA graph inside a single autograd node.
The graphed callable's forward pass also appends
a backward node to the autograd graph. During backward, this node runs the
callable's backward work as a CUDA graph.
Therefore, each graphed callable should be a drop-in replacement for its source callable
in an autograd-enabled training loop.
See :ref:`Partial-network capture<partial-network-capture>` for detailed use and constraints.
If you pass a tuple of several callables, their captures will use the same memory pool.
See :ref:`Graph memory management<graph-memory-management>` for when this is appropriate.
Arguments:
callables (torch.nn.Module or Python function, or tuple of these): Callable or callables to graph.
See :ref:`Graph memory management<graph-memory-management>` for when passing a tuple of callables
is appropriate. If you pass a tuple of callables, their order in the tuple must be the same order
they'll run in the live workload.
sample_args (tuple of Tensors, or tuple of tuples of Tensors): Samples args for each callable.
If a single callable was passed, ``sample_args`` must be a single tuple of argument Tensors.
If a tuple of callables was passed, ``sample_args`` must be tuple of tuples of argument Tensors.
        num_warmup_iters (int): The number of warmup iterations. Currently, ``DistributedDataParallel`` needs
            11 iterations for warm up. Default: ``3``.
.. note::
The ``requires_grad`` state of each Tensor in ``sample_args`` must match the state
that's expected for the corresponding real input in the training loop.
.. warning::
This API is in beta and may change in future releases.
.. warning::
``sample_args`` for each callable must be a tuple of Tensors. Other types and keyword args
are not allowed.
.. warning::
Returned callables do not support higher order differentiation (e.g., double backward).
.. warning::
In any :class:`~torch.nn.Module` passed to :func:`~make_graphed_callables`, only parameters
may be trainable. Buffers must have ``requires_grad=False``.
.. warning::
After you pass a :class:`torch.nn.Module` through :func:`~make_graphed_callables`,
you may not add or remove any of that Module's parameters or buffers.
.. warning::
:class:`torch.nn.Module`\s passed to :func:`~torch.cuda.make_graphed_callables` must not have module hooks
registered on them at the time they are passed. However, registering hooks on modules *after* passing them
through :func:`~torch.cuda.make_graphed_callables` is allowed.
.. warning::
When running a graphed callable, you must pass its arguments in the same order and format
they appeared in that callable's ``sample_args``.
.. warning::
All Tensor outputs of graphed callables must require grad.
"""
just_one_callable = False
if not isinstance(callables, tuple):
just_one_callable = True
callables = (callables,)
sample_args = (sample_args,)
for c, args in zip(callables, sample_args):
if isinstance(c, torch.nn.Module):
assert len(c._backward_hooks) == 0 and len(c._forward_hooks) == 0 and len(c._forward_pre_hooks) == 0, \
"Modules must not have hooks registered at the time they are passed. However, registering hooks " + \
"on modules after passing them through make_graphed_callables is allowed."
assert all(b.requires_grad is False for b in c.buffers()), "In any :class:`~torch.nn.Module` passed to " + \
":func:`~make_graphed_callables`, only parameters may be trainable. All buffers must have " + \
"``requires_grad=False``."
assert all(isinstance(arg, torch.Tensor) for arg in args), "In the beta API, sample_args " + \
"for each callable must be a tuple of Tensors. Other types and keyword args are not allowed."
# If a callable is an nn.Module, its graph's full input surface is the args the user explicitly
# passes to forward (ie, its sample_args) AND the module's parameter attributes.
per_callable_len_user_args = [len(args) for args in sample_args]
per_callable_module_params = [tuple(c.parameters()) if isinstance(c, torch.nn.Module) else ()
for c in callables]
per_callable_static_input_surfaces = [sample_args[i] + per_callable_module_params[i]
for i in range(len(callables))]
fwd_graphs = [torch.cuda.CUDAGraph() for _ in range(len(callables))]
bwd_graphs = [torch.cuda.CUDAGraph() for _ in range(len(callables))]
mempool = graph_pool_handle()
# Warmup
# Hopefully prevents cudnn benchmarking and other lazy-initialization cuda work
# from ending up in any captures.
torch.cuda.synchronize()
with torch.cuda.stream(torch.cuda.Stream()):
for func, args, static_input_surface in zip(callables,
sample_args,
per_callable_static_input_surfaces):
for _ in range(num_warmup_iters):
outputs = func(*args)
outputs = (outputs,) if isinstance(outputs, torch.Tensor) else outputs
grad_inputs = torch.autograd.grad(outputs=outputs,
inputs=tuple(i for i in static_input_surface if i.requires_grad),
grad_outputs=tuple(torch.empty_like(o) for o in outputs),
only_inputs=True,
allow_unused=False)
del outputs, grad_inputs
torch.cuda.synchronize()
# All captures here share a mempool. To avoid replays corrupting each other's memory,
# the safest approach is to capture all passes in the same order they'll run:
# fwd 1, fwd 2, ... fwd N, then bwd N, bwd N-1, ... bwd 1.
# Clear AMP autocast cache before capturing the graphs
torch.clear_autocast_cache()
# Capture forward graphs
per_callable_static_outputs = []
per_callable_output_was_tensor = []
for func, args, fwd_graph in zip(callables,
sample_args,
fwd_graphs):
with torch.cuda.graph(fwd_graph, pool=mempool):
outputs = func(*args)
# Assumes model output is a tensor or tuple of tensors
if isinstance(outputs, torch.Tensor):
per_callable_output_was_tensor.append(True)
outputs = (outputs,)
else:
per_callable_output_was_tensor.append(False)
per_callable_static_outputs.append(outputs)
# Capture backward graphs in reverse order
per_callable_static_grad_outputs = []
per_callable_static_grad_inputs = []
for static_input_surface, static_outputs, bwd_graph, module_params in \
zip(reversed(per_callable_static_input_surfaces),
reversed(per_callable_static_outputs),
reversed(bwd_graphs),
reversed(per_callable_module_params)):
# For now, assumes all static_outputs require grad
assert all(o.requires_grad for o in static_outputs), "Outputs of graphed callables must require grad."
static_grad_outputs = tuple(torch.empty_like(o) for o in static_outputs)
with torch.cuda.graph(bwd_graph, pool=mempool):
grad_inputs = torch.autograd.grad(outputs=static_outputs,
inputs=tuple(i for i in static_input_surface if i.requires_grad),
grad_outputs=static_grad_outputs,
only_inputs=True,
allow_unused=False)
# Constructs a tuple suitable for returning from Graphed.backward:
# Pads out the actually-needed grads with Nones in gradient slots for inputs that don't require grad.
# I couldn't think of a slick one-liner for this pattern.
static_grad_inputs = []
grad_idx = 0
for arg in static_input_surface:
if arg.requires_grad:
static_grad_inputs.append(grad_inputs[grad_idx])
grad_idx += 1
else:
static_grad_inputs.append(None) # type: ignore[arg-type]
static_grad_inputs = tuple(static_grad_inputs) # type: ignore[assignment]
per_callable_static_grad_outputs.append(static_grad_outputs)
per_callable_static_grad_inputs.append(static_grad_inputs)
# Reverses the most recent two lists
per_callable_static_grad_outputs = list(reversed(per_callable_static_grad_outputs))
per_callable_static_grad_inputs = list(reversed(per_callable_static_grad_inputs))
# Now for every per_callable list, per_callable_*[i] holds the stuff for the ith callable.
# Clear AMP autocast cache after both forward and backward graphs are captured
torch.clear_autocast_cache()
def make_graphed_autograd_function(fwd_graph,
bwd_graph,
module_params,
len_user_args,
output_was_tensor,
static_input_surface,
static_outputs,
static_grad_outputs,
static_grad_inputs):
class Graphed(torch.autograd.Function):
@staticmethod
def forward(ctx, *inputs):
# At this stage, only the user args may (potentially) be new tensors.
for i in range(len_user_args):
if static_input_surface[i].data_ptr() != inputs[i].data_ptr():
static_input_surface[i].copy_(inputs[i])
fwd_graph.replay()
assert isinstance(static_outputs, tuple)
return tuple(o.detach() for o in static_outputs)
@staticmethod
@torch.autograd.function.once_differentiable
def backward(ctx, *grads):
for g, grad in zip(static_grad_outputs, grads):
if g is None:
assert grad is None
else:
# don't copy if autograd gods have been kind and the
# incoming grad is already in the right place
if g.data_ptr() != grad.data_ptr():
g.copy_(grad)
bwd_graph.replay()
# Input args that didn't require grad expect a None gradient.
assert isinstance(static_grad_inputs, tuple)
return tuple(b.detach() if b is not None else b for b in static_grad_inputs)
def functionalized(*user_args):
# Runs the autograd function with inputs == all inputs to the graph that might require grad
# (explicit user args + module parameters)
# Assumes module params didn't change since capture.
out = Graphed.apply(*(user_args + module_params))
return out[0] if output_was_tensor else out
return functionalized
# Put together the final graphed callables
ret = []
for i, func in enumerate(callables):
graphed = make_graphed_autograd_function(fwd_graphs[i],
bwd_graphs[i],
per_callable_module_params[i],
per_callable_len_user_args[i],
per_callable_output_was_tensor[i],
per_callable_static_input_surfaces[i],
per_callable_static_outputs[i],
per_callable_static_grad_outputs[i],
per_callable_static_grad_inputs[i])
if isinstance(func, torch.nn.Module):
def make_graphed_forward(func, graph_training_state, graphed, orig_fwd):
def new_fwd(*user_args):
# If the module's training-or-eval state matches what we graphed,
# run the graph, otherwise run the original forward method
if func.training == graph_training_state:
return graphed(*user_args)
else:
return orig_fwd(*user_args)
return new_fwd
func.forward = make_graphed_forward(func, func.training, graphed, func.forward) # type: ignore[assignment]
ret.append(func)
else:
ret.append(graphed)
if just_one_callable:
return ret[0]
return tuple(ret)
| pytorch-master | torch/cuda/graphs.py |
import tempfile
import contextlib
from . import cudart, check_error
DEFAULT_FLAGS = [
"gpustarttimestamp",
"gpuendtimestamp",
"gridsize3d",
"threadblocksize",
"streamid",
"enableonstart 0",
"conckerneltrace",
]
def init(output_file, flags=None, output_mode='key_value'):
rt = cudart()
if not hasattr(rt, 'cudaOutputMode'):
raise AssertionError("HIP does not support profiler initialization!")
flags = DEFAULT_FLAGS if flags is None else flags
if output_mode == 'key_value':
output_mode_enum = rt.cudaOutputMode.KeyValuePair
elif output_mode == 'csv':
output_mode_enum = rt.cudaOutputMode.CSV
else:
raise RuntimeError("supported CUDA profiler output modes are: key_value and csv")
with tempfile.NamedTemporaryFile(delete=True) as f:
        f.write(b'\n'.join(flag.encode('ascii') for flag in flags))
f.flush()
check_error(rt.cudaProfilerInitialize(f.name, output_file, output_mode_enum))
def start():
check_error(cudart().cudaProfilerStart())
def stop():
check_error(cudart().cudaProfilerStop())
@contextlib.contextmanager
def profile():
try:
start()
yield
finally:
stop()
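# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# A hedged example of bracketing a region for the CUDA profiler with the `profile`
# context manager above; intended to be run under an external profiler started with
# capture disabled (e.g. `nvprof --profile-from-start off`). The helper name is hypothetical.
def _example_profiled_region():
    import torch
    x = torch.randn(256, 256, device='cuda')
    with profile():
        y = x @ x
    return y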
| pytorch-master | torch/cuda/profiler.py |
import torch
from typing import Any
# The _get_device_index has been moved to torch.utils._get_device_index
from torch._utils import _get_device_index as _torch_get_device_index
def _get_device_index(device: Any, optional: bool = False,
allow_cpu: bool = False) -> int:
r"""Gets the device index from :attr:`device`, which can be a torch.device
object, a Python integer, or ``None``.
If :attr:`device` is a torch.device object, returns the device index if it
is a CUDA device. Note that for a CUDA device without a specified index,
i.e., ``torch.device('cuda')``, this will return the current default CUDA
device if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
CPU devices will be accepted and ``-1`` will be returned in this case.
If :attr:`device` is a Python integer, it is returned as is.
If :attr:`device` is ``None``, this will return the current default CUDA
device if :attr:`optional` is ``True``.
"""
if isinstance(device, str):
device = torch.device(device)
if isinstance(device, torch.device):
if allow_cpu:
if device.type not in ['cuda', 'cpu']:
raise ValueError('Expected a cuda or cpu device, but got: {}'.format(device))
elif device.type != 'cuda':
raise ValueError('Expected a cuda device, but got: {}'.format(device))
if not torch.jit.is_scripting():
if isinstance(device, torch.cuda.device):
return device.idx
return _torch_get_device_index(device, optional, allow_cpu)
def _dummy_type(name: str) -> type:
def init_err(self):
class_name = self.__class__.__name__
raise RuntimeError(
"Tried to instantiate dummy base class {}".format(class_name))
return type(name, (object,), {"__init__": init_err})
| pytorch-master | torch/cuda/_utils.py |
import torch
import functools
import collections
try:
import numpy as np
HAS_NUMPY = True
except ModuleNotFoundError:
np = None # type: ignore[assignment]
from torch._six import string_classes
from typing import Any
class autocast(torch.amp.autocast_mode.autocast):
r"""
See :class:`torch.autocast`.
``torch.cuda.amp.autocast(args...)`` is equivalent to ``torch.autocast("cuda", args...)``
"""
def __init__(self, enabled : bool = True, dtype : torch.dtype = torch.float16, cache_enabled : bool = True):
if torch._jit_internal.is_scripting():
self._enabled = enabled
self.device = "cuda"
self.fast_dtype = dtype
return
super().__init__("cuda", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled)
def __enter__(self):
if torch._jit_internal.is_scripting():
return self
return super().__enter__()
# TODO: discuss a unified TorchScript-friendly API for autocast
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): # type: ignore[override]
if torch._jit_internal.is_scripting():
return
return super().__exit__(exc_type, exc_val, exc_tb)
def __call__(self, func):
if torch._jit_internal.is_scripting():
return func
return super().__call__(func)
# Casts Tensors and containers of Tensors. Special-cases passthroughs for strings and np.ndarrays, which
# may be falsely detected as "Iterables."
def _cast(value, dtype):
if isinstance(value, torch.Tensor):
is_eligible = (value.is_floating_point() and value.is_cuda and (value.dtype is not torch.float64))
return value.to(dtype) if is_eligible else value
elif isinstance(value, string_classes):
return value
elif HAS_NUMPY and isinstance(value, np.ndarray):
return value
elif isinstance(value, collections.abc.Mapping):
return {_cast(k, dtype): _cast(v, dtype) for k, v in value.items()}
elif isinstance(value, collections.abc.Iterable):
iterable = map(lambda v: _cast(v, dtype), value)
if isinstance(value, list) or isinstance(value, tuple):
return type(value)(iterable)
else:
return iterable
else:
return value
# custom_fwd is a decorator that may or may not be used with arguments, following
# https://github.com/dabeaz/python-cookbook/tree/master/src/9/defining_a_decorator_that_takes_an_optional_argument.
# this works:
# @custom_fwd
# def forward(...):
# this also works:
# @custom_fwd(cast_inputs=torch.float)
# def forward(...):
def custom_fwd(fwd=None, *, cast_inputs=None):
"""
Helper decorator for ``forward`` methods of custom autograd functions (subclasses of
:class:`torch.autograd.Function`). See the :ref:`example page<amp-custom-examples>` for more detail.
Args:
cast_inputs (:class:`torch.dtype` or None, optional, default=None): If not ``None``,
when ``forward`` runs in an autocast-enabled region, casts incoming
floating-point CUDA Tensors to the target dtype (non-floating-point Tensors are not affected),
then executes ``forward`` with autocast disabled.
If ``None``, ``forward``'s internal ops execute with the current autocast state.
.. note::
If the decorated ``forward`` is called outside an autocast-enabled region,
:func:`custom_fwd<custom_fwd>` is a no-op and ``cast_inputs`` has no effect.
"""
if fwd is None:
return functools.partial(custom_fwd, cast_inputs=cast_inputs)
@functools.wraps(fwd)
def decorate_fwd(*args, **kwargs):
if cast_inputs is None:
args[0]._fwd_used_autocast = torch.is_autocast_enabled()
return fwd(*args, **kwargs)
else:
autocast_context = torch.is_autocast_enabled()
args[0]._fwd_used_autocast = False
if autocast_context:
with autocast(enabled=False):
return fwd(*_cast(args, cast_inputs), **_cast(kwargs, cast_inputs))
else:
return fwd(*args, **kwargs)
return decorate_fwd
# Autograd ensures incoming gradients are the same type as forward outputs. Allowing a separate
# cast_inputs argument on custom_bwd is unnecessary and could cause errors if it doesn't match
# cast_inputs supplied to custom_fwd.
def custom_bwd(bwd):
"""
Helper decorator for backward methods of custom autograd functions (subclasses of
:class:`torch.autograd.Function`).
Ensures that ``backward`` executes with the same autocast state as ``forward``.
See the :ref:`example page<amp-custom-examples>` for more detail.
"""
@functools.wraps(bwd)
def decorate_bwd(*args, **kwargs):
with autocast(args[0]._fwd_used_autocast):
return bwd(*args, **kwargs)
return decorate_bwd
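# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# A minimal custom autograd Function wired up with the decorators above; with
# ``cast_inputs=torch.float32`` the op runs in fp32 even inside an autocast region,
# and the backward pass reuses the forward's autocast state. The class name is hypothetical.
class _ExampleMM(torch.autograd.Function):
    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, a, b):
        ctx.save_for_backward(a, b)
        return a.mm(b)

    @staticmethod
    @custom_bwd
    def backward(ctx, grad_output):
        a, b = ctx.saved_tensors
        return grad_output.mm(b.t()), a.t().mm(grad_output)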
| pytorch-master | torch/cuda/amp/autocast_mode.py |
import torch
from collections import defaultdict, abc
import warnings
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
from .common import amp_definitely_not_available
class _MultiDeviceReplicator(object):
"""
Lazily serves copies of a tensor to requested devices. Copies are cached per-device.
"""
def __init__(self, master_tensor: torch.Tensor) -> None:
assert master_tensor.is_cuda or master_tensor.device.type == 'xla'
self.master = master_tensor
self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}
def get(self, device) -> torch.Tensor:
retval = self._per_device_tensors.get(device, None)
if retval is None:
retval = self.master.to(device=device, non_blocking=True, copy=True)
self._per_device_tensors[device] = retval
return retval
# Defines default_factory for GradScaler's _per_optimizer_states defaultdict,
# as well as associated "enum" values. Prefers defining these at top level because
# - Lambdas can't be pickled, so we don't want to supply a lambda as the factory.
# - Defining READY, UNSCALED, STEPPED and _refresh_per_optimizer_state within GradScaler
# causes a circular reference, which we'd rather avoid.
class OptState(Enum):
READY = 0
UNSCALED = 1
STEPPED = 2
def _refresh_per_optimizer_state():
return {"stage": OptState.READY, "found_inf_per_device": {}}
class GradScaler(object):
_scale: Optional[torch.Tensor]
    _growth_tracker: Optional[torch.Tensor]
_per_optimizer_states: Dict[int, Dict[str, Any]]
"""
An instance ``scaler`` of :class:`GradScaler` helps perform the steps of gradient scaling
conveniently.
* ``scaler.scale(loss)`` multiplies a given loss by ``scaler``'s current scale factor.
* ``scaler.step(optimizer)`` safely unscales gradients and calls ``optimizer.step()``.
* ``scaler.update()`` updates ``scaler``'s scale factor.
Example::
# Creates a GradScaler once at the beginning of training.
scaler = GradScaler()
for epoch in epochs:
for input, target in data:
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
# Scales loss. Calls backward() on scaled loss to create scaled gradients.
scaler.scale(loss).backward()
# scaler.step() first unscales gradients of the optimizer's params.
# If gradients don't contain infs/NaNs, optimizer.step() is then called,
# otherwise, optimizer.step() is skipped.
scaler.step(optimizer)
# Updates the scale for next iteration.
scaler.update()
See the :ref:`Automatic Mixed Precision examples<amp-examples>` for usage
(along with autocasting) in more complex cases like gradient clipping, gradient accumulation, gradient penalty,
and multiple losses/optimizers.
``scaler`` dynamically estimates the scale factor each iteration. To minimize gradient underflow,
a large scale factor should be used. However, ``float16`` values can "overflow" (become inf or NaN) if
the scale factor is too large. Therefore, the optimal scale factor is the largest factor that can be used
without incurring inf or NaN gradient values.
``scaler`` approximates the optimal scale factor over time by checking the gradients for infs and NaNs during every
``scaler.step(optimizer)`` (or optional separate ``scaler.unscale_(optimizer)``, see :meth:`unscale_`).
* If infs/NaNs are found, ``scaler.step(optimizer)`` skips the underlying ``optimizer.step()`` (so the params
themselves remain uncorrupted) and ``update()`` multiplies the scale by ``backoff_factor``.
* If no infs/NaNs are found, ``scaler.step(optimizer)`` runs the underlying ``optimizer.step()`` as usual.
If ``growth_interval`` unskipped iterations occur consecutively, ``update()`` multiplies the scale by
``growth_factor``.
The scale factor often causes infs/NaNs to appear in gradients for the first few iterations as its
value calibrates. ``scaler.step`` will skip the underlying ``optimizer.step()`` for these
iterations. After that, step skipping should occur rarely (once every few hundred or thousand iterations).
Args:
init_scale (float, optional, default=2.**16): Initial scale factor.
growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during
:meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations.
backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during
:meth:`update` if inf/NaN gradients occur in an iteration.
growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients
that must occur for the scale to be multiplied by ``growth_factor``.
enabled (bool, optional): If ``False``, disables gradient scaling. :meth:`step` simply
invokes the underlying ``optimizer.step()``, and other methods become no-ops.
Default: ``True``
"""
def __init__(self,
init_scale=2.**16,
growth_factor=2.0,
backoff_factor=0.5,
growth_interval=2000,
enabled=True):
if enabled and amp_definitely_not_available():
warnings.warn("torch.cuda.amp.GradScaler is enabled, but CUDA is not available. Disabling.")
self._enabled = False
else:
self._enabled = enabled
if self._enabled:
assert growth_factor > 1.0, "The growth factor must be > 1.0."
assert backoff_factor < 1.0, "The backoff factor must be < 1.0."
self._init_scale = init_scale
# self._scale will be lazily initialized during the first call to scale()
self._scale = None
self._growth_factor = growth_factor
self._backoff_factor = backoff_factor
self._growth_interval = growth_interval
self._init_growth_tracker = 0
# self._growth_tracker will be lazily initialized during the first call to scale()
self._growth_tracker = None
self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
def _check_scale_growth_tracker(self, funcname) -> Tuple[torch.Tensor, torch.Tensor]:
fix = "This may indicate your script did not use scaler.scale(loss or outputs) earlier in the iteration."
assert self._scale is not None, "Attempted {} but _scale is None. ".format(funcname) + fix
assert self._growth_tracker is not None, "Attempted {} but _growth_tracker is None. ".format(funcname) + fix
return (self._scale, self._growth_tracker)
def _lazy_init_scale_growth_tracker(self, dev):
assert self._growth_tracker is None, "_growth_tracker initialized before _scale"
self._scale = torch.full((1,), self._init_scale, dtype=torch.float32, device=dev)
self._growth_tracker = torch.full((1,), self._init_growth_tracker, dtype=torch.int32, device=dev)
def scale(self, outputs):
"""
Multiplies ('scales') a tensor or list of tensors by the scale factor.
Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned
unmodified.
Args:
outputs (Tensor or iterable of Tensors): Outputs to scale.
"""
if not self._enabled:
return outputs
# Short-circuit for the common case.
if isinstance(outputs, torch.Tensor):
assert outputs.is_cuda or outputs.device.type == 'xla'
if self._scale is None:
self._lazy_init_scale_growth_tracker(outputs.device)
assert self._scale is not None
return outputs * self._scale.to(device=outputs.device, non_blocking=True)
# Invoke the more complex machinery only if we're treating multiple outputs.
stash: List[_MultiDeviceReplicator] = [] # holds a reference that can be overwritten by apply_scale
def apply_scale(val):
if isinstance(val, torch.Tensor):
assert val.is_cuda or val.device.type == 'xla'
if len(stash) == 0:
if self._scale is None:
self._lazy_init_scale_growth_tracker(val.device)
assert self._scale is not None
stash.append(_MultiDeviceReplicator(self._scale))
return val * stash[0].get(val.device)
elif isinstance(val, abc.Iterable):
iterable = map(apply_scale, val)
if isinstance(val, list) or isinstance(val, tuple):
return type(val)(iterable)
else:
return iterable
else:
raise ValueError("outputs must be a Tensor or an iterable of Tensors")
return apply_scale(outputs)
def _unscale_grads_(self, optimizer, inv_scale, found_inf, allow_fp16):
per_device_inv_scale = _MultiDeviceReplicator(inv_scale)
per_device_found_inf = _MultiDeviceReplicator(found_inf)
# To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype.
# There could be hundreds of grads, so we'd like to iterate through them just once.
# However, we don't know their devices or dtypes in advance.
# https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict
        # mypy reportedly struggles with nested defaultdict type annotations (see the link above).
per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) # type: ignore[var-annotated]
with torch.no_grad():
for group in optimizer.param_groups:
for param in group["params"]:
if param.grad is None:
continue
if (not allow_fp16) and param.grad.dtype == torch.float16:
raise ValueError("Attempting to unscale FP16 gradients.")
if param.grad.is_sparse:
# is_coalesced() == False means the sparse grad has values with duplicate indices.
# coalesce() deduplicates indices and adds all values that have the same index.
# For scaled fp16 values, there's a good chance coalescing will cause overflow,
# so we should check the coalesced _values().
if param.grad.dtype is torch.float16:
param.grad = param.grad.coalesce()
to_unscale = param.grad._values()
else:
to_unscale = param.grad
# TODO: is there a way to split by device and dtype without appending in the inner loop?
per_device_and_dtype_grads[to_unscale.device][to_unscale.dtype].append(to_unscale)
for device, per_dtype_grads in per_device_and_dtype_grads.items():
for grads in per_dtype_grads.values():
torch._amp_foreach_non_finite_check_and_unscale_(grads,
per_device_found_inf.get(device),
per_device_inv_scale.get(device))
return per_device_found_inf._per_device_tensors
def unscale_(self, optimizer):
"""
Divides ("unscales") the optimizer's gradient tensors by the scale factor.
:meth:`unscale_` is optional, serving cases where you need to
:ref:`modify or inspect gradients<working-with-unscaled-gradients>`
between the backward pass(es) and :meth:`step`.
If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`.
Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients::
...
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
scaler.step(optimizer)
scaler.update()
Args:
optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled.
.. note::
:meth:`unscale_` does not incur a CPU-GPU sync.
.. warning::
:meth:`unscale_` should only be called once per optimizer per :meth:`step` call,
and only after all gradients for that optimizer's assigned parameters have been accumulated.
Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError.
.. warning::
:meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute.
"""
if not self._enabled:
return
self._check_scale_growth_tracker("unscale_")
optimizer_state = self._per_optimizer_states[id(optimizer)]
if optimizer_state["stage"] is OptState.UNSCALED:
raise RuntimeError("unscale_() has already been called on this optimizer since the last update().")
elif optimizer_state["stage"] is OptState.STEPPED:
raise RuntimeError("unscale_() is being called after step().")
# FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
assert self._scale is not None
inv_scale = self._scale.double().reciprocal().float()
found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=self._scale.device)
optimizer_state["found_inf_per_device"] = self._unscale_grads_(optimizer, inv_scale, found_inf, False)
optimizer_state["stage"] = OptState.UNSCALED
def _maybe_opt_step(self, optimizer, optimizer_state, *args, **kwargs):
retval = None
if not sum(v.item() for v in optimizer_state["found_inf_per_device"].values()):
retval = optimizer.step(*args, **kwargs)
return retval
def step(self, optimizer, *args, **kwargs):
"""
:meth:`step` carries out the following two operations:
1. Internally invokes ``unscale_(optimizer)`` (unless :meth:`unscale_` was explicitly called for ``optimizer``
earlier in the iteration). As part of the :meth:`unscale_`, gradients are checked for infs/NaNs.
2. If no inf/NaN gradients are found, invokes ``optimizer.step()`` using the unscaled
gradients. Otherwise, ``optimizer.step()`` is skipped to avoid corrupting the params.
``*args`` and ``**kwargs`` are forwarded to ``optimizer.step()``.
Returns the return value of ``optimizer.step(*args, **kwargs)``.
Args:
optimizer (torch.optim.Optimizer): Optimizer that applies the gradients.
args: Any arguments.
kwargs: Any keyword arguments.
.. warning::
Closure use is not currently supported.
"""
if (not self._enabled):
return optimizer.step(*args, **kwargs)
if "closure" in kwargs:
raise RuntimeError("Closure use is not currently supported if GradScaler is enabled.")
self._check_scale_growth_tracker("step")
optimizer_state = self._per_optimizer_states[id(optimizer)]
if optimizer_state["stage"] is OptState.STEPPED:
raise RuntimeError("step() has already been called since the last update().")
retval = None
if (hasattr(optimizer, "_step_supports_amp_scaling") and optimizer._step_supports_amp_scaling):
# This optimizer has customized scale-handling logic, so we can call optimizer.step() directly.
# The contract with custom optimizers is that their step() should accept an additional,
# optional grad_scaler kwarg. We append self to the kwargs so the custom optimizer has full information:
# it can query its own state, invoke unscale_ on itself, etc
retval = optimizer.step(*args, **dict(kwargs, grad_scaler=self))
optimizer_state["stage"] = OptState.STEPPED
return retval
if optimizer_state["stage"] is OptState.READY:
self.unscale_(optimizer)
assert len(optimizer_state["found_inf_per_device"]) > 0, "No inf checks were recorded for this optimizer."
retval = self._maybe_opt_step(optimizer, optimizer_state, *args, **kwargs)
optimizer_state["stage"] = OptState.STEPPED
return retval
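    # One heuristic sketch (not an official API) for detecting a skipped step: compare
    # the scale before and after ``update()``. Absent a manual ``new_scale``, the scale
    # only shrinks when inf/NaN gradients caused the step to be skipped.
    #
    #     scale_before = scaler.get_scale()
    #     scaler.step(optimizer)
    #     scaler.update()
    #     step_was_skipped = scaler.get_scale() < scale_before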
def update(self, new_scale=None):
"""
Updates the scale factor.
If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
the scale is multiplied by ``growth_factor`` to increase it.
Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
        used directly; it is used to fill GradScaler's internal scale tensor, so if
``new_scale`` was a tensor, later in-place changes to that tensor will not further
affect the scale GradScaler uses internally.)
Args:
new_scale (float or :class:`torch.cuda.FloatTensor`, optional, default=None): New scale factor.
.. warning::
:meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
been invoked for all optimizers used this iteration.
"""
if not self._enabled:
return
_scale, _growth_tracker = self._check_scale_growth_tracker("update")
if new_scale is not None:
# Accept a new user-defined scale.
if isinstance(new_scale, float):
self._scale.fill_(new_scale) # type: ignore[union-attr]
else:
reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False."
assert isinstance(new_scale, torch.cuda.FloatTensor), reason # type: ignore[attr-defined]
assert new_scale.numel() == 1, reason
assert new_scale.requires_grad is False, reason
self._scale.copy_(new_scale) # type: ignore[union-attr]
else:
# Consume shared inf/nan data collected from optimizers to update the scale.
# If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
found_infs = [found_inf.to(device=_scale.device, non_blocking=True)
for state in self._per_optimizer_states.values()
for found_inf in state["found_inf_per_device"].values()]
assert len(found_infs) > 0, "No inf checks were recorded prior to update."
found_inf_combined = found_infs[0]
if len(found_infs) > 1:
for i in range(1, len(found_infs)):
found_inf_combined += found_infs[i]
torch._amp_update_scale_(_scale,
_growth_tracker,
found_inf_combined,
self._growth_factor,
self._backoff_factor,
self._growth_interval)
# To prepare for next iteration, clear the data collected from optimizers this iteration.
self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
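    # Sketch for multiple optimizers in one iteration (``optimizer_a`` and
    # ``optimizer_b`` are hypothetical): each optimizer gets its own ``step``, but
    # ``update`` is called exactly once, after all of them.
    #
    #     scaler.scale(loss).backward()
    #     scaler.step(optimizer_a)
    #     scaler.step(optimizer_b)
    #     scaler.update()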
def _get_scale_async(self):
return self._scale
def get_scale(self):
"""
Returns a Python float containing the current scale, or 1.0 if scaling is disabled.
.. warning::
:meth:`get_scale` incurs a CPU-GPU sync.
"""
if self._enabled:
return self._init_scale if self._scale is None else self._get_scale_async().item()
else:
return 1.0
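    # Because ``get_scale`` synchronizes, a common pattern is to read it only
    # occasionally, e.g. when logging (``step_idx`` and ``log_every`` are hypothetical):
    #
    #     if step_idx % log_every == 0:
    #         print(f"current loss scale: {scaler.get_scale()}")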
def get_growth_factor(self):
r"""
Returns a Python float containing the scale growth factor.
"""
return self._growth_factor
def set_growth_factor(self, new_factor):
r"""
Args:
            new_factor (float): Value to use as the new scale growth factor.
"""
self._growth_factor = new_factor
def get_backoff_factor(self):
r"""
Returns a Python float containing the scale backoff factor.
"""
return self._backoff_factor
def set_backoff_factor(self, new_factor):
r"""
Args:
            new_factor (float): Value to use as the new scale backoff factor.
"""
self._backoff_factor = new_factor
def get_growth_interval(self):
r"""
Returns a Python int containing the growth interval.
"""
return self._growth_interval
def set_growth_interval(self, new_interval):
r"""
Args:
new_interval (int): Value to use as the new growth interval.
"""
self._growth_interval = new_interval
def _get_growth_tracker(self):
if self._enabled:
return self._init_growth_tracker if self._growth_tracker is None else self._growth_tracker.item()
else:
return 0
def is_enabled(self):
r"""
Returns a bool indicating whether this instance is enabled.
"""
return self._enabled
def state_dict(self):
r"""
Returns the state of the scaler as a :class:`dict`. It contains five entries:
* ``"scale"`` - a Python float containing the current scale
* ``"growth_factor"`` - a Python float containing the current growth factor
* ``"backoff_factor"`` - a Python float containing the current backoff factor
* ``"growth_interval"`` - a Python int containing the current growth interval
* ``"_growth_tracker"`` - a Python int containing the number of recent consecutive unskipped steps.
If this instance is not enabled, returns an empty dict.
.. note::
If you wish to checkpoint the scaler's state after a particular iteration, :meth:`state_dict`
should be called after :meth:`update`.
"""
return {"scale": self.get_scale(),
"growth_factor": self._growth_factor,
"backoff_factor": self._backoff_factor,
"growth_interval": self._growth_interval,
"_growth_tracker": self._get_growth_tracker()} if self._enabled else {}
def load_state_dict(self, state_dict):
r"""
Loads the scaler state. If this instance is disabled, :meth:`load_state_dict` is a no-op.
Args:
state_dict(dict): scaler state. Should be an object returned from a call to :meth:`state_dict`.
"""
if not self._enabled:
return
if len(state_dict) == 0:
raise RuntimeError("The source state dict is empty, possibly because it was saved "
"from a disabled instance of GradScaler.")
self._init_scale = state_dict["scale"]
if self._scale is not None:
self._scale.fill_(state_dict["scale"])
self._growth_factor = state_dict["growth_factor"]
self._backoff_factor = state_dict["backoff_factor"]
self._growth_interval = state_dict["growth_interval"]
self._init_growth_tracker = state_dict["_growth_tracker"]
if self._growth_tracker is not None:
self._growth_tracker.fill_(state_dict["_growth_tracker"])
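    # Checkpointing sketch (the path and surrounding ``model``/``optimizer`` objects are
    # hypothetical): save the scaler state together with the model and optimizer, and
    # restore all three when resuming.
    #
    #     torch.save({"model": model.state_dict(),
    #                 "optimizer": optimizer.state_dict(),
    #                 "scaler": scaler.state_dict()}, "checkpoint.pt")
    #     ...
    #     ckpt = torch.load("checkpoint.pt")
    #     model.load_state_dict(ckpt["model"])
    #     optimizer.load_state_dict(ckpt["optimizer"])
    #     scaler.load_state_dict(ckpt["scaler"])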
def __getstate__(self):
state = self.__dict__.copy()
if self._enabled:
assert len(self._per_optimizer_states) == 0, "A GradScaler instance may only be pickled at the beginning "\
"of an iteration, or at the end after scaler.update()."
# Pickling _scale and _growth_tracker Tensors directly triggers
# "warnings.warn("pickle support for Storage will be removed in 1.5..."
# so instead, we set the unpickled instance up to reinitialize them lazily.
state['_init_scale'] = self.get_scale()
state['_init_growth_tracker'] = self._get_growth_tracker()
state['_scale'] = None
state['_growth_tracker'] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
def _check_inf_per_device(self, optimizer):
_scale, _ = self._check_scale_growth_tracker("_check_inf_per_device")
dummy_inv_scale = torch.full((1,), 1.0, dtype=torch.float32, device=_scale.device)
found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=_scale.device)
self._per_optimizer_states[id(optimizer)]["found_inf_per_device"] = \
self._unscale_grads_(optimizer, dummy_inv_scale, found_inf, True)
return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"]
def _found_inf_per_device(self, optimizer):
return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"]
| pytorch-master | torch/cuda/amp/grad_scaler.py |
from .autocast_mode import autocast, custom_fwd, custom_bwd # noqa: F401
from .grad_scaler import GradScaler # noqa: F401
| pytorch-master | torch/cuda/amp/__init__.py |
import torch
from importlib.util import find_spec
def amp_definitely_not_available():
return not (torch.cuda.is_available() or find_spec('torch_xla'))
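# A minimal usage sketch (see GradScaler.__init__ above): when neither CUDA nor
# torch_xla is available, gradient scaling is pointless, so callers fall back to a
# no-op. ``scaler`` below is a hypothetical variable, not defined in this module.
#
#     if amp_definitely_not_available():
#         scaler = None  # hypothetical fallback; train in full precision instead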
| pytorch-master | torch/cuda/amp/common.py |