from typing import List, Optional
from torchgen.api import dispatcher
from torchgen.api.types import (
BaseCType,
Binding,
boolT,
ConstRefCType,
CType,
longT,
NamedCType,
tensorT,
)
from torchgen.model import (
Argument,
BaseTy,
BaseType,
FunctionSchema,
NativeFunctionsViewGroup,
)
# This file describes the translation of JIT schema to APIs used
# when creating view lambdas that are used by the functionalization pass.
# There are two types of lambdas: forward lambdas and reverse lambdas.
# These APIs mostly follow the dispatcher API, with a few quirks:
# - The lambda capture has to convert reference types to value types
# - While the forward lambda just directly calls into the at::_ops API
# (following the dispatcher convention), the logic here for the reverse lambda
# is responsible for generating both the call-site, and the declarations
# (which are implemented manually in the at::functionalization::impl namespace).
# The lambdas generated for each view op in the functionalization pass are of the form
# [capture_arguments](outer_arguments) -> returns_type {
# return name(inner_arguments);
# }
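# As a rough, hand-written illustration (a hypothetical single-output view op
# `foo(Tensor(a) self, int dim)`; not verbatim codegen output), the generated pair of
# lambdas would look something like:
#
#   // forward lambda: replays the view on top of "base"
#   [reapply_views = ..., dim = ...](const at::Tensor & base, int64_t mutated_view_idx) -> at::Tensor {
#     return at::_ops::foo::call(base, dim);
#   }
#
#   // reverse lambda: maps a mutation on the view back onto "base"
#   [reapply_views = ..., dim = ...](const at::Tensor & base, const at::Tensor & mutated_view,
#                                    int64_t mutated_view_idx) -> at::Tensor {
#     return at::functionalization::FunctionalInverses::foo_copy_inverse(base, mutated_view, reapply_views, dim);
#   }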
# Define some specific lambda input arguments.
base_binding = Binding(
name="base",
nctype=NamedCType(name="base", type=ConstRefCType(BaseCType(tensorT))),
argument=Argument(
name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None
),
default=None,
)
mutated_view_binding = Binding(
name="mutated_view",
nctype=NamedCType(name="mutated_view", type=ConstRefCType(BaseCType(tensorT))),
argument=Argument(
name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None
),
default=None,
)
mutated_view_idx_binding = Binding(
name="mutated_view_idx",
nctype=NamedCType(name="mutated_view_idx", type=BaseCType(longT)),
argument=Argument(
name="base", type=BaseType(BaseTy.Tensor), default=None, annotation=None
),
default=None,
)
reapply_views_binding = Binding(
name="reapply_views",
nctype=NamedCType(name="reapply_views", type=BaseCType(boolT)),
argument=Argument(
name="reapply_views", type=BaseType(BaseTy.bool), default=None, annotation=None
),
default=None,
)
# The lambda capture itself doesn't have a name.
# The name returned here corresponds to the name of the inner function called by the lambda.
def name(
g: NativeFunctionsViewGroup,
*,
is_reverse: bool,
include_namespace: bool,
reapply_views: Optional[bool] = None,
) -> str:
if reapply_views is None:
# reapply_views is only important for the fwd lambda,
# since we always plumb the runtime "reapply_views" argument into the reverse function.
assert is_reverse
if is_reverse:
# for the reverse: the name of the inverse function always involves "view_copy",
# and we plumb the "reapply_views" flag into that function.
# (We could avoid doing that, but that would require writing out twice as many view inverse functions).
assert g.view_copy is not None
api_name = g.view_copy.func.name.unambiguous_name()
# in the reverse case, we codegen both the call-sites (which need the full namespace) and the declarations (which don't)
if include_namespace:
return f"at::functionalization::FunctionalInverses::{api_name}_inverse"
else:
return f"{api_name}_inverse"
# in the forward case, we just directly call into the at::_ops API (so we always need the namespace)
assert include_namespace
assert g.view_copy is not None
api_name = (
g.view.func.name.unambiguous_name()
if reapply_views
else g.view_copy.func.name.unambiguous_name()
)
return f"at::_ops::{api_name}::call"
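# E.g. (illustrative, for a hypothetical view group `foo` / `foo_copy`):
#   name(g, is_reverse=False, include_namespace=True, reapply_views=True)  -> "at::_ops::foo::call"
#   name(g, is_reverse=False, include_namespace=True, reapply_views=False) -> "at::_ops::foo_copy::call"
#   name(g, is_reverse=True, include_namespace=True)  -> "at::functionalization::FunctionalInverses::foo_copy_inverse"
#   name(g, is_reverse=True, include_namespace=False) -> "foo_copy_inverse"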
def capture_arguments(func: FunctionSchema, *, is_reverse: bool) -> List[Binding]:
# Capture arguments include all arguments except `self`.
# Importantly, they don't include any C++ reference types (or else we'll get a dangling reference in the capture),
# so any reference types (e.g. IntArrayRef) need to be converted to value types (e.g. std::vector<int64_t>).
args = func.arguments.flat_all
assert args[0].type == BaseType(BaseTy.Tensor)
non_self_args = args[1:]
non_self_value_bindings = [
dispatcher.argument(a, remove_non_owning_ref_types=True) for a in non_self_args
]
all_bindings = [reapply_views_binding] + non_self_value_bindings
return all_bindings
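# E.g. (illustrative): for a hypothetical `foo(Tensor(a) self, int[] sizes)`, the captures would be
# roughly `bool reapply_views` plus an owning `::std::vector<int64_t> sizes` (instead of IntArrayRef),
# so the lambda can never hold a dangling reference to the caller's argument.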
def returns_type(func: FunctionSchema) -> CType:
# Assertion: all view ops return tensor-like outputs
assert len(func.returns) >= 1
for ret in func.returns:
assert ret.type.is_tensor_like()
# However, the return type of the lambda is always an individual tensor.
# For multi-tensor outputs, each tensor needs to be tracked individually.
return BaseCType(tensorT)
def outer_arguments(*, is_reverse: bool) -> List[Binding]:
if is_reverse:
return [base_binding, mutated_view_binding, mutated_view_idx_binding]
else:
return [base_binding, mutated_view_idx_binding]
def inner_call_index(func: FunctionSchema) -> Optional[Binding]:
# For view ops that return multiple tensors (like `split`), we generate a separate lambda for each output.
# When we replay a view op that returns multiple tensors, we need to index into the output appropriately
if len(func.returns) > 1 or (
len(func.returns) == 1 and func.returns[0].type.is_list_like()
):
return mutated_view_idx_binding
return None
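# E.g. (illustrative): `split`-style ops return a `Tensor[]`, so the call to their view_inverse
# function threads `mutated_view_idx` through as an extra index argument; single-output views
# like `transpose` don't take the extra index.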
def inner_arguments(func: FunctionSchema, is_reverse: bool) -> List[Binding]:
args = func.arguments.flat_all
assert args[0].type == BaseType(BaseTy.Tensor)
non_self_args = args[1:]
# The forward lambda calls the at::_ops API, while the reverse lambda calls the view inverse API.
# Both of these follow the dispatcher API.
non_self_bindings = [dispatcher.argument(a) for a in non_self_args]
if not is_reverse:
# the forward lambda swaps out the original tensor argument with the lambda arg "base"
return [base_binding] + non_self_bindings
else:
# the reverse lambda does the same, but with an additional "mutated_view" arg
# additionally, we have a calling convention: for view ops that return multiple tensor outputs
# their corresponding view_inverse function takes in an additional index argument.
index_binding = inner_call_index(func)
if index_binding is not None:
return [
base_binding,
mutated_view_binding,
reapply_views_binding,
index_binding,
] + non_self_bindings
else:
return [
base_binding,
mutated_view_binding,
reapply_views_binding,
] + non_self_bindings
# pytorch-master | torchgen/api/functionalization.py
import re
from dataclasses import dataclass
from typing import Dict, List, Match, Optional, Sequence, Set, Tuple
from torchgen.api import cpp
from torchgen.api.types import Binding, NamedCType
from torchgen.model import (
FunctionSchema,
NativeFunction,
NativeFunctionsViewGroup,
SchemaKind,
Type,
)
from torchgen.utils import IDENT_REGEX
# Represents a saved attribute involved in backward calculation.
# Note that it can be a derived property of an input argument, e.g.:
# we could save `other.scalar_type()` instead of the entire `other` tensor.
@dataclass(frozen=True)
class SavedAttribute:
# The NamedCType holds the updated name and cpp type of the attribute
# for the name, a suffix is appended if it's a derived property, e.g.: `other_scalar_type`
nctype: NamedCType
# The expression to read the derived property at save time, e.g.:
# `other.scalar_type()`.
expr: str
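# E.g. (illustrative, approximate): saving `other.scalar_type()` instead of the whole `other`
# tensor would be represented roughly as
#   SavedAttribute(
#       nctype=NamedCType(name="other_scalar_type", type=BaseCType(scalarTypeT)),
#       expr="other.scalar_type()",
#   )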
# Represents a backward formula that calculates derivatives for one
# or more tensors.
@dataclass(frozen=True)
class Derivative:
# The formula string (legit C++ expression).
# Note that expressions against input arguments have been replaced with the
# corresponding saved attributes.
# E.g.:
# raw formula: `mul_tensor_backward(grad, self, other.scalar_type())`
# here: `mul_tensor_backward(grad, self, other_scalar_type)`
formula: str
# The formula string before input argument replacement
original_formula: str
# Names of the arguments for which this formula calculates derivatives.
var_names: Tuple[str, ...]
# Saved inputs that are referenced by the formula.
saved_inputs: Tuple[SavedAttribute, ...]
# Saved outputs that are referenced by the formula.
saved_outputs: Tuple[SavedAttribute, ...]
# Gradients that are referenced by name in the formula.
named_gradients: Set[str]
# Represents a forward formula that calculates forward derivatives
# for one tensor.
@dataclass(frozen=True)
class ForwardDerivative:
# The formula string (legit C++ expression).
# Note that special keywords such as "linear" or "element_wise" have been
# replaced by the automatically generated formula.
formula: str
# Name of the output arguments for which this formula calculates forward
# derivatives
var_names: Tuple[str, ...]
# Type of the output arguments for which this formula calculates forward
# derivatives
var_types: Tuple[Type, ...]
# Inputs for which the forward derivatives are required for this formula
required_inputs_fw_grad: Optional[Tuple[str, ...]]
# Inputs for which the primal is required for this formula
required_inputs_primal: Optional[Tuple[str, ...]]
# Flag to specify if this formula requires the original value of self
# This is only used by inplace operations
required_original_self_value: bool
# If this formula is specified in derivatives.yaml or if we are re-using the
# out of place formula for inplace
is_reusing_outplace_formula: bool
# Represents differentiability info for a NativeFunction.
@dataclass(frozen=True)
class DifferentiabilityInfo:
# The base name read from derivatives.yaml.
name: str
# The matching native function.
#
# There can be multiple NativeFunction having the same base name:
# - different overloads with different types of input arguments;
# - in-place/out/functional variants of the same function;
#
# We first use the schema string (under the 'name' key) in derivatives.yaml
# to find the NativeFunction having the same schema string.
# Then we find the in-place/out/functional variants of the matching function.
# Among these variants, we choose the one having the same name as the
# derivatives.yaml entry. If there is no exact match, then we choose the
# in-place variant.
# TODO: maybe the logic to search for all variants is no longer necessary?
func: NativeFunction
# The name of the generated autograd function.
# It's set only if we will calculate a derivative, i.e.
# 'args_with_derivatives' is not empty.
op: Optional[str]
# The derivatives formulae for this function.
# Note that the length of this sequence is the number of differentiable inputs
derivatives: Sequence[Derivative]
# The forward derivatives formulae for this function.
# Note that the length of this sequence is the number of differentiable outputs
forward_derivatives: Sequence[ForwardDerivative]
# The union of 'saved_inputs' of all 'derivatives'.
all_saved_inputs: Sequence[SavedAttribute]
# The union of 'saved_outputs' of all 'derivatives'.
all_saved_outputs: Sequence[SavedAttribute]
# All named gradients that are available for use, in the same
# order as in the grads vector.
available_named_gradients: Sequence[str]
# The named gradients that are used in any of the derivatives.
# Invariant: all(name in available_named_gradients for name in used_named_gradients)
used_named_gradients: Set[str]
# The function's input arguments for which it calculates derivatives.
# It's the union of 'var_names' of all 'derivatives', sorted by the
# argument order in the function schema.
args_with_derivatives: Sequence[Binding]
# Names of arguments whose derivative formula is 'non_differentiable'.
non_differentiable_arg_names: Sequence[str]
# Raw data read from derivatives.yaml.
output_differentiability: Optional[List[bool]]
# output_differentiability in derivatives.yaml can be a list of
# conditions that express if the output is differentiable. In this case,
# the number of conditions must match the number of outputs
# (NB: we only support one condition right now).
# output_differentiability gets populated with True for each condition,
# while output_differentiability_conditions gets populated with the conditions
output_differentiability_conditions: Optional[List[str]]
@property
def has_derivatives(self) -> bool:
return len(self.args_with_derivatives) > 0
# Generates a new DifferentiabilityInfo using the exact same set of derivative information,
# but with a new operator name.
# This is used when generating "copy" variants of view ops,
# which are able to use the exact same derivative formula as the original view op
# See Note [Codegen'd {view}_copy Operators]
def create_view_copy_from_view_derivative(
self, g: NativeFunctionsViewGroup
) -> Optional["DifferentiabilityInfo"]:
if g.view_copy is None:
return None
f = g.view_copy
name_split_by_period = self.name.split(".", maxsplit=2)
# Append a "_copy" to the base name of the operator (but keep the overload name the same)
view_copy_name = f"{name_split_by_period[0]}_copy." + ".".join(
name_split_by_period[1:]
)
view_copy_op_name = None if self.op is None else f"{self.op}_copy"
return DifferentiabilityInfo(
# Use the "_copy" version of name/func/op
name=view_copy_name,
func=f,
op=view_copy_op_name,
# But keep all derivative info the same
derivatives=self.derivatives,
forward_derivatives=self.forward_derivatives,
all_saved_inputs=self.all_saved_inputs,
all_saved_outputs=self.all_saved_outputs,
available_named_gradients=self.available_named_gradients,
used_named_gradients=self.used_named_gradients,
args_with_derivatives=self.args_with_derivatives,
non_differentiable_arg_names=self.non_differentiable_arg_names,
output_differentiability=self.output_differentiability,
output_differentiability_conditions=self.output_differentiability_conditions,
)
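# E.g. (illustrative): an entry named "transpose.int" with op "TransposeBackward0" is copied to
# name "transpose_copy.int" with op "TransposeBackward0_copy", while every derivative formula
# and saved attribute is reused unchanged.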
def uses_ident(info: Optional[DifferentiabilityInfo], ident: str) -> bool:
if info is None:
return False
for derivative in info.derivatives:
formula = derivative.formula
if re.search(IDENT_REGEX.format(ident), formula):
return True
return False
def uses_retain_variables(info: Optional[DifferentiabilityInfo]) -> bool:
return uses_ident(info, "retain_variables")
def uses_single_grad(info: Optional[DifferentiabilityInfo]) -> bool:
return uses_ident(info, "grad")
# Represents a differentiable `Argument`.
# How is it different from the `Argument` type?
# - It's processed Arguments which are differentiable and only used in the
# context of the autograd codegen;
# - It can represent SelfArgument or regular Argument but not TensorOptionsArgument;
@dataclass(frozen=True)
class DifferentiableInput:
name: str
type: Type
# TODO: only to keep it byte-for-byte compatible with the old codegen, should remove.
cpp_type: str
# Represents a differentiable `Return`.
# How is it different from the `Return` type?
# - The name in `Return` is optional. Here it is always populated using the same
# `cpp.return_names()` method.
# TODO: some cpp naming logic (e.g. resolving name conflict) might be irrelevant?
# - It's processed Returns which are differentiable, in compliance with the
# `output_differentiability` field defined in derivatives.yaml (if specified),
# and are only used in the context of the autograd codegen;
@dataclass(frozen=True)
class DifferentiableOutput:
name: str
type: Type
# TODO: only to keep it byte-for-byte compatible with the old codegen, should remove.
cpp_type: str
@dataclass(frozen=True)
class NativeFunctionWithDifferentiabilityInfo:
func: NativeFunction
info: Optional[Dict[str, DifferentiabilityInfo]]
fw_derivatives: Optional[Dict[str, Sequence[ForwardDerivative]]]
# TODO: Update comment below since it is out of date.
def dispatch_strategy(fn: NativeFunctionWithDifferentiabilityInfo) -> str:
"""How are we going to call the underlying implementation of a
declaration? There are two strategies:
- use_derived: we want to call the implementation on CPUDoubleType
(or a similar, derived Type instance). Because these derived
instances deal in Tensors, not Variables (it's a completely different
object, so it doesn't dispatch back to VariableType), code on
this dispatch path needs to wrap/unwrap tensors. If the
derived implementation takes and returns tensors, the
implementation is usually differentiable (although we also use
the derived dispatch path for non-differentiable functions
that we still want to dispatch on the derived Type instance;
e.g., size())
- use_type: we want to call the implementation on Type, because
it is implemented concretely, and the functions it invokes will
get dispatched back to VariableType (which will ensure that they
are differentiable.)
"""
# fn is derived as long as any of its per-key differentiability infos
# has_derivatives. dispatch_strategy() is used to guard generation of fns in VariableType
# and ADInplaceOrViewType. We want to generate these functions as long as a
# derivative is defined for ANY dispatch key.
if fn.func.is_abstract or (
fn.info is not None and any(info.has_derivatives for info in fn.info.values())
):
# If the function is abstract (not implemented on at::Type), we must
# call the implementation on the derived type with unpacked tensors.
# If the function has a derivative specified and is concrete, we could
# call either implementation. We prefer the calling the derived
# type's implementation with unpacked tensors because it is more
# performant in some cases: any internal calls to other ATen functions
# won't have the history tracked.
# If the function has a type dispatched argument (i.e. is a factory),
# we prefer calling the derived type's implementation both because it is
# more performant and to ensure factory functions return tensors with _version
# of 0 (probably not strictly necessary, but nice to have to keep versions simple
# to understand).
return "use_derived"
else:
# If the function is concrete (we don't have to override it) and we
# didn't declare it in derivatives.yaml, we'll assume that it is
# actually implemented out of differentiable functions. (This
# assumption might not hold, but then you'll see gradcheck fail.)
return "use_type"
def match_differentiability_info(
native_functions: List[NativeFunction],
differentiability_infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]],
) -> List[NativeFunctionWithDifferentiabilityInfo]:
"""Sets the "derivative" key on declarations to matching autograd function
In-place functions will use the out-of-place derivative definition if there
is no in-place specific derivative.
"""
functional_info_by_signature = {
schema.signature(strip_default=True): info_dict
for schema, info_dict in differentiability_infos.items()
if schema.kind() == SchemaKind.functional
}
non_functional_info_by_signature = {
schema.signature(strip_default=True): info_dict
for schema, info_dict in differentiability_infos.items()
if schema.kind() != SchemaKind.functional
}
def find_info(
f: NativeFunction,
) -> Tuple[Optional[Dict[str, DifferentiabilityInfo]], bool]:
# Don't bother matching info to generated out= variants
if "generated" in f.tags and f.func.kind() == SchemaKind.out:
return None, False
# (1) Check for an exact match
if f.func in differentiability_infos:
return differentiability_infos[f.func], True
# (2) If no exact match, check if the out-of-place variant
# of this operator has a match.
# i.e mul() for mul_() or mul_out()
f_sig = f.func.signature(strip_default=True)
if f_sig in functional_info_by_signature:
return functional_info_by_signature[f_sig], False
# (3) Some operators have a derivative explicitly defined for the mutable
# variant, but get a code-generated out-of-place variant which does *not*
# come with a derivative formula.
# For the generated out-of-place variant, use the mutable variant's formula
# if it exists.
if "generated" in f.tags and f_sig in non_functional_info_by_signature:
info_dict = non_functional_info_by_signature[f_sig]
# See https://github.com/pytorch/pytorch/pull/76320/files#r874816389
assert not any(
any("self" in str(inpt.nctype.name) for inpt in info.all_saved_inputs)
for info in info_dict.values()
), f"""\
Attempted to convert a derivative formula for a mutable operator
to be used automatically by its functional variant ("{str(f.func)}").
This is not currently supported (we'd need to fix up the formula in the codegen)."""
return info_dict, False
return None, False
result: List[NativeFunctionWithDifferentiabilityInfo] = []
for f in native_functions:
info_dict, is_exact_match = find_info(f)
# Currently, the '.strides()' to 'strides_or_error' replacement does not support
# 'self' derivatives of an inplace function, so we must check for this case.
if f.func.kind() == SchemaKind.inplace and (info_dict is not None):
for info in info_dict.values():
for derivative in info.derivatives:
if "self" in derivative.var_names:
for saved_input in derivative.saved_inputs:
assert "strides_or_error" not in saved_input.expr, (
"Calling '.strides()' in the 'self' derivative formula of an "
f"in-place function is not supported: {f.func}"
)
if not info_dict:
result.append(
NativeFunctionWithDifferentiabilityInfo(
func=f, info=None, fw_derivatives=None
)
)
continue
fw_derivative_dict: Dict[str, Sequence[ForwardDerivative]] = dict()
for key, info in info_dict.items():
if not info.forward_derivatives:
fw_derivative_dict[key] = []
continue
forward_derivatives = info.forward_derivatives
# For functions that have a single def for out-of-place and inplace (like abs())
if f.func.kind() == SchemaKind.inplace:
# For inplace functions there is a little bit of work to do:
# 1) Validate the formula and make sure the input that is modified is not used:
# - If there is a formula for the inplace variant of the function (is_exact_match == True) then
# we make sure that the original value of the input that is being modified inplace (self_p) is
# not used in the formula. Note that the formula can use "original_self_p" here and that would
# trigger a clone of the original input.
# - If we are re-using the out of place formula (is_exact_match == False) then we replace every
# occurrence of self_p and self_t by original_self_p and original_self_t. These will be
# populated by cloned version of the original input (either the clone done by the backward AD
# logic if self is also used in a backward formula or a special clone that we add).
# 2) At this point, there cannot be a self_p in the formula.
# 3) Change "result" into "self_p" as by design, in the inplace function codegen, the result is
# simply called self (as it is modified inplace).
# 4) Update the required primals data in case it used to contain "result" but should now contain
# "self"
# 5) If it is not an exact match, the user formula is not modifying the existing forward grad
# inplace as it should. So add some code that makes sure that we do so if the forward grad
# already exists.
assert (
len(info.forward_derivatives) == 1
) # Only single output inplace should exist
fw_info = info.forward_derivatives[0]
formula = fw_info.formula
def replace_self_with_original_self(formula: str, postfix: str) -> str:
def repl(m: Match[str]) -> str:
return f"{m.group(1)}original_self{postfix}{m.group(2)}"
return re.sub(IDENT_REGEX.format(f"self{postfix}"), repl, formula)
if re.search(IDENT_REGEX.format("self_p"), formula):
if is_exact_match:
# For manually defined formulas, don't allow the original value to be used
raise RuntimeError(
f'The formula for "{f.func.name}" is using the original value of self '
"that is being modified inplace. This would lead to wrong forward gradients. "
'Please use "result" in the formula only.'
)
else:
# When the original formula is out of place, we save a clone of the primal
# value to be able to access this value if needed
# replace "self_p"/"self_t" from the formula by "original_self_p"/"original_self_t"
formula = replace_self_with_original_self(formula, "_p")
formula = replace_self_with_original_self(formula, "_t")
# replace "result" from the formula by "self_p"
def repl(m: Match[str]) -> str:
return f"{m.group(1)}self_p{m.group(2)}"
formula = re.sub(IDENT_REGEX.format("result"), repl, formula)
required_primals = fw_info.required_inputs_primal
if re.search(IDENT_REGEX.format("self_p"), formula):
required_primals = (
required_primals + ("self",) if required_primals else ("self",)
)
if not is_exact_match:
# NOTE [In-place forward AD formula Optimization]
#
# This optimization transforms the formula to directly do inplace, i.e.
# instead of self_t.copy_(self_t.op()) we do self_t.op_() when the following are met:
#
# 1) the formula satisfies the pattern: "self_t.op(*args)"
# 2) "op" in (1) needs to be the same as the op the derivative is for
#
# (2) may seem too strict, but currently the only ops that satisfy (1) also satisfy (2)
# If there is a need, we can relax (2) to allow any op that has an in-place variant
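# E.g. (illustrative): for a hypothetical op `foo` whose out-of-place forward formula is exactly
# "self_t.foo(other_p)", the in-place variant's formula is rewritten to
#   "self_t_raw.defined() ? self_t_raw.foo_(other_p) : self_t.foo(other_p)"
# while a formula that doesn't match the pattern, say "other_t + self_t", is instead wrapped as
#   "self_t_raw.defined() ? self_t_raw.copy_(other_t + self_t) : other_t + self_t"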
is_single_method_on_self_t = False
match = re.fullmatch(r"self_t.([\w]*)\((.*)\)", formula)
if match:
op_name, between_parens = match.group(1), match.group(2)
# We want to...
# Match: self_t.op1(other_p.op2(arg))
# Avoid: self_t.op1(args) + self_t.op2(args)
# Avoid: self_t.op1(other_p.op2(arg)) + self_t.op2(args)
def check_parens_nest_level_gt_zero(s: str) -> bool:
level = 1
for ch in s:
if ch == ")":
level -= 1
if level == 0:
return False
if ch == "(":
level += 1
return True
is_single_method_on_self_t = check_parens_nest_level_gt_zero(
between_parens
)
directly_do_inplace = (
is_single_method_on_self_t and op_name == info.name
)
if directly_do_inplace:
formula = f"self_t_raw.defined() ? self_t_raw.{op_name}_({between_parens}) : {formula}"
else:
# Make sure that the forward grad is modified inplace when the original formula
# is out of place
formula = f"self_t_raw.defined() ? self_t_raw.copy_({formula}) : {formula}"
required_original_self_value = bool(
re.search(IDENT_REGEX.format("original_self_p"), formula)
) or bool(re.search(IDENT_REGEX.format("original_self_t"), formula))
forward_derivatives = [
ForwardDerivative(
formula=formula,
var_names=("self",),
var_types=fw_info.var_types,
required_inputs_fw_grad=fw_info.required_inputs_fw_grad,
required_inputs_primal=required_primals,
required_original_self_value=required_original_self_value,
is_reusing_outplace_formula=not is_exact_match,
),
]
fw_derivative_dict[key] = forward_derivatives
result.append(
NativeFunctionWithDifferentiabilityInfo(
func=f, info=info_dict, fw_derivatives=fw_derivative_dict
)
)
return result
def is_differentiable(
name: str, type: Type, info: Optional[DifferentiabilityInfo]
) -> bool:
return type.is_tensor_like() and (
info is None or name not in info.non_differentiable_arg_names
)
def gen_differentiable_outputs(
fn: NativeFunctionWithDifferentiabilityInfo, key: str = "Default"
) -> List[DifferentiableOutput]:
f = fn.func
info = fn.info[key] if fn.info else None
outputs: List[DifferentiableOutput] = [
DifferentiableOutput(
name=name, type=ret.type, cpp_type=cpp.return_type(ret).cpp_type()
)
for name, ret in zip(cpp.return_names(f), f.func.returns)
]
output_differentiability = info.output_differentiability if info else None
if output_differentiability is not None:
if len(output_differentiability) != len(outputs):
raise RuntimeError(
f"The length of output_differentiability ({len(output_differentiability)}), "
f"does not match the number of outputs ({len(outputs)})."
)
differentiable_outputs: List[DifferentiableOutput] = []
if False in output_differentiability and f.func.kind() == SchemaKind.inplace:
raise RuntimeError(
"output_differentiability=False for inplace operation (version_counter won't get updated)"
)
for differentiable, output in zip(output_differentiability, outputs):
if differentiable:
differentiable_outputs.append(output)
return differentiable_outputs
candidate_differentiable_outputs = list(
filter(lambda r: is_differentiable(r.name, r.type, info), outputs)
)
if uses_single_grad(info):
return candidate_differentiable_outputs[:1]
else:
return candidate_differentiable_outputs
# pytorch-master | torchgen/api/autograd.py
from typing import List, Union
from torchgen.api import cpp
from torchgen.api.types import (
ArgName,
ArrayRefCType,
BaseCType,
Binding,
ConstRefCType,
dimnameListT,
intArrayRefT,
iOptTensorListRefT,
iTensorListRefT,
NamedCType,
OptionalCType,
optionalIntArrayRefT,
optionalScalarRefT,
optionalTensorRefT,
scalarT,
tensorT,
)
from torchgen.model import (
Argument,
BaseTy,
BaseType,
ListType,
NativeFunctionsGroup,
OptionalType,
SelfArgument,
TensorOptionsArguments,
Type,
)
from torchgen.utils import assert_never
# This file describes the translation of JIT schema to the structured functions API.
# This is similar to the native API, but a number of historical problems with the native
# API have been fixed.
# Translation of types occurring in JIT arguments to a C++ argument type.
# NB: For now, mutable doesn't do anything; but it could if we make
# some more nominal types
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
# If it's a value type, do the value type translation
r = cpp.valuetype_type(t, binds=binds)
if r is not None:
return r
if isinstance(t, BaseType):
if t.name == BaseTy.Tensor:
return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
elif t.name == BaseTy.Scalar:
return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
else:
raise AssertionError(f"base type should have been value type {t}")
elif isinstance(t, OptionalType):
if t.elem == BaseType(BaseTy.Tensor):
return NamedCType(binds, BaseCType(optionalTensorRefT))
elif t.elem == BaseType(BaseTy.Scalar):
return NamedCType(binds, BaseCType(optionalScalarRefT))
elif isinstance(t.elem, ListType) and str(t.elem.elem) == "int":
return NamedCType(binds, BaseCType(optionalIntArrayRefT))
elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
return NamedCType(binds, OptionalCType(elem.type))
elif isinstance(t, ListType):
if t.elem == BaseType(BaseTy.Tensor):
return NamedCType(binds, BaseCType(iTensorListRefT))
elif t.elem == OptionalType(BaseType(BaseTy.Tensor)):
return NamedCType(binds, BaseCType(iOptTensorListRefT))
# TODO: delete these special cases; see torchgen.api.cpp--these
# must be changed in tandem, but there are problems; see
# https://github.com/pytorch/pytorch/pull/51485
elif str(t.elem) == "int":
return NamedCType(binds, BaseCType(intArrayRefT))
elif str(t.elem) == "Dimname":
return NamedCType(binds, BaseCType(dimnameListT))
elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
return NamedCType(binds, ArrayRefCType(elem.type))
else:
raise AssertionError(f"unrecognized type {repr(t)}")
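# A few illustrative translations (approximate):
#   Tensor     -> const at::Tensor &
#   Scalar     -> const at::Scalar &
#   Tensor?    -> at::OptionalTensorRef
#   Scalar?    -> at::OptionalScalarRef
#   int[]      -> at::IntArrayRef
#   int[]?     -> at::OptionalIntArrayRef
#   Tensor[]   -> at::ITensorListRef
#   Tensor?[]  -> at::IOptTensorListRef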
def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
return argumenttype_type(a.type, mutable=a.is_write, binds=binds)
# returns_type intentionally omitted, because structured kernels never "return";
# instead, they always indirectly report their outputs (in the case of a meta
# function, by calling set_output; in the case of an impl function, by writing
# directly into the provided out argument).
# Structured kernels are never defaulted
def argument(a: Union[Argument, SelfArgument, TensorOptionsArguments]) -> List[Binding]:
if isinstance(a, Argument):
return [
Binding(
nctype=argument_type(a, binds=a.name),
name=a.name,
default=None,
argument=a,
)
]
elif isinstance(a, SelfArgument):
return argument(a.argument)
elif isinstance(a, TensorOptionsArguments):
raise AssertionError("structured kernels don't support TensorOptions yet")
else:
assert_never(a)
def impl_arguments(g: NativeFunctionsGroup) -> List[Binding]:
args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
if g.out.precomputed:
# A list of parameters for the impl function with
# certain parameters replaced with precomputed counterparts
# as specified in native_functions.yaml.
non_out_args_replaced: List[
Union[Argument, TensorOptionsArguments, SelfArgument]
] = []
for a in g.out.func.arguments.non_out:
if isinstance(a, Argument) and a.name in g.out.precomputed.replace:
# If a is in precompute.replace, append the parameters
# that should replace it onto non_out_args_replaced.
for replacement in g.out.precomputed.replace[a.name]:
non_out_args_replaced.append(replacement)
else:
# If not, push a as it is.
non_out_args_replaced.append(a)
args.extend(non_out_args_replaced)
# g.out.precomputed.add is the list of parameters that are added
# without replacement after the non out args and just before the out args
args.extend(g.out.precomputed.add)
else:
args.extend(g.out.func.arguments.non_out)
args.extend(g.out.func.arguments.out)
return [r for arg in args for r in argument(arg)]
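# E.g. (illustrative, hypothetical): if an op's `precomputed:` block in native_functions.yaml
# replaces `kernel_size` with `int kH, int kW` and adds `int64_t numBatch`, the impl signature
# takes `kH` and `kW` in place of `kernel_size`, with `numBatch` appended just before the out
# arguments.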
def meta_arguments(g: NativeFunctionsGroup) -> List[Binding]:
args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
args.extend(g.functional.func.arguments.non_out)
return [r for arg in args for r in argument(arg)]
def out_arguments(g: NativeFunctionsGroup) -> List[Binding]:
args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
args.extend(g.out.func.arguments.out)
return [r for arg in args for r in argument(arg)]
# pytorch-master | torchgen/api/structured.py
from dataclasses import dataclass
from typing import Dict, List, Optional, Sequence, Set, Tuple, Union
from torchgen.api import cpp
from torchgen.api.types import Binding, CppSignature, CppSignatureGroup
from torchgen.gen import pythonify_default
from torchgen.model import (
Argument,
BaseTy,
BaseType,
FunctionSchema,
ListType,
NativeFunction,
OptionalType,
Return,
Type,
Variant,
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Data Models
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# [Notes] python binding codegen
#
# The Python binding codegen produces code that takes the input list of
# PyObjects, finds the matching ATen C++ function using PythonArgParser,
# converts the PyObjects into C++ types and calls the ATen C++ function:
#
# +--------+ parsing +------------------------+ binding +-----------------------+
# | PyObjs | ---------> | PythonArgParser Output | ---------> | Cpp Function Dispatch |
# +--------+ +------------------------+ +-----------------------+
#
# The following examples demonstrate the data models the Python binding
# codegen needs to deal with and the tasks it needs to accomplish. It
# helps understand the purpose of the new data types we introduced below.
#
# - Function Schema (source of truth)
#
# aten::empty.names(int[] size, *, Dimname[]? names,
# ScalarType? dtype=None, Layout? layout=None,
# Device? device=None, bool? pin_memory=None,
# MemoryFormat? memory_format=None) -> Tensor
#
# - Python Signature
#
# It's used to generate input schema string for PythonArgParser.
# Note: TensorOptions fields are reordered and the additional
# 'requires_grad' field is added:
#
# empty(IntArrayRef size, *, DimnameList? names,
# MemoryFormat? memory_format=None, ScalarType dtype=None,
# Layout layout=torch.strided, Device device=None,
# bool pin_memory=False, bool requires_grad=False)
#
# - C++ Signature
#
# It's used to generate C++ lambda formals & dispatch call.
# Note: the scattered TensorOptions fields are packed into 'options'.
#
# auto dispatch_empty =
# [](IntArrayRef size, c10::optional<DimnameList> names,
# const TensorOptions & options,
# c10::optional<MemoryFormat> memory_format) -> Tensor {
# pybind11::gil_scoped_release no_gil;
# return torch::empty(size, names, options, memory_format);
# };
#
# - Binding between Python Arguments and C++ Arguments
#
# Given a set of Python Arguments in scope, we need produce the
# binding expressions that translate the Python API into C++ API:
#
# Python Args Cpp Args Binding Exprs
# -----------------------------------------------------------------
# 0: size size '_r.intlist(0)'
# 1: names names 'names' [special init]
# 2: memory_format -------+
# 3: dtype -----+-|--> options 'options' [special packing]
# 4: layout / |
# 5: device / +--> memory_format '_r.memoryformatOptional(2)'
# 6: pin_memory /
# 7: requires_grad -+
#
# So the full dispatch expression would look like:
#
# dispatch_empty(_r.intlist(0), names, options,
# _r.memoryformatOptional(2))
#
# Where does 'names' come from? It involves special local init:
#
# auto __names = _r.toDimnameListOptional(1);
# c10::optional<DimnameList> names =
# __names ? c10::make_optional(DimnameList(__names.value()))
# : c10::nullopt;
#
# Where does 'options' come from? It involves special local init
# for TensorOptions. Note that Python side has the additional
# 'requires_grad' field:
#
# const auto options = TensorOptions()
# .dtype(_r.scalartype(3))
# .device(_r.device(5))
# .layout(_r.layoutOptional(4))
# .requires_grad(_r.toBool(7))
# .pinned_memory(_r.toBool(6));
#
# In some other cases one Python Argument can map to multiple C++
# Arguments. For example:
#
# aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False)
# -> (Tensor values, Tensor indices)
#
# Python Args Cpp Args Binding Exprs
# ---------------------------------------------------------------------
# +----> max 'out[0]'
# /-----> max_values 'out[1]
# 0: input / self '_r.tensor(0)'
# 1: dim / dim '_r.dimname(1)'
# 2: keepdim / keepdim '_r.toBool(2)'
# 3: out -----+ [local init] out '_r.tensorlist_n<2>(3)'
#
# As demonstrated above, the binding can involve reordering,
# packing, unpacking and special local inits.
#
#
# Let's look at a concrete example:
#
# static PythonArgParser parser({
# "abs(Tensor input, *, Tensor out=None)",
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ^
# +--- Python Schema, represented by PythonSignature and PythonArgument
#
# }, /*traceable=*/true);
#
# ParsedArgs<2> parsed_args;
# auto _r = parser.parse(nullptr, args, kwargs, parsed_args);
#
# ...
#
# if (_r.isNone(1)) {
# ~~~~~~~~~~~~ <--- Scattered PythonArgParser output (arg name = 'out')
# represented by PythonArgParserOutputExpr
#
# // aten::abs(Tensor self) -> Tensor
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ^
# +--- NativeFunction schema, base version
#
# auto dispatch_abs = [](const Tensor & self) -> Tensor {
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ^
# +--- dispatch_lambda_args / dispatch_lambda_return_str
# generated from NativeFunction / CppSignature
# (deprecated PythonSignature is special)
# arguments are represented by DispatchLambdaArgument
#
# pybind11::gil_scoped_release no_gil;
# return self.abs();
# ~~~~~~~~~~~ <--- cpp_dispatch_target / cpp_dispatch_exprs
# generated from NativeFunction / CppSignature
# };
# return wrap(dispatch_abs(_r.tensor(0)));
# ~~~~~~~~~~~~~
# ^
# +--- dispatch_lambda_exprs
# binding PythonArgParserOutputExpr (python args)
# and DispatchLambdaArgument (c++ args)
#
# } else {
# // aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ^
# +--- NativeFunction schema, out-variant
#
# auto dispatch_abs_out = [](Tensor out, const Tensor & self) -> Tensor {
# pybind11::gil_scoped_release no_gil;
# return at::abs_out(out, self);
# };
# return wrap(dispatch_abs_out(_r.tensor(1), _r.tensor(0)));
# }
#
#
# [Notes] python interface codegen
# The python dataclasses below are used to generate both python binding code
# and pyi type hint signatures.
# In theory these two should look very similar, but there are a number of differences
# in how pyi signatures vs. python_arg_parser signatures are generated.
# These differences have been encapsulated in signature_str() vs. signature_str_pyi()
# to display the full signatures, and argument_str() vs argument_str_pyi() to display arguments.
# For example, only pyi signatures include return types.
@dataclass(frozen=True)
class PythonReturns:
returns: Tuple[Return, ...]
@dataclass(frozen=True)
class PythonArgument:
name: str
type: Type
default: Optional[str]
# Used to generate the default init expr for some PythonArgParser outputs, e.g.:
#
# _r.layoutWithDefault(3, layout_from_backend(self.options().backend())))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ^
# +--- default_init str
default_init: Optional[str]
# Compute argument formal for python argument parsing.
# Needs to be consistent with torch/csrc/utils/python_arg_parser.h.
def argument_str(self, *, method: bool = False) -> str:
type_str = argument_type_str(self.type).replace("const ", "").replace(" &", "")
name = self.name
# s/self/input/ outside method bindings
# [old codegen] TODO: remove this? doesn't rename in codegen, it's just
# for the parse string
if name == "self" and type_str == "Tensor" and not method:
name = "input"
# add default
if self.default is not None:
default = {
"nullptr": "None",
"c10::nullopt": "None",
"{}": "None",
}.get(self.default, self.default)
return f"{type_str} {name}={default}"
else:
return f"{type_str} {name}"
def argument_str_pyi(
self, *, method: bool = False, deprecated: bool = False
) -> str:
type_str = argument_type_str_pyi(self.type)
name = self.name
# s/self/input/ outside method bindings
# [old codegen] TODO: remove this? doesn't rename in codegen, it's just
# for the parse string
if name == "self" and type_str == "Tensor" and not method and not deprecated:
name = "input"
if name == "from": # from is a Python keyword...
name += "_"
# pyi merges the _out and functional variants into the same signature, with an optional out arg
if name == "out" and type_str == "Tensor" and not deprecated:
type_str = "Optional[" + type_str + "]"
# pyi deprecated signatures don't get defaults for their out arg
treat_as_no_default = (
deprecated
and isinstance(self, PythonOutArgument)
and self.default == "None"
)
# add default
if self.default is not None and not treat_as_no_default:
if (
isinstance(self.type, ListType)
and self.type.elem == BaseType(BaseTy.int)
and self.default.startswith("{")
and self.default.endswith("}")
):
default = "(" + self.default[1:-1] + ")"
else:
default = {
"nullptr": "None",
"c10::nullopt": "None",
"{}": "None",
"MemoryFormat::Contiguous": "contiguous_format",
"QScheme::PER_TENSOR_AFFINE": "per_tensor_affine",
}.get(self.default, self.default)
return f"{name}: {type_str}={default}"
else:
return f"{name}: {type_str}"
@dataclass(frozen=True)
class PythonOutArgument(PythonArgument):
# In the Python signature, multiple output fields are packed into one 'out' argument.
# When binding to C++, it's first bound to a local 'out' variable:
# 'auto out = _r.tensorlist_n<2>(2);',
# then bound to the scattered C++ output arguments as 'out[0]', 'out[1]', etc.
# TODO: maybe don't need keep scattered out fields for python signature?
outputs: Tuple[PythonArgument, ...]
@staticmethod
def from_outputs(
outputs: Tuple[PythonArgument, ...]
) -> Optional["PythonOutArgument"]:
if not outputs:
return None
size = len(outputs)
if size == 1:
return PythonOutArgument(
name=outputs[0].name,
type=outputs[0].type,
default="None",
default_init=None,
outputs=outputs,
)
elif size > 1:
if any(map(lambda a: not a.type.is_tensor_like(), outputs)):
raise RuntimeError(f"Unsupported output type: {outputs}")
return PythonOutArgument(
name="out",
# TODO: shouldn't this be OptionalType[ListType[...]], since it defaults to None?
type=ListType(BaseType(BaseTy.Tensor), size),
default="None",
default_init=None,
outputs=outputs,
)
raise AssertionError(r"Unexpected PythonOutArgument size")
@dataclass(frozen=True)
class PythonSignature:
# Base operator name, without inplace/outplace suffix.
name: str
# Positional arguments.
# TODO: create a dedicated SelfArgument type for 'self'?
input_args: Tuple[PythonArgument, ...]
# Keyword arguments excluding the 'out' argument and scattered kwargs belonging
# to TensorOptions (dtype, layout, device, pin_memory, requires_grad, etc).
input_kwargs: Tuple[PythonArgument, ...]
output_args: Optional[PythonOutArgument]
# Return types, which are only used by pyi
returns: PythonReturns
# These are scattered kwargs arguments belonging to TensorOptions.
# When binding to C++, they are packed into a TensorOptions object 'options'.
# It's possible that the C++ signature doesn't take TensorOptions object (e.g.
# for out variant), in which case they will be used as scattered fields without
# being packed into 'options'.
# TODO: maybe create a PythonTensorOptionsArgument?
tensor_options_args: Tuple[PythonArgument, ...]
# method or function signature?
method: bool
@property
def deprecated(self) -> bool:
return False
def arguments(
self, *, skip_outputs: bool = False, skip_tensor_options: bool = False
) -> Tuple[Union[PythonArgument, PythonOutArgument], ...]:
result: List[Union[PythonArgument, PythonOutArgument]] = []
result.extend(self.input_args)
result.extend(self.input_kwargs)
if self.output_args is not None and not skip_outputs:
result.append(self.output_args)
if not skip_tensor_options:
result.extend(self.tensor_options_args)
return tuple(result)
def arguments_count(self) -> int:
return len(self.arguments())
def output_idx(self) -> int:
return len(self.input_args) + len(self.input_kwargs)
# [old codegen] Compute the Python function signature for argument parsing,
# as specified in torch/csrc/utils/python_arg_parser.h. WARNING:
# this is NOT the same type signature as specified by PEP 484
# as understood by mypy; our format was independently developed
# and has some quirks to make it more suitable specifically
# for error parsing.
#
# For a translation to mypy-valid type signatures, see
# signature_str_pyi().
def signature_str(self, *, skip_outputs: bool = False) -> str:
args = self.arguments(skip_outputs=skip_outputs)
schema_formals: List[str] = list(
map(lambda a: a.argument_str(method=self.method), args)
)
positional_argc = len(self.input_args)
if len(schema_formals) > positional_argc:
schema_formals.insert(positional_argc, "*")
return f'{self.name}({", ".join(schema_formals)})'
def signature_str_pyi(self, *, skip_outputs: bool = False) -> str:
args = self.arguments(skip_outputs=skip_outputs)
schema_formals: List[str] = list(
map(lambda a: a.argument_str_pyi(method=self.method), args)
)
positional_argc = len(self.input_args)
if len(schema_formals) > positional_argc:
schema_formals.insert(positional_argc, "*")
# only pyi signatures include returns
returns_str = returns_str_pyi(self)
# pyi also includes self (with no typing/defaults) for methods
if self.method:
schema_formals.insert(0, "self")
return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> Optional[str]:
# only pyi uses vararg signatures
args = self.arguments(skip_outputs=skip_outputs)
schema_formals: List[str] = list(
map(lambda a: a.argument_str_pyi(method=self.method), args)
)
# vararg only applies to pyi signatures. vararg variants are not generated for all signatures
num_args = self.arguments_count()
num_positionalargs = len(self.input_args)
have_vararg_version = False
if num_args > 0:
vararg_type = args[0].type
if (
isinstance(vararg_type, ListType)
and str(vararg_type.elem) == "int"
and num_positionalargs == 1
):
have_vararg_version = True
if not have_vararg_version:
return None
# Below are the major changes in vararg vs. regular pyi signatures
# vararg signatures also omit the asterisk
schema_formals[0] = "*" + args[0].name + ": _int"
returns_str = returns_str_pyi(self)
# pyi also includes self (with no typing/defaults) for methods
if self.method:
schema_formals.insert(0, "self")
return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
# The deprecated python signature involves some special logic, so create a
# dedicated data model to store these extra properties.
@dataclass(frozen=True)
class PythonSignatureDeprecated(PythonSignature):
# Schema for the deprecated function
deprecated_schema: FunctionSchema
# The deprecated signature might miss some arguments that the corresponding
# C++ signature expects. We need to store the constant default values to pass in.
# For example:
# [deprecated signature]: addmm(Scalar beta, Tensor self, Tensor mat1, Tensor mat2)
# [func schema]: aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
# [func call]: self.addmm(mat1, mat2, beta, 1)
# We store ['self', 'mat1', 'mat2', 'beta', '1'] in this case.
deprecated_args_exprs: Tuple[str, ...]
@property
def deprecated(self) -> bool:
return True
def signature_str(self, *, skip_outputs: bool = False) -> str:
return (
PythonSignature.signature_str(self, skip_outputs=skip_outputs)
+ "|deprecated"
)
def signature_str_pyi(self, *, skip_outputs: bool = False) -> str:
args = self.arguments(skip_outputs=skip_outputs)
schema_formals: List[str] = list(
map(lambda a: a.argument_str_pyi(method=self.method, deprecated=True), args)
)
positional_argc = len(self.input_args)
if len(schema_formals) > positional_argc:
schema_formals.insert(positional_argc, "*")
returns_str = returns_str_pyi(self)
return f'def {self.name}({", ".join(schema_formals)}) -> {returns_str}: ...'
def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> Optional[str]:
# the codegen doesn't include vararg variants for deprecated signatures
return None
# This struct is used to hold the PythonSignature and its corresponding
# NativeFunction BEFORE grouping base and out-variant functions.
# Why not store NativeFunction in PythonSignature or construct PythonSignature
# from NativeFunction? Because they are not 1-1 mapped.
# One native function could have both deprecated and non-deprecated python
# signatures - NativeFunction doesn't contain information to construct the
# deprecated python signature.
# One python signature is used to handle both the base and the out-variant
# function - see 'PythonSignatureGroup'.
@dataclass(frozen=True)
class PythonSignatureNativeFunctionPair:
signature: PythonSignature
function: NativeFunction
# We merge pairs of functions with signatures that are equivalent mod
# output arguments, and use a single entry in the python_arg_parser sig
# list for both (output arguments become optional).
@dataclass(frozen=True)
class PythonSignatureGroup:
# The signature used for Python argument parsing. The outplace signature
# is preferred if exists, because it can be used to parse inputs for both
# the out-place variant and the base version (with output omitted).
signature: PythonSignature
# The regular ATen declaration (e.g. conv2d)
base: NativeFunction
# The out variant (e.g. conv2d_out)
outplace: Optional[NativeFunction]
@classmethod
def from_pairs(
cls,
functional: PythonSignatureNativeFunctionPair,
out: Optional[PythonSignatureNativeFunctionPair],
) -> "PythonSignatureGroup":
if out is None:
return PythonSignatureGroup(
signature=functional.signature,
base=functional.function,
outplace=None,
)
# prefer the signature with optional out=... arguments because it's the
# superset that can be used to parse input for both base and outplace.
signature_kwargs = out.signature.__dict__.copy()
# Out overloads in C++ don't have TensorOptions arguments,
# so take these from the functional variant
signature_kwargs[
"tensor_options_args"
] = functional.signature.tensor_options_args
return PythonSignatureGroup(
signature=type(out.signature)(**signature_kwargs),
base=functional.function,
outplace=out.function,
)
# C++ function dispatch is wrapped in a lambda function. The lambda function
# has almost the same signature as the C++ function, only with some small
# variants - see details below.
# This data model is used to represent arguments of the lambda function
# signature.
@dataclass(frozen=True)
class DispatchLambdaArgument:
name: str
type_str: str
is_out_arg: bool
# To pass PyObject arguments to the C++ function (via the lambda wrapper),
# we first need to convert the PyObjects into simple C++ objects. This work
# is done by PythonArgParser.
# This data model is used to represent the output of PythonArgParser.
# It has 1-1 mapping with PythonArgument in PythonSignature.
@dataclass(frozen=True)
class PythonArgParserOutputExpr:
# argument name
name: str
# RHS expression to reference PythonArgParser output.
expr: str
# In some special cases we need to create a different expr, e.g.:
# '_r.isNone(1)' instead of '_r.tensor(1)'.
index: int
# The python argument it maps to.
argument: PythonArgument
@property
def is_none_expr(self) -> str:
return f"_r.isNone({self.index})"
# To pass PythonArgParser output to the lambda wrapper, we need to bind
# PythonArgParserOutputExpr to DispatchLambdaArgument.
# They are not always 1-1 mapped, e.g. scattered TensorOptions fields
# need to be packed into a TensorOptions object, which is the argument
# that the lambda function wrapper takes.
@dataclass(frozen=True)
class DispatchLambdaArgumentExprs:
# The exprs that provide the binding for lambda arguments, e.g.:
#
# 'self' -> '_r.tensor(0)'
# 'min' -> 'out[0]' / 'min_indices' -> 'out[1]'
# 'options' -> 'options'
#
# It has 1-1 mapping with DispatchLambdaArgument.
exprs: Sequence[str]
# Special local inits, which might introduce new variables that
# the 'exprs' above reference, e.g.:
#
# 'auto out = _r.tensorlist_n<2>(2);'
#
inits: Sequence[str]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Helper Functions
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def _cpp_signature(f: NativeFunction, *, method: bool = False) -> CppSignature:
return CppSignatureGroup.from_native_function(f, method=method).signature
def has_tensor_options(f: NativeFunction) -> bool:
return f.func.arguments.tensor_options is not None
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Python Signature
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# 'simple_type' was introduced by the old codegen, which is slightly
# different from the python schema type, e.g.: doesn't have '?' suffix
# for optional Tensor/TensorList; doesn't have '[size]' suffix for list type.
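# E.g. (illustrative):
#   int[2]    -> "IntArrayRef[2]"                              (simple_type=True: "IntArrayRef")
#   Tensor?[] -> "const c10::List<c10::optional<Tensor>> &"    (simple_type=True: "c10::List<c10::optional<Tensor>>")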
def argument_type_str(t: Type, *, simple_type: bool = False) -> str:
if isinstance(t, BaseType):
if t.name == BaseTy.Tensor:
return "Tensor"
elif t.name == BaseTy.int:
return "int64_t"
elif t.name == BaseTy.float:
return "double"
elif t.name == BaseTy.str:
return "c10::string_view"
elif t.name in [
BaseTy.bool,
BaseTy.QScheme,
BaseTy.Scalar,
BaseTy.ScalarType,
BaseTy.Generator,
BaseTy.Storage,
BaseTy.Layout,
BaseTy.Device,
BaseTy.MemoryFormat,
BaseTy.Dimname,
BaseTy.Stream,
BaseTy.ConstQuantizerPtr,
BaseTy.SymInt,
]:
# These python schema type names line up with their function schema names
return t.name.name
elif isinstance(t, OptionalType):
if str(t.elem) == "Tensor":
# Is it desired to keep '?' for simple_type with new style dispatcher?
return "Tensor?"
elem = argument_type_str(t.elem, simple_type=simple_type)
return f"{elem}?"
elif isinstance(t, ListType):
size = t.size if not simple_type else None
if str(t.elem) == "bool":
assert t.size is not None
return f"::std::array<bool,{t.size}>"
elif str(t.elem) == "int":
return f"IntArrayRef[{size}]" if size is not None else "IntArrayRef"
elif str(t.elem) == "SymInt":
return f"SymIntArrayRef[{size}]" if size is not None else "SymIntArrayRef"
elif str(t.elem) == "Tensor":
return f"TensorList[{size}]" if size is not None else "TensorList"
elif str(t.elem) == "Scalar":
return f"ScalarList[{size}]" if size is not None else "ScalarList"
elif str(t.elem) == "Tensor?":
if simple_type:
return "c10::List<c10::optional<Tensor>>"
else:
return "const c10::List<c10::optional<Tensor>> &"
elif str(t.elem) == "Dimname":
return f"DimnameList[{size}]" if size is not None else "DimnameList"
elem = argument_type_str(t.elem, simple_type=simple_type)
return f"ArrayRef<{elem}>"
raise RuntimeError(f"unrecognized type {repr(t)}")
def argument_type_size(t: Type) -> Optional[int]:
l = t.is_list_like()
if l is not None and str(l.elem) != "bool":
return l.size
else:
return None
def argument(a: Argument) -> PythonArgument:
return PythonArgument(
name=a.name,
type=a.type,
# TODO: directly translate a.default to python default
default=str(pythonify_default(cpp.default_expr(a.default, a.type)))
if a.default is not None
else None,
default_init=None,
)
# Generates a PythonSignature that can be used for either .pyi or PythonArgParser codegen
def signature(
f: NativeFunction, *, method: bool = False, pyi: bool = False
) -> PythonSignature:
return signature_from_schema(
f.func, category_override=f.category_override, method=method, pyi=pyi
)
def signature_from_schema(
func: FunctionSchema,
*,
category_override: Optional[str],
method: bool = False,
pyi: bool = False,
) -> PythonSignature:
args: List[Argument] = []
args.extend(func.arguments.pre_self_positional)
# Skip SelfArgument if this is method.
if not method and func.arguments.self_arg is not None:
args.append(func.arguments.self_arg.argument)
args.extend(func.arguments.post_self_positional)
args.extend(func.arguments.pre_tensor_options_kwarg_only)
# Skip TensorOptionsArguments. Python side TensorOptions
# arguments are created based on different rules - see below.
args.extend(func.arguments.post_tensor_options_kwarg_only)
args.extend(func.arguments.out)
input_arg_set = set(a.name for a in func.arguments.flat_positional)
kwarg_only_set = set(a.name for a in func.arguments.flat_kwarg_only)
out_arg_set = set(a.name for a in func.arguments.out)
input_args = tuple(map(argument, filter(lambda a: a.name in input_arg_set, args)))
input_kwargs = tuple(
map(argument, filter(lambda a: a.name in kwarg_only_set, args))
)
outputs = tuple(map(argument, filter(lambda a: a.name in out_arg_set, args)))
# Reintroduce the scattered fields of TensorOptions for Python.
    # Compared to the cpp counterpart, the python arguments have a new property
    # (default_init) and a new argument 'requires_grad', which require some
    # special handling.
# [old codegen] TODO: because these aren't guaranteed to be 100% faithful
# to the original versions in the yaml, this recreation is a potential
# source of drift between eager and JIT. Pull this logic out to a shared place.
has_tensor_input_arg = any(
a.type.is_tensor_like() for a in func.arguments.flat_non_out
)
if any(a.name == "requires_grad" for a in func.schema_order_arguments()):
raise ValueError(
"argument named requires_grad is reserved, should not explicitly add it in the schema"
)
# [old codegen] this probably won't work if one of the returns is not a tensor,
# but it will produce a compile-time error that is obvious.
has_tensor_return = any(r.type.is_tensor_like() for r in func.returns)
name: str = cpp.name(func)
is_factory_function = category_override == "factory" or (
has_tensor_return and not has_tensor_input_arg
)
is_like_or_new_function = (
category_override in ("new", "like")
or name.startswith("new_")
or name.endswith("_like")
)
tensor_options_args: List[PythonArgument] = []
if is_factory_function or is_like_or_new_function:
def topt_default_init(name: str) -> Optional[str]:
topt_args = func.arguments.tensor_options
if topt_args is None:
return None
a = getattr(topt_args, name)
if a.default is None or a.default == "None":
return None
return cpp.default_expr(a.default, a.type)
tensor_options_args.append(
PythonArgument(
name="dtype",
type=OptionalType(BaseType(BaseTy.ScalarType)),
default="None",
default_init=(
"self.scalar_type()"
if is_like_or_new_function
else topt_default_init("dtype")
),
)
)
tensor_options_args.append(
PythonArgument(
name="layout",
type=OptionalType(BaseType(BaseTy.Layout)),
default="None",
default_init=(
"self.layout()"
if is_like_or_new_function
else topt_default_init("layout")
),
)
)
tensor_options_args.append(
PythonArgument(
name="device",
type=OptionalType(BaseType(BaseTy.Device)),
default="None",
default_init=(
"self.device()"
if is_like_or_new_function
else (
topt_default_init("device")
or "torch::tensors::get_default_device()"
)
),
)
)
tensor_options_args.append(
PythonArgument(
name="pin_memory",
type=OptionalType(BaseType(BaseTy.bool)),
default="False",
default_init=None,
)
)
tensor_options_args.append(
PythonArgument(
name="requires_grad",
type=OptionalType(BaseType(BaseTy.bool)),
default="False",
default_init=None,
)
)
returns = PythonReturns(returns=func.returns)
return PythonSignature(
name=str(func.name.name),
input_args=input_args,
input_kwargs=input_kwargs,
output_args=PythonOutArgument.from_outputs(outputs),
tensor_options_args=tuple(tensor_options_args),
returns=returns,
method=method,
)
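# For a factory-style op (tensor returns, no tensor inputs) or a *_like/new_* op,
# the signature built above therefore gains the scattered TensorOptions keyword
# arguments, roughly (illustrative, not a real schema):
#
#   foo(..., *, dtype=None, layout=None, device=None, pin_memory=False,
#       requires_grad=False)
#
# with *_like/new_* ops additionally defaulting dtype/layout/device from 'self'
# via default_init ("self.scalar_type()", "self.layout()", "self.device()").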
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Python Interface
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def namedtuple_fieldnames(returns: Tuple[Return, ...]) -> List[str]:
if len(returns) <= 1 or all(map(lambda r: r.name is None, returns)):
return []
else:
if any(map(lambda r: r.name is None, returns)):
# When building on Windows, `PyStructSequence_UnnamedField` could not be
# resolved by the linker for some reason, which cause error in building:
#
# python_nn_functions.cpp.obj : error LNK2001: unresolved external symbol
# PyStructSequence_UnnamedField
#
# Thus, at this point in time, we do not support unnamed
# fields in namedtuple; you must either name all fields,
# or none of them.
raise ValueError("Unnamed field is not supported by codegen")
return list(map(lambda r: str(r.name), returns))
def argument_type_str_pyi(t: Type) -> str:
add_optional = False
if isinstance(t, OptionalType):
t = t.elem
add_optional = True
if isinstance(t, BaseType):
if t.name == BaseTy.int:
ret = "_int"
        elif t.name == BaseTy.SymInt:
ret = "SymInt"
elif t.name == BaseTy.float:
ret = "_float"
elif t.name == BaseTy.str:
ret = "str"
elif t.name == BaseTy.Scalar:
ret = "Number"
elif t.name == BaseTy.ScalarType:
ret = "_dtype"
elif t.name == BaseTy.bool:
ret = "_bool"
elif t.name == BaseTy.QScheme:
ret = "_qscheme"
elif t.name == BaseTy.Layout:
ret = "_layout"
elif t.name == BaseTy.Device:
ret = "Union[_device, str, None]"
elif t.name == BaseTy.MemoryFormat:
ret = "memory_format"
elif t.name == BaseTy.Dimname:
ret = "Union[str, ellipsis, None]"
elif t.name in [BaseTy.Tensor, BaseTy.Generator, BaseTy.Storage, BaseTy.Stream]:
# These python schema type names line up with their function schema names
ret = t.name.name
elif isinstance(t, ListType):
if str(t.elem) == "int":
ret = "Union[_int, _size]" if t.size is not None else "_size"
elif t.is_tensor_like():
# TODO: this doesn't seem right...
# Tensor?[] currently translates to Optional[Union[Tuple[Tensor, ...], List[Tensor]]]
# It should probably translate to Union[Tuple[Optional[Tensor], ...], List[Optional[Tensor]]]
if isinstance(t.elem, OptionalType):
add_optional = True
ret = (
"Union[Tensor, Tuple[Tensor, ...], List[Tensor]]"
if t.size is not None
else "Union[Tuple[Tensor, ...], List[Tensor]]"
)
elif str(t.elem) == "float":
ret = "Sequence[_float]"
else:
elem = argument_type_str_pyi(t.elem)
ret = f"Sequence[{elem}]"
if add_optional:
ret = "Optional[" + ret + "]"
return ret
raise RuntimeError(f"unrecognized type {repr(t)}")
def return_type_str_pyi(t: Type) -> str:
# Where arguments are open to accepting Union, return types should return
# concrete types
if isinstance(t, OptionalType):
inner = return_type_str_pyi(t.elem)
return f"Optional[{inner}]"
if isinstance(t, BaseType):
if t.name == BaseTy.Device:
return "_device"
elif t.name == BaseTy.Dimname:
ret = "Optional[str]"
else:
return argument_type_str_pyi(t)
if isinstance(t, ListType):
inner = return_type_str_pyi(t.elem)
return f"List[{inner}]"
return argument_type_str_pyi(t)
def returns_named_tuple_pyi(signature: PythonSignature) -> Optional[Tuple[str, str]]:
python_returns = [return_type_str_pyi(r.type) for r in signature.returns.returns]
namedtuple_name = signature.name
field_names = namedtuple_fieldnames(signature.returns.returns)
if field_names:
tuple_args = [
f'("{name}", {typ})' for name, typ in zip(field_names, python_returns)
]
namedtuple_def = f'NamedTuple("{namedtuple_name}", [{", ".join(tuple_args)}])'
return namedtuple_name, namedtuple_def
return None
def returns_str_pyi(signature: PythonSignature) -> str:
field_names = namedtuple_fieldnames(signature.returns.returns)
if field_names:
return f"torch.return_types.{signature.name}"
python_returns = [return_type_str_pyi(r.type) for r in signature.returns.returns]
if len(python_returns) > 1:
return "Tuple[" + ", ".join(python_returns) + "]"
if len(python_returns) == 1:
return python_returns[0]
return "None"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# C++ Function Dispatch
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# This section provides APIs to generate the code that does C++ function
# dispatch. The C++ function call is wrapped by a lambda function.
# For example:
#
# // aten::selu_(Tensor(a!) self) -> Tensor(a!)
# auto dispatch_selu_ = [](Tensor self) -> Tensor {
# pybind11::gil_scoped_release no_gil;
# return at::selu_(self);
# };
#
# The lambda function's signature follows the C++ signature in common
# cases, e.g.:
#
# // aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
# [](const Tensor & self, const Tensor & other, Scalar alpha) -> Tensor
#
# For out variant the 'out' argument's type is changed from 'Tensor &'
# to 'Tensor'. It's because when calling the lambda it passes in the
# PythonArgParser output '_r.tensor(3)', which is stack allocated object
# and needs to pass by value. Also see comments in 'dispatch_lambda_return_str()'.
#
# // aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
# [](Tensor out, const Tensor & self, const Tensor & other, Scalar alpha) -> Tensor
#
# For multi-output case it can keep using reference type because the
# PythonArgParser output has been unpacked to local variables, e.g.:
#
# // aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *,
# // Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
# [](Tensor & max, Tensor & max_values, const Tensor & self, Dimname dim, bool keepdim) -> std::tuple<Tensor,Tensor>
#
# For deprecated python signature, it should follow deprecated python arg order.
# TODO: This is to keep same byte-for-byte result as the old codegen - maybe unnecessary?
def dispatch_lambda_args(
ps: PythonSignature, f: NativeFunction
) -> Tuple[DispatchLambdaArgument, ...]:
if isinstance(ps, PythonSignatureDeprecated):
schema = ps.deprecated_schema
else:
schema = f.func
    # Start with cpp arguments - dispatch lambda signature always includes 'self'
cpp_args = cpp.arguments(
arguments=schema.arguments,
faithful=False,
method=False,
cpp_no_default_args=f.cpp_no_default_args,
)
out_args: Set[str] = set(a.name for a in schema.arguments.out)
# Convert from cpp argument to lambda argument
def dispatch_lambda_arg(cpp_arg: Binding) -> DispatchLambdaArgument:
type_str = cpp_arg.type
is_out_arg = cpp_arg.name in out_args
if ps.method and cpp_arg.name == "self":
# For method's 'self', we can use 'const Tensor &' and simply ignore mutability!
type_str = "const at::Tensor &"
else:
            # For other cases we need to prevent dangling refs to temps (unless it's
            # an unpacked scattered output).
# The reason is explained in the comments above and in 'dispatch_lambda_return_str()'.
# TODO: avoid this special handling?
ensure_temp_safe = len(out_args) <= 1 or not is_out_arg
if ensure_temp_safe:
type_str = {
"at::Tensor &": "at::Tensor",
}.get(type_str, type_str)
return DispatchLambdaArgument(
name=cpp_arg.name,
type_str=type_str,
is_out_arg=is_out_arg,
)
return tuple(map(dispatch_lambda_arg, cpp_args))
# [old codegen] XXX: if you got here because of an assertion failure, it doesn't mean
# it's enough to just extend the list here. Before you do this, make sure
# to add an appropriate wrap() overload in torch/csrc/autograd/utils/wrap_outputs.h.
SUPPORTED_RETURN_TYPES = {
"at::Tensor",
"::std::tuple<at::Tensor,at::Tensor>",
"::std::tuple<at::Tensor,at::Tensor,at::Tensor>",
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor>",
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,int64_t>",
"::std::tuple<at::Tensor,at::Tensor,double,int64_t>",
"::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t>",
"::std::tuple<at::Tensor,at::Tensor,double,at::Tensor,int64_t>",
"::std::tuple<double,int64_t>",
"::std::tuple<at::Tensor,::std::vector<at::Tensor>>",
"::std::vector<at::Tensor>",
"at::Scalar",
"bool",
"int64_t",
"void*",
"void",
"at::QScheme",
"double",
"at::IntArrayRef",
"at::ScalarType",
}
def dispatch_lambda_return_str(f: NativeFunction) -> str:
# [old codegen] Remove type annotation (e.g. 'Tensor' rather than 'Tensor &')
# because the dispatch lambdas take mutable arguments *by value*, not
# by reference. If you then return a reference to such an argument, you
# will now have a pointer to a dangling stack entry. Not good.
#
# You want:
#
# auto dispatch_selu_ = [](Tensor self) -> Tensor { ...; return at::selu_(self); };
# ^^^^^^
#
# *not*
#
# auto dispatch_selu_ = [](Tensor self) -> Tensor& { ...; return at::selu_(self); };
# ^^^^^^^
#
# (NB: We can't make dispatch_selu_ take Tensor&, because the enclosing
# codegen looks like dispatch_selu_(_r.tensor(0)), and you can't take a
    # mutable reference to a temporary. Maybe we could assign it to a
# variable itself.)
returns_without_annotation = tuple(
map(lambda r: Return(r.name, r.type, None), f.func.returns)
)
return_str = cpp.returns_type(returns_without_annotation).cpp_type()
if return_str not in SUPPORTED_RETURN_TYPES:
raise RuntimeError(f"{f.func.name} returns unsupported type {return_str}")
return return_str
def cpp_dispatch_target(f: NativeFunction) -> str:
name = cpp.name(f.func)
if Variant.method in f.variants:
return f"self.{name}"
if Variant.function in f.variants:
if has_tensor_options(f) or f.func.name.name.base.endswith("_like"):
namespace = "torch"
else:
namespace = "at"
return f"{namespace}::{name}"
raise RuntimeError(f"could not dispatch, neither function nor method: {f.func}")
def cpp_dispatch_exprs(
f: NativeFunction,
*,
python_signature: Optional[PythonSignature] = None,
) -> Tuple[str, ...]:
cpp_args: Sequence[Binding] = _cpp_signature(f, method=False).arguments()
exprs: Tuple[str, ...] = tuple()
if not isinstance(python_signature, PythonSignatureDeprecated):
# By default the exprs are consistent with the C++ signature.
exprs = tuple(map(lambda a: a.name, cpp_args))
else:
# For deprecated python signature we may need fill in some constants.
exprs = tuple(
filter(
lambda n: n != "out" or f.func.is_out_fn(),
python_signature.deprecated_args_exprs,
)
)
if Variant.method in f.variants:
exprs = tuple(filter("self".__ne__, exprs))
return exprs
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Python / C++ Args Binding
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# We explicitly enumerate the PythonArgParser unpacking methods for all
# supported types. This might be more verbose than necessary, partially
# because of the irregularity of unpacking method naming, partially
# because we want to mimic the old codegen behavior - to reject
# unexpected and/or unsupported cases which the old codegen rejects.
# For certain cases it is intentionally more restrictive than necessary,
# e.g.: it doesn't accept doublelist with definite size.
def arg_parser_unpack_method(
t: Type, default: Optional[str], default_init: Optional[str]
) -> str:
has_default_init = default_init is not None
if has_default_init and str(t) not in (
"ScalarType?",
"ScalarType",
"Device",
"Device?",
"Layout",
"Layout?",
"bool",
"bool?",
):
raise RuntimeError(f"type '{t}' does not supported unpacking with default")
if isinstance(t, BaseType):
if t.name in [
BaseTy.Tensor,
BaseTy.Stream,
BaseTy.Storage,
BaseTy.Scalar,
BaseTy.Dimname,
]:
# These unpack methods line up with their schema names
return t.name.name.lower()
elif t.name == BaseTy.ScalarType:
return "scalartypeWithDefault" if has_default_init else "scalartype"
elif t.name == BaseTy.Device:
return "deviceWithDefault" if has_default_init else "device"
elif t.name == BaseTy.int:
return "toInt64"
elif t.name == BaseTy.SymInt:
return "toSymInt"
elif t.name == BaseTy.bool:
return "toBoolWithDefault" if has_default_init else "toBool"
elif t.name == BaseTy.float:
return "toDouble"
elif t.name == BaseTy.str:
return "stringView"
elif t.name == BaseTy.Layout:
return "layoutWithDefault" if has_default_init else "layout"
elif t.name == BaseTy.MemoryFormat:
return "memoryformat"
elif isinstance(t, OptionalType):
if str(t.elem) == "Tensor":
return "optionalTensor"
elif str(t.elem) == "Generator":
return "generator"
elif str(t.elem) == "Dimname[]":
return "toDimnameListOptional"
elif not has_default_init and default in (None, "None", "c10::nullopt"):
# If default is None: append 'Optional' to elem's unpacking method
return arg_parser_unpack_method(t.elem, None, None) + "Optional"
else:
# Otherwise, load as underlying type with default
return arg_parser_unpack_method(t.elem, default, default_init)
elif isinstance(t, ListType):
if str(t.elem) == "Tensor":
# accept and use definite size
if t.size is not None:
return f"tensorlist_n<{t.size}>"
else:
return "tensorlist"
elif str(t.elem) == "Tensor?":
return "list_of_optional_tensors"
elif str(t.elem) == "Dimname":
# accept definite size
return "dimnamelist"
elif str(t.elem) == "int":
# accept definite size
return "intlist"
elif str(t) == "float[]":
return "doublelist"
elif str(t.elem) == "SymInt":
# accept definite size
return "symintlist"
elif str(t) == "Scalar[]":
return "scalarlist"
raise RuntimeError(f"type '{t}' is not supported by PythonArgParser")
# Return RHS expression for python argument using PythonArgParser output.
# e.g. for arg name 'foo', arg type 'bool', arg_index = 2, returns '_r.toBool(2)'
def arg_parser_output_expr(
arg_index: int, a: PythonArgument
) -> PythonArgParserOutputExpr:
has_default = a.default_init is not None
unpack_method = arg_parser_unpack_method(
t=a.type, default=a.default, default_init=a.default_init
)
default = f", {a.default_init}" if has_default else ""
expr = f"_r.{unpack_method}({arg_index}{default})"
return PythonArgParserOutputExpr(
name=a.name,
expr=expr,
index=arg_index,
argument=a,
)
# Returns a map with key = arg_name and value = PythonArgParserOutputExpr.
def arg_parser_output_exprs(
ps: PythonSignature, f: NativeFunction
) -> Dict[str, PythonArgParserOutputExpr]:
return {
e.name: e
for i, a in enumerate(ps.arguments())
for e in (arg_parser_output_expr(i, a),)
}
# argument name to type for scattered tensor options fields
TENSOR_OPTIONS_FIELDS = {
"dtype": "ScalarType?",
"device": "Device?",
"layout": "Layout?",
"pin_memory": "bool?",
"requires_grad": "bool?",
}
# bind arg parser outputs (python args) with dispatch lambda arguments (c++ args).
def dispatch_lambda_exprs(
ps: PythonSignature, f: NativeFunction
) -> DispatchLambdaArgumentExprs:
# This method is to bind 'arg_parser_outputs' and 'lambda_args' by producing
# 'inits' and 'lambda_args_exprs' for each lambda argument using arg parser
# outputs.
arg_parser_outputs = arg_parser_output_exprs(ps, f)
lambda_args = dispatch_lambda_args(ps, f)
inits: List[str] = []
lambda_args_exprs: Dict[str, str] = dict()
has_toptions = has_tensor_options(f)
# 1. special inits/unpacking to provide binding exprs for lambda arguments.
for a in ps.arguments(skip_tensor_options=True):
name = a.name
arg_parser_expr = arg_parser_outputs[a.name].expr
if has_toptions and name == "self":
            # TODO: why does this need to be a special case?
inits.extend(
[
f"auto self = {arg_parser_expr};",
]
)
lambda_args_exprs[name] = name
elif (
isinstance(a, PythonOutArgument)
and len(a.outputs) > 1
and f.func.is_out_fn()
):
inits.extend(
[
f"auto out = {arg_parser_expr};",
]
)
for i, out_arg in enumerate(a.outputs):
lambda_args_exprs[out_arg.name] = f"out[{i}]"
elif str(a.type) == "Dimname[]?":
# [old codegen]
# TODO: make this part of something more general, or get rid of it.
# optional<ArrayRef<T>> are special. The PythonArgParser returns an
# optional<vector<T>>, which cannot be implicitly converted to
# optional<ArrayRef<T>>. One needs to unwrap the optional and rewrap.
inits.extend(
[
f"auto __{name} = {arg_parser_expr};",
f"c10::optional<DimnameList> {name} = __{name} ? c10::make_optional(DimnameList(__{name}.value())) : c10::nullopt;", # noqa: B950
]
)
lambda_args_exprs[name] = name
else:
# default case - directly using PythonArgParser output expr
lambda_args_exprs[name] = arg_parser_expr
# method's self is passed directly to python binding, rather than parsed
if ps.method:
lambda_args_exprs["self"] = "self"
# 2. special packing/checking for TensorOptions.
tensor_options_args_names = list(map(lambda a: a.name, ps.tensor_options_args))
if has_toptions:
if f.func.is_out_fn():
raise RuntimeError(f"{f.func}: tensor options with output arg")
for a in ps.tensor_options_args:
if a.name not in TENSOR_OPTIONS_FIELDS:
raise RuntimeError(
f"{f.func}: unrecognized tensor options field '{a.name}' in python binding arguments"
)
if str(a.type) != TENSOR_OPTIONS_FIELDS.get(a.name):
raise RuntimeError(
f"{f.func}: unrecognized type '{str(a.type)}' for tensor options field '{a.name}'"
)
if not all(
map(lambda a: a in tensor_options_args_names, TENSOR_OPTIONS_FIELDS.keys())
):
raise RuntimeError(
f"{f.func}: incomplete tensor options args: {tensor_options_args_names}"
)
inits.append(
f"""\
const auto options = TensorOptions()
.dtype({arg_parser_outputs['dtype'].expr})
.device({arg_parser_outputs['device'].expr})
.layout({arg_parser_outputs['layout'].expr})
.requires_grad({arg_parser_outputs['requires_grad'].expr})
.pinned_memory({arg_parser_outputs['pin_memory'].expr});
torch::utils::maybe_initialize_cuda(options);
"""
)
lambda_args_exprs["options"] = "options"
# 3. special case - access scattered TensorOptions fields without packing
# TODO: maybe move to the generator side as it's not related to binding.
if not has_toptions and tensor_options_args_names:
if "dtype" in tensor_options_args_names:
# we're an output-arg variant, check these args against output tensor
if not f.func.is_out_fn():
raise RuntimeError(
f"{f.func}: dtype in tensor_options_args without output arg"
)
if not all(
map(lambda a: a in tensor_options_args_names, ("layout", "device"))
):
raise RuntimeError(
f"{f.func}: incomplete tensor options for output check"
)
inits.append(
f"""\
check_out_type_matches({arg_parser_outputs['out'].expr}, {arg_parser_outputs['dtype'].expr},
{arg_parser_outputs['dtype'].is_none_expr}, {arg_parser_outputs['layout'].expr},
{arg_parser_outputs['device'].expr}, {arg_parser_outputs['device'].is_none_expr});
"""
)
# we'll set requires_grad on outgoing tensor
if "requires_grad" not in tensor_options_args_names:
raise RuntimeError(
f'{f.func}: expected "requires_grad" in tensor_options_args absent, but found [{tensor_options_args_names}]'
)
return DispatchLambdaArgumentExprs(
exprs=tuple(map(lambda a: lambda_args_exprs[a.name], lambda_args)),
inits=inits,
)
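# Illustrative sketch of the output for a hypothetical factory-style op that
# carries TensorOptions (argument indices are illustrative; they depend on the
# parsed signature order). 'inits' would contain the packing block emitted above:
#
#   const auto options = TensorOptions()
#       .dtype(_r.scalartypeOptional(1))
#       .device(_r.deviceWithDefault(3, torch::tensors::get_default_device()))
#       .layout(_r.layoutOptional(2))
#       .requires_grad(_r.toBool(5))
#       .pinned_memory(_r.toBool(4));
#   torch::utils::maybe_initialize_cuda(options);
#
# while 'exprs' binds the corresponding lambda argument to "options".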
| pytorch-master | torchgen/api/python.py |
from typing import Any, Dict, List, Optional, Tuple, Union
from torchgen.api.types import (
BaseCppType,
BaseCType,
boolT,
CType,
deviceT,
doubleT,
layoutT,
ListCType,
longT,
memoryFormatT,
NamedCType,
OptionalCType,
scalarT,
scalarTypeT,
stringT,
SymIntT,
VectorCType,
)
from torchgen.model import (
Argument,
BaseTy,
BaseType,
FunctionSchema,
ListType,
OperatorName,
OptionalType,
Return,
TensorOptionsArguments,
Type,
)
_valueT = None
def getValueT() -> BaseCppType:
global _valueT
if not _valueT:
raise NotImplementedError(
"The value type needs to be set with setValueT() in run_gen_lazy_tensor()"
)
return _valueT
def setValueT(val: BaseCppType) -> None:
global _valueT
_valueT = val
# this is a bad hack. I need to refactor the data model to represent each arg in the schema as an object,
# making it easier to represent special properties of an arg.
tensorListValueT = BaseCppType("torch::lazy", "Value")
def process_ir_type(
typ: Type, properties: "LazyIrProperties"
) -> Union[BaseCType, VectorCType, OptionalCType, ListCType]:
"""
This function takes a type from NativeFunctions and converts it for use with
lazy tensor codegen.
Type conversion for lazy currently consists of
(1) changing at::Tensors into lazy::Values
(2) wrapping everything in a BaseCType
(3) making cpp-reference types into cpp-value types (e.g. vector instead of IntArrayRef)
(1) converts at::Tensors to lazy::Values (which wrap lazy::Nodes, with which Lazy IR represents tensors.)
    There is special handling for Optional[Tensor] or List[Tensor], etc. - hence 'tensor-like'.
    This is incomplete - there are assertions in places because it is expected that more
    types will need to be added as the codegen is used with more operators.
"""
if isinstance(typ, BaseType):
if typ.name == BaseTy.Tensor:
return BaseCType(getValueT())
elif typ.name == BaseTy.Scalar:
if properties.TreatScalarsAsConstants:
return BaseCType(scalarT)
# at::scalar has special handling,
            # and is wrapped in a lazy::Value just like at::tensor
return BaseCType(getValueT())
elif typ.name == BaseTy.ScalarType:
return BaseCType(scalarTypeT)
elif typ.name == BaseTy.int:
return BaseCType(longT)
elif typ.name == BaseTy.SymInt:
return BaseCType(getValueT())
elif typ.name == BaseTy.bool:
return BaseCType(boolT)
elif typ.name == BaseTy.float:
return BaseCType(doubleT)
elif typ.name == BaseTy.str:
return BaseCType(stringT)
elif typ.name == BaseTy.Device:
return BaseCType(deviceT)
elif typ.name == BaseTy.Layout:
return BaseCType(layoutT)
elif typ.name == BaseTy.MemoryFormat:
return BaseCType(memoryFormatT)
else:
raise AssertionError(f"TODO add support for type {repr(typ)}")
elif isinstance(typ, OptionalType):
return OptionalCType(process_ir_type(typ.elem, properties))
elif isinstance(typ, ListType):
if str(typ.elem) == "Tensor?":
# TODO(whc) is this actually correct? or should it use a Vector like above
return ListCType(OptionalCType(BaseCType(getValueT())))
elif str(typ.elem) == "Tensor":
# this is a TensorList which comes in from GetTensorList as a Value
return BaseCType(tensorListValueT)
else:
return VectorCType(process_ir_type(typ.elem, properties))
else:
raise AssertionError(f"unrecognized type {repr(typ)}")
def isValueType(typ: CType, properties: "Optional[LazyIrProperties]" = None) -> bool:
"""
Given a type, determine if it is a Value-like type. This is equivalent to
being Tensor-like, but assumes the type has already been transformed.
"""
if isinstance(typ, BaseCType):
# I am regretting my naming conventions, but now we are wrapping at::scalar in
# lazy value, while preserving other 'scalar' types as scalars in the IR
treat_scalars_as_constants = properties and properties.TreatScalarsAsConstants
return (
typ.type == getValueT()
or (typ.type == scalarT and not treat_scalars_as_constants)
or typ.type == SymIntT
)
elif isinstance(typ, (OptionalCType, ListCType, VectorCType)):
return isValueType(typ.elem, properties)
return False
def isSymIntType(typ: Type) -> bool:
return isinstance(typ, BaseType) and typ.name == BaseTy.SymInt
def isWrappedScalarType(typ: Type) -> bool:
"""
Given a type, determine if it is a c10::scalar which we will wrap in a lazy Value.
Since we literally change the type from scalarT to valueT, information is lost.
This function helps build a list of wrapped scalars to save that information
"""
if isinstance(typ, BaseType):
# I am regretting my naming conventions, but now we are wrapping at::scalar in
# lazy value, while preserving other 'scalar' types as scalars in the IR
return typ.name == BaseTy.Scalar
elif isinstance(typ, (OptionalType, ListType)):
return isWrappedScalarType(typ.elem)
return False
def isGeneratorType(typ: Type) -> bool:
if isinstance(typ, BaseType):
return typ.name == BaseTy.Generator
elif isinstance(typ, (OptionalType)):
return isGeneratorType(typ.elem)
return False
class LazyArgument:
name: str
orig_type: Type
lazy_type_: Optional[CType]
is_wrapped_scalar: bool
is_generator: bool
is_symint_or_list: bool
# true if this argument is or contains a lazy IR value
is_lazy_value: bool
def __init__(self, arg: Argument, properties: "LazyIrProperties"):
self.name = arg.name
self.orig_type = arg.type
self.is_optional = isinstance(arg.type, OptionalType)
self.is_generator = isGeneratorType(arg.type)
if self.is_generator:
assert (
self.is_optional
), "We expect all generators are optional since currently they are"
            # there is no handling for generators in TorchScript IR (or XLA),
            # so we fall back to eager if the (optional) generator has a value;
            # otherwise it is null and safe to exclude from lazy IR
self.lazy_type_ = None
else:
self.lazy_type_ = process_ir_type(arg.type, properties)
self.is_wrapped_scalar = isWrappedScalarType(arg.type)
self.is_symint_or_list = isSymIntType(arg.type)
self.is_lazy_value = not self.is_generator and isValueType(
self.lazy_type, properties
)
@property
def lazy_type(self) -> CType:
assert (
self.lazy_type_ is not None
), f"Attempted to access lazy_type for invalid argument {self.name}"
return self.lazy_type_
class LazyIrProperties:
"""Collection of properties for an IR node
The property groups are listed below. Each group is mutually
exclusive, meaning that only one property from each group can be True
at any one time. The properties can be accessed as if they were normal
attributes. The mutual exclusivity is automatically handled.
"""
Properties: Tuple[Tuple[str, ...], ...] = (
(
"ShapePrecompute", # Assume shape has been precomputed
"ShapeCompute", # Need to compute the shape on construction
"ShapeCache", # Utilize the shape cache to defer computation
),
(
"Lower", # Codegen full lower function
"LowerDeclOnly", # Codegen only lower function declaration
),
(
"CanBeReused", # Codegen full reuse function
"CanBeReusedDeclOnly", # Codegen only reuse function declaration
),
(
"CreateFn", # Codegen full create function
"CreateFnDeclOnly", # Codegen only create function declaration
),
(
"TreatScalarsAsConstants", # Treat Scalars as constants instead of handling like values
),
)
def __init__(self, *default_properties: str):
properties: Dict[Tuple[str, ...], Optional[str]] = {
p: None for p in LazyIrProperties.Properties
}
self.__dict__["properties"] = properties
for p in default_properties:
setattr(self, p, True)
def __getattr__(self, key: str) -> Any:
properties = self.__dict__["properties"]
for values in LazyIrProperties.Properties:
if key in values:
return properties[values] == key
return self.__getattribute__(key)
def __setattr__(self, key: str, value: Any) -> Any:
properties = self.__dict__["properties"]
for values in LazyIrProperties.Properties:
if key in values:
properties[values] = key if value else None
return value
raise KeyError(f"Invalid property: {key}")
# Inspired by a FunctionSchema object, a LazyIrSchema holds the schema of a Lazy IR node.
# Unlike a FunctionSchema, it has no round-trippable string form (relating to the YAML),
# but carries type information from a native FunctionSchema modified for use with IR nodes,
# and preserving original argument names.
class LazyIrSchema:
# The name of the operator this function schema describes.
name: "OperatorName"
positional_args: Tuple[LazyArgument, ...]
keyword_args: Tuple[LazyArgument, ...]
# TODO: Need to handle collisions with argument names at some point
returns: Tuple["Return", ...]
# if this schema has a Generator arg, list its orig ctype/name but don't
# build a LazyArgument since lazy IR doesn't support it
generator_arg: Optional[NamedCType] = None
properties: LazyIrProperties = LazyIrProperties(
# default properties
"ShapePrecompute",
"Lower",
"CanBeReused",
)
opkind: Optional[str] = None
def __init__(
self, func: FunctionSchema, properties: Optional[LazyIrProperties] = None
):
if properties:
self.properties = properties
positional_args: List[LazyArgument] = []
for arg_field in ["pre_self_positional", "self_arg", "post_self_positional"]:
if arg_field == "self_arg" and func.arguments.self_arg is not None:
arg = getattr(func.arguments, "self_arg").argument
positional_args.append(LazyArgument(arg, self.properties))
elif getattr(func.arguments, arg_field) is not None:
positional_args.extend(
LazyArgument(arg, self.properties)
for arg in getattr(func.arguments, arg_field)
)
self.positional_args = tuple(positional_args)
keyword_args: List[LazyArgument] = []
for arg_field in [
"pre_tensor_options_kwarg_only",
"tensor_options",
"post_tensor_options_kwarg_only",
"out",
]:
curr_args = getattr(func.arguments, arg_field)
if curr_args is not None:
if isinstance(curr_args, TensorOptionsArguments):
curr_args = curr_args.all()
for arg in curr_args:
if isGeneratorType(arg.type):
assert (
self.generator_arg is None
), "We expect there is only one generator arg"
self.generator_arg = NamedCType(arg.name, arg.type)
keyword_args.extend(
LazyArgument(arg, self.properties) for arg in curr_args
)
self.keyword_args = tuple(keyword_args)
self.name = func.name
self.returns = func.returns
@property
def node_name(self) -> str:
"""
Return camel-case version of op in node.
Note: This function also appends any `overload_name` in the operation.
For example, if the op is `bitwise_and.Tensor`, the returned name
will be `BitwiseAndTensor`.
"""
op_name = f"{self.name.name}_{self.name.overload_name}".lower()
return "".join(word.capitalize() or "" for word in op_name.split("_"))
@property
def aten_name(self) -> str:
return str(self.name.name)
@property
def base_name(self) -> str:
return f"{self.name.name.base}"
def filtered_args(
self,
positional: bool = True,
keyword: bool = True,
values: bool = True,
scalars: bool = True,
generator: bool = False,
) -> List[LazyArgument]:
# This function maintains the sorted order of arguments but provides different filtered views.
# Some parts of the code care about kwargs vs args (TS lowerings),
# other parts care about whether they need to wrap the arg in a lazy value or leave it alone.
# Generators are special cased, as they are needed for fallback/shape-inference but not supported
# in TS lowerings and therefore also omitted from lazy IR.
args: List[LazyArgument] = []
if positional:
args.extend(self.positional_args)
if keyword:
args.extend(self.keyword_args)
if values and scalars and generator:
return args
elif values and scalars:
return [a for a in args if not a.is_generator]
elif values:
return [a for a in args if a.is_lazy_value]
elif scalars:
return [
a
for a in args
if not a.is_lazy_value and (generator or not a.is_generator)
]
return []
@property
def positional_values(self) -> List[LazyArgument]:
return self.filtered_args(
positional=True, keyword=False, values=True, scalars=False
)
@property
def positional_scalars(self) -> List[LazyArgument]:
return self.filtered_args(
positional=True, keyword=False, values=False, scalars=True
)
@property
def keyword_values(self) -> List[LazyArgument]:
return self.filtered_args(
positional=False, keyword=True, values=True, scalars=False
)
@property
def keyword_scalars(self) -> List[LazyArgument]:
return self.filtered_args(
positional=False, keyword=True, values=False, scalars=True
)
| pytorch-master | torchgen/api/lazy.py |
from torchgen.model import NativeFunctionsGroup
# Follows dispatcher calling convention, but:
# - Mutable arguments not allowed. Meta functions are always
# written in functional form. Look at FunctionSchema.signature()
# - No tensor returns; instead we return a TensorMeta describing
# the tensor in question
def name(g: NativeFunctionsGroup) -> str:
# use the overload name from the functional version
return str(g.functional.func.name).replace(".", "_")
| pytorch-master | torchgen/api/meta.py |
import re
import sys
from pathlib import Path
from mypy.plugin import Plugin
def get_correct_mypy_version():
# there's probably a more elegant way to do this
match, = re.finditer(
r'mypy==(\d+(?:\.\d+)*)',
Path('.circleci/docker/requirements-ci.txt').read_text(),
)
version, = match.groups()
return version
def plugin(version: str):
correct_version = get_correct_mypy_version()
if version != correct_version:
print(f'''\
You are using mypy version {version}, which is not supported
in the PyTorch repo. Please switch to mypy version {correct_version}.
For example, if you installed mypy via pip, run this:
pip install mypy=={correct_version}
Or if you installed mypy via conda, run this:
conda install -c conda-forge mypy={correct_version}
''', file=sys.stderr)
return Plugin
| pytorch-master | mypy_plugins/check_mypy_version.py |
import unittest
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep
from hypothesis import given, settings
dyndep.InitOpsLibrary("@/caffe2/modules/detectron:detectron_ops")
class TestUpsampleNearestOp(hu.HypothesisTestCase):
@given(
N=st.integers(1, 3),
H=st.integers(10, 300),
W=st.integers(10, 300),
scale=st.integers(1, 3),
**hu.gcs
)
@settings(deadline=None, max_examples=20)
def test_upsample_nearest_op(self, N, H, W, scale, gc, dc):
C = 32
X = np.random.randn(N, C, H, W).astype(np.float32)
op = core.CreateOperator("UpsampleNearest", ["X"], ["Y"], scale=scale)
def ref(X):
outH = H * scale
outW = W * scale
outH_idxs, outW_idxs = np.meshgrid(
np.arange(outH), np.arange(outW), indexing="ij"
)
inH_idxs = (outH_idxs / scale).astype(np.int32)
inW_idxs = (outW_idxs / scale).astype(np.int32)
Y = X[:, :, inH_idxs, inW_idxs]
return [Y]
self.assertReferenceChecks(device_option=gc, op=op, inputs=[X], reference=ref)
if __name__ == "__main__":
unittest.main()
| pytorch-master | modules/detectron/upsample_nearest_op_test.py |
#!/usr/bin/env python3
import os
import subprocess
import sys
import tempfile
import generate_config_yml
CHECKED_IN_FILE = "config.yml"
REGENERATION_SCRIPT = "regenerate.sh"
PARENT_DIR = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
README_PATH = os.path.join(PARENT_DIR, "README.md")
ERROR_MESSAGE_TEMPLATE = """
The checked-in CircleCI "%s" file does not match what was generated by the scripts.
Please re-run the "%s" script in the "%s" directory and commit the result. See "%s" for more information.
"""
def check_consistency():
_, temp_filename = tempfile.mkstemp("-generated-config.yml")
with open(temp_filename, "w") as fh:
generate_config_yml.stitch_sources(fh)
try:
subprocess.check_call(["cmp", temp_filename, CHECKED_IN_FILE])
except subprocess.CalledProcessError:
sys.exit(ERROR_MESSAGE_TEMPLATE % (CHECKED_IN_FILE, REGENERATION_SCRIPT, PARENT_DIR, README_PATH))
finally:
os.remove(temp_filename)
if __name__ == "__main__":
check_consistency()
| pytorch-master | .circleci/ensure-consistency.py |
#!/usr/bin/env python3
"""
This script is the source of truth for config.yml.
Please see README.md in this directory for details.
"""
import os
import shutil
import sys
from collections import namedtuple
import cimodel.data.simple.docker_definitions
import cimodel.data.simple.mobile_definitions
import cimodel.data.simple.nightly_ios
import cimodel.data.simple.anaconda_prune_defintions
import cimodel.lib.miniutils as miniutils
import cimodel.lib.miniyaml as miniyaml
class File(object):
"""
Verbatim copy the contents of a file into config.yml
"""
def __init__(self, filename):
self.filename = filename
def write(self, output_filehandle):
with open(os.path.join("verbatim-sources", self.filename)) as fh:
shutil.copyfileobj(fh, output_filehandle)
class FunctionGen(namedtuple("FunctionGen", "function depth")):
__slots__ = ()
class Treegen(FunctionGen):
"""
Insert the content of a YAML tree into config.yml
"""
def write(self, output_filehandle):
miniyaml.render(output_filehandle, self.function(), self.depth)
class Listgen(FunctionGen):
"""
Insert the content of a YAML list into config.yml
"""
def write(self, output_filehandle):
miniyaml.render(output_filehandle, self.function(), self.depth)
def horizontal_rule():
return "".join("#" * 78)
class Header(object):
def __init__(self, title, summary=None):
self.title = title
self.summary_lines = summary or []
def write(self, output_filehandle):
text_lines = [self.title] + self.summary_lines
comment_lines = ["# " + x for x in text_lines]
lines = miniutils.sandwich([horizontal_rule()], comment_lines)
for line in filter(None, lines):
output_filehandle.write(line + "\n")
def _for_all_items(items, functor) -> None:
if isinstance(items, list):
for item in items:
_for_all_items(item, functor)
if isinstance(items, dict) and len(items) == 1:
item_type, item = next(iter(items.items()))
functor(item_type, item)
def filter_master_only_jobs(items):
def _is_main_or_master_item(item):
filters = item.get('filters', None)
branches = filters.get('branches', None) if filters is not None else None
branches_only = branches.get('only', None) if branches is not None else None
return ('main' in branches_only or 'master' in branches_only) if branches_only is not None else False
master_deps = set()
def _save_requires_if_master(item_type, item):
requires = item.get('requires', None)
item_name = item.get("name", None)
if not isinstance(requires, list):
return
if _is_main_or_master_item(item) or item_name in master_deps:
master_deps.update([n.strip('"') for n in requires])
def _do_filtering(items):
if isinstance(items, list):
rc = [_do_filtering(item) for item in items]
return [item for item in rc if len(item if item is not None else []) > 0]
assert isinstance(items, dict) and len(items) == 1
item_type, item = next(iter(items.items()))
item_name = item.get("name", None)
item_name = item_name.strip('"') if item_name is not None else None
if not _is_main_or_master_item(item) and item_name not in master_deps:
return None
if 'filters' in item:
item = item.copy()
item.pop('filters')
return {item_type: item}
    # Scan dependencies twice to pick up nested required jobs,
    # i.e. jobs depending on jobs that main-only jobs depend on
_for_all_items(items, _save_requires_if_master)
_for_all_items(items, _save_requires_if_master)
return _do_filtering(items)
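# Illustrative sketch (hypothetical job names): given
#   [{"some_job": {"name": "job_a", "requires": ["job_b"],
#                  "filters": {"branches": {"only": ["master"]}}}},
#    {"some_job": {"name": "job_b"}},
#    {"some_job": {"name": "job_c"}}]
# the filtered result keeps job_a (with its 'filters' key stripped) and job_b
# (a transitive dependency of a master-only job), and drops job_c.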
def generate_required_docker_images(items):
required_docker_images = set()
def _requires_docker_image(item_type, item):
requires = item.get('requires', None)
if not isinstance(requires, list):
return
for requirement in requires:
requirement = requirement.replace('"', '')
if requirement.startswith('docker-'):
required_docker_images.add(requirement)
_for_all_items(items, _requires_docker_image)
return required_docker_images
def gen_build_workflows_tree():
build_workflows_functions = [
cimodel.data.simple.mobile_definitions.get_workflow_jobs,
cimodel.data.simple.nightly_ios.get_workflow_jobs,
cimodel.data.simple.anaconda_prune_defintions.get_workflow_jobs,
]
build_jobs = [f() for f in build_workflows_functions]
build_jobs.extend(
cimodel.data.simple.docker_definitions.get_workflow_jobs(
# sort for consistency
sorted(generate_required_docker_images(build_jobs))
)
)
master_build_jobs = filter_master_only_jobs(build_jobs)
rc = {
"workflows": {
"build": {
"when": r"<< pipeline.parameters.run_build >>",
"jobs": build_jobs,
},
}
}
if len(master_build_jobs) > 0:
rc["workflows"]["master_build"] = {
"when": r"<< pipeline.parameters.run_master_build >>",
"jobs": master_build_jobs,
}
return rc
# Order of this list matters to the generated config.yml.
YAML_SOURCES = [
File("header-section.yml"),
File("commands.yml"),
File("nightly-binary-build-defaults.yml"),
Header("Build parameters"),
File("build-parameters/pytorch-build-params.yml"),
File("build-parameters/binary-build-params.yml"),
Header("Job specs"),
File("job-specs/binary-job-specs.yml"),
File("job-specs/job-specs-custom.yml"),
File("job-specs/binary_update_htmls.yml"),
File("job-specs/binary-build-tests.yml"),
File("job-specs/docker_jobs.yml"),
Header("Workflows"),
Treegen(gen_build_workflows_tree, 0),
]
def stitch_sources(output_filehandle):
for f in YAML_SOURCES:
f.write(output_filehandle)
if __name__ == "__main__":
stitch_sources(sys.stdout)
| pytorch-master | .circleci/generate_config_yml.py |
| pytorch-master | .circleci/cimodel/__init__.py |
| pytorch-master | .circleci/cimodel/lib/__init__.py |
from dataclasses import dataclass, field
from typing import Optional, Dict
def X(val):
"""
Compact way to write a leaf node
"""
return val, []
def XImportant(name):
"""Compact way to write an important (run on PRs) leaf node"""
return (name, [("important", [X(True)])])
@dataclass
class Ver:
"""
Represents a product with a version number
"""
name: str
version: str = ""
def __str__(self):
return self.name + self.version
@dataclass
class ConfigNode:
parent: Optional['ConfigNode']
node_name: str
props: Dict[str, str] = field(default_factory=dict)
def get_label(self):
return self.node_name
# noinspection PyMethodMayBeStatic
def get_children(self):
return []
def get_parents(self):
return (self.parent.get_parents() + [self.parent.get_label()]) if self.parent else []
def get_depth(self):
return len(self.get_parents())
def get_node_key(self):
return "%".join(self.get_parents() + [self.get_label()])
def find_prop(self, propname, searched=None):
"""
Checks if its own dictionary has
the property, otherwise asks parent node.
"""
if searched is None:
searched = []
searched.append(self.node_name)
if propname in self.props:
return self.props[propname]
elif self.parent:
return self.parent.find_prop(propname, searched)
else:
# raise Exception('Property "%s" does not exist anywhere in the tree! Searched: %s' % (propname, searched))
return None
def dfs_recurse(
node,
leaf_callback=lambda x: None,
discovery_callback=lambda x, y, z: None,
child_callback=lambda x, y: None,
sibling_index=0,
sibling_count=1):
discovery_callback(node, sibling_index, sibling_count)
node_children = node.get_children()
if node_children:
for i, child in enumerate(node_children):
child_callback(node, child)
dfs_recurse(
child,
leaf_callback,
discovery_callback,
child_callback,
i,
len(node_children),
)
else:
leaf_callback(node)
def dfs(toplevel_config_node):
config_list = []
def leaf_callback(node):
config_list.append(node)
dfs_recurse(toplevel_config_node, leaf_callback)
return config_list
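# Illustrative sketch (hypothetical nodes): find_prop() walks up the tree, so a
# leaf can inherit a property from any ancestor:
#
#   root = ConfigNode(None, "root")
#   root.props["compiler_name"] = "gcc"
#   leaf = ConfigNode(root, "leaf")
#   leaf.find_prop("compiler_name")   # -> "gcc" (inherited from the parent)
#   leaf.get_parents()                # -> ["root"]
#
# dfs() then collects every leaf node reachable via get_children().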
| pytorch-master | .circleci/cimodel/lib/conf_tree.py |
def quote(s):
return sandwich('"', s)
def sandwich(bread, jam):
return bread + jam + bread
def override(word, substitutions):
return substitutions.get(word, word)
| pytorch-master | .circleci/cimodel/lib/miniutils.py |
from collections import OrderedDict
import cimodel.lib.miniutils as miniutils
LIST_MARKER = "- "
INDENTATION_WIDTH = 2
def is_dict(data):
return type(data) in [dict, OrderedDict]
def is_collection(data):
return is_dict(data) or type(data) is list
def render(fh, data, depth, is_list_member=False):
"""
PyYaml does not allow precise control over the quoting
behavior, especially for merge references.
Therefore, we use this custom YAML renderer.
"""
indentation = " " * INDENTATION_WIDTH * depth
if is_dict(data):
tuples = list(data.items())
if type(data) is not OrderedDict:
tuples.sort()
for i, (k, v) in enumerate(tuples):
if not v:
continue
# If this dict is itself a list member, the first key gets prefixed with a list marker
list_marker_prefix = LIST_MARKER if is_list_member and not i else ""
trailing_whitespace = "\n" if is_collection(v) else " "
fh.write(indentation + list_marker_prefix + k + ":" + trailing_whitespace)
render(fh, v, depth + 1 + int(is_list_member))
elif type(data) is list:
for v in data:
render(fh, v, depth, True)
else:
# use empty quotes to denote an empty string value instead of blank space
modified_data = miniutils.quote(data) if data == "" else data
list_member_prefix = indentation + LIST_MARKER if is_list_member else ""
fh.write(list_member_prefix + str(modified_data) + "\n")
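# Illustrative example (plain-Python input, output traced from the logic above):
#
#   render(sys.stdout, {"jobs": ["build", {"test": {"requires": ["build"]}}]}, 0)
#
# writes:
#
#   jobs:
#     - build
#     - test:
#         requires:
#           - build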
| pytorch-master | .circleci/cimodel/lib/miniyaml.py |
from cimodel.lib.conf_tree import ConfigNode
CONFIG_TREE_DATA = [
]
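# CONFIG_TREE_DATA is currently empty. When populated, each entry follows the
# node hierarchy defined by the classes below (distro -> compiler -> compiler
# version -> python version -> experimental features), e.g. a hypothetical
# entry built with the X/XImportant helpers from cimodel.lib.conf_tree:
#
#   ("bionic", [
#       ("gcc", [
#           ("9", [
#               XImportant("3.8"),
#           ]),
#       ]),
#   ]),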
def get_major_pyver(dotted_version):
parts = dotted_version.split(".")
return "py" + parts[0]
class TreeConfigNode(ConfigNode):
def __init__(self, parent, node_name, subtree):
super(TreeConfigNode, self).__init__(parent, self.modify_label(node_name))
self.subtree = subtree
self.init2(node_name)
def modify_label(self, label):
return label
def init2(self, node_name):
pass
def get_children(self):
return [self.child_constructor()(self, k, v) for (k, v) in self.subtree]
class TopLevelNode(TreeConfigNode):
def __init__(self, node_name, subtree):
super(TopLevelNode, self).__init__(None, node_name, subtree)
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return DistroConfigNode
class DistroConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["distro_name"] = node_name
def child_constructor(self):
distro = self.find_prop("distro_name")
next_nodes = {
"xenial": XenialCompilerConfigNode,
"bionic": BionicCompilerConfigNode,
}
return next_nodes[distro]
class PyVerConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["pyver"] = node_name
self.props["abbreviated_pyver"] = get_major_pyver(node_name)
if node_name == "3.9":
self.props["abbreviated_pyver"] = "py3.9"
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return ExperimentalFeatureConfigNode
class ExperimentalFeatureConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["experimental_feature"] = node_name
def child_constructor(self):
experimental_feature = self.find_prop("experimental_feature")
next_nodes = {
"asan": AsanConfigNode,
"xla": XlaConfigNode,
"mps": MPSConfigNode,
"vulkan": VulkanConfigNode,
"parallel_tbb": ParallelTBBConfigNode,
"crossref": CrossRefConfigNode,
"dynamo": DynamoConfigNode,
"parallel_native": ParallelNativeConfigNode,
"onnx": ONNXConfigNode,
"libtorch": LibTorchConfigNode,
"important": ImportantConfigNode,
"build_only": BuildOnlyConfigNode,
"shard_test": ShardTestConfigNode,
"cuda_gcc_override": CudaGccOverrideConfigNode,
"pure_torch": PureTorchConfigNode,
"slow_gradcheck": SlowGradcheckConfigNode,
}
return next_nodes[experimental_feature]
class SlowGradcheckConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["is_slow_gradcheck"] = True
def child_constructor(self):
return ExperimentalFeatureConfigNode
class PureTorchConfigNode(TreeConfigNode):
def modify_label(self, label):
return "PURE_TORCH=" + str(label)
def init2(self, node_name):
self.props["is_pure_torch"] = node_name
def child_constructor(self):
return ImportantConfigNode
class XlaConfigNode(TreeConfigNode):
def modify_label(self, label):
return "XLA=" + str(label)
def init2(self, node_name):
self.props["is_xla"] = node_name
def child_constructor(self):
return ImportantConfigNode
class MPSConfigNode(TreeConfigNode):
def modify_label(self, label):
return "MPS=" + str(label)
def init2(self, node_name):
self.props["is_mps"] = node_name
def child_constructor(self):
return ImportantConfigNode
class AsanConfigNode(TreeConfigNode):
def modify_label(self, label):
return "Asan=" + str(label)
def init2(self, node_name):
self.props["is_asan"] = node_name
def child_constructor(self):
return ExperimentalFeatureConfigNode
class ONNXConfigNode(TreeConfigNode):
def modify_label(self, label):
return "Onnx=" + str(label)
def init2(self, node_name):
self.props["is_onnx"] = node_name
def child_constructor(self):
return ImportantConfigNode
class VulkanConfigNode(TreeConfigNode):
def modify_label(self, label):
return "Vulkan=" + str(label)
def init2(self, node_name):
self.props["is_vulkan"] = node_name
def child_constructor(self):
return ImportantConfigNode
class ParallelTBBConfigNode(TreeConfigNode):
def modify_label(self, label):
return "PARALLELTBB=" + str(label)
def init2(self, node_name):
self.props["parallel_backend"] = "paralleltbb"
def child_constructor(self):
return ImportantConfigNode
class CrossRefConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["is_crossref"] = node_name
def child_constructor(self):
return ImportantConfigNode
class DynamoConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["is_dynamo"] = node_name
def child_constructor(self):
return ImportantConfigNode
class ParallelNativeConfigNode(TreeConfigNode):
def modify_label(self, label):
return "PARALLELNATIVE=" + str(label)
def init2(self, node_name):
self.props["parallel_backend"] = "parallelnative"
def child_constructor(self):
return ImportantConfigNode
class LibTorchConfigNode(TreeConfigNode):
def modify_label(self, label):
return "BUILD_TEST_LIBTORCH=" + str(label)
def init2(self, node_name):
self.props["is_libtorch"] = node_name
def child_constructor(self):
return ExperimentalFeatureConfigNode
class CudaGccOverrideConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["cuda_gcc_override"] = node_name
def child_constructor(self):
return ExperimentalFeatureConfigNode
class BuildOnlyConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["build_only"] = node_name
def child_constructor(self):
return ExperimentalFeatureConfigNode
class ShardTestConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["shard_test"] = node_name
def child_constructor(self):
return ImportantConfigNode
class ImportantConfigNode(TreeConfigNode):
def modify_label(self, label):
return "IMPORTANT=" + str(label)
def init2(self, node_name):
self.props["is_important"] = node_name
def get_children(self):
return []
class XenialCompilerConfigNode(TreeConfigNode):
def modify_label(self, label):
return label or "<unspecified>"
def init2(self, node_name):
self.props["compiler_name"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return XenialCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode
class BionicCompilerConfigNode(TreeConfigNode):
def modify_label(self, label):
return label or "<unspecified>"
def init2(self, node_name):
self.props["compiler_name"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return BionicCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode
class XenialCompilerVersionConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_version"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return PyVerConfigNode
class BionicCompilerVersionConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_version"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return PyVerConfigNode
| pytorch-master | .circleci/cimodel/data/pytorch_build_data.py |
from collections import OrderedDict
import cimodel.data.simple.util.branch_filters as branch_filters
import cimodel.data.binary_build_data as binary_build_data
import cimodel.lib.conf_tree as conf_tree
import cimodel.lib.miniutils as miniutils
class Conf(object):
def __init__(self, os, gpu_version, pydistro, parms, smoke, libtorch_variant, gcc_config_variant, libtorch_config_variant):
self.os = os
self.gpu_version = gpu_version
self.pydistro = pydistro
self.parms = parms
self.smoke = smoke
self.libtorch_variant = libtorch_variant
self.gcc_config_variant = gcc_config_variant
self.libtorch_config_variant = libtorch_config_variant
def gen_build_env_parms(self):
elems = [self.pydistro] + self.parms + [binary_build_data.get_processor_arch_name(self.gpu_version)]
if self.gcc_config_variant is not None:
elems.append(str(self.gcc_config_variant))
if self.libtorch_config_variant is not None:
elems.append(str(self.libtorch_config_variant))
return elems
def gen_docker_image(self):
if self.gcc_config_variant == 'gcc5.4_cxx11-abi':
if self.gpu_version is None:
return miniutils.quote("pytorch/libtorch-cxx11-builder:cpu")
else:
return miniutils.quote(
f"pytorch/libtorch-cxx11-builder:{self.gpu_version}"
)
if self.pydistro == "conda":
if self.gpu_version is None:
return miniutils.quote("pytorch/conda-builder:cpu")
else:
return miniutils.quote(
f"pytorch/conda-builder:{self.gpu_version}"
)
docker_word_substitution = {
"manywheel": "manylinux",
"libtorch": "manylinux",
}
docker_distro_prefix = miniutils.override(self.pydistro, docker_word_substitution)
# The cpu nightlies are built on the pytorch/manylinux-cuda102 docker image
# TODO cuda images should consolidate into tag-base images similar to rocm
alt_docker_suffix = "cuda102" if not self.gpu_version else (
"rocm:" + self.gpu_version.strip("rocm") if self.gpu_version.startswith("rocm") else self.gpu_version)
docker_distro_suffix = alt_docker_suffix if self.pydistro != "conda" else (
"cuda" if alt_docker_suffix.startswith("cuda") else "rocm")
return miniutils.quote("pytorch/" + docker_distro_prefix + "-" + docker_distro_suffix)
def get_name_prefix(self):
return "smoke" if self.smoke else "binary"
def gen_build_name(self, build_or_test, nightly):
parts = [self.get_name_prefix(), self.os] + self.gen_build_env_parms()
if nightly:
parts.append("nightly")
if self.libtorch_variant:
parts.append(self.libtorch_variant)
if not self.smoke:
parts.append(build_or_test)
joined = "_".join(parts)
return joined.replace(".", "_")
def gen_workflow_job(self, phase, upload_phase_dependency=None, nightly=False):
job_def = OrderedDict()
job_def["name"] = self.gen_build_name(phase, nightly)
job_def["build_environment"] = miniutils.quote(" ".join(self.gen_build_env_parms()))
if self.smoke:
job_def["requires"] = [
"update_s3_htmls",
]
job_def["filters"] = branch_filters.gen_filter_dict(
branches_list=["postnightly"],
)
else:
filter_branch = r"/.*/"
job_def["filters"] = branch_filters.gen_filter_dict(
branches_list=[filter_branch],
tags_list=[branch_filters.RC_PATTERN],
)
if self.libtorch_variant:
job_def["libtorch_variant"] = miniutils.quote(self.libtorch_variant)
if phase == "test":
if not self.smoke:
job_def["requires"] = [self.gen_build_name("build", nightly)]
if not (self.smoke and self.os == "macos") and self.os != "windows":
job_def["docker_image"] = self.gen_docker_image()
# fix this. only works on cuda not rocm
if self.os != "windows" and self.gpu_version:
job_def["use_cuda_docker_runtime"] = miniutils.quote("1")
else:
if self.os == "linux" and phase != "upload":
job_def["docker_image"] = self.gen_docker_image()
if phase == "test":
if self.gpu_version:
if self.os == "windows":
job_def["executor"] = "windows-with-nvidia-gpu"
else:
job_def["resource_class"] = "gpu.medium"
os_name = miniutils.override(self.os, {"macos": "mac"})
job_name = "_".join([self.get_name_prefix(), os_name, phase])
return {job_name : job_def}
def gen_upload_job(self, phase, requires_dependency):
"""Generate binary_upload job for configuration
Output looks similar to:
- binary_upload:
name: binary_linux_manywheel_3_7m_cu113_devtoolset7_nightly_upload
context: org-member
requires: binary_linux_manywheel_3_7m_cu113_devtoolset7_nightly_test
filters:
branches:
only:
- nightly
tags:
only: /v[0-9]+(\\.[0-9]+)*-rc[0-9]+/
package_type: manywheel
upload_subfolder: cu113
"""
return {
"binary_upload": OrderedDict({
"name": self.gen_build_name(phase, nightly=True),
"context": "org-member",
"requires": [self.gen_build_name(
requires_dependency,
nightly=True
)],
"filters": branch_filters.gen_filter_dict(
branches_list=["nightly"],
tags_list=[branch_filters.RC_PATTERN],
),
"package_type": self.pydistro,
"upload_subfolder": binary_build_data.get_processor_arch_name(
self.gpu_version,
),
})
}
def get_root(smoke, name):
return binary_build_data.TopLevelNode(
name,
binary_build_data.CONFIG_TREE_DATA,
smoke,
)
def gen_build_env_list(smoke):
root = get_root(smoke, "N/A")
config_list = conf_tree.dfs(root)
newlist = []
for c in config_list:
conf = Conf(
c.find_prop("os_name"),
c.find_prop("gpu"),
c.find_prop("package_format"),
[c.find_prop("pyver")],
c.find_prop("smoke") and not (c.find_prop("os_name") == "macos_arm64"), # don't test arm64
c.find_prop("libtorch_variant"),
c.find_prop("gcc_config_variant"),
c.find_prop("libtorch_config_variant"),
)
newlist.append(conf)
return newlist
def predicate_exclude_macos(config):
return config.os == "linux" or config.os == "windows"
def get_nightly_uploads():
configs = gen_build_env_list(False)
mylist = []
for conf in configs:
phase_dependency = "test" if predicate_exclude_macos(conf) else "build"
mylist.append(conf.gen_upload_job("upload", phase_dependency))
return mylist
def get_post_upload_jobs():
return [
{
"update_s3_htmls": {
"name": "update_s3_htmls",
"context": "org-member",
"filters": branch_filters.gen_filter_dict(
branches_list=["postnightly"],
),
},
},
]
def get_nightly_tests():
configs = gen_build_env_list(False)
filtered_configs = filter(predicate_exclude_macos, configs)
tests = []
for conf_options in filtered_configs:
yaml_item = conf_options.gen_workflow_job("test", nightly=True)
tests.append(yaml_item)
return tests
def get_jobs(toplevel_key, smoke):
jobs_list = []
configs = gen_build_env_list(smoke)
phase = "build" if toplevel_key == "binarybuilds" else "test"
for build_config in configs:
# don't test for macos_arm64 as it's cross compiled
if phase != "test" or build_config.os != "macos_arm64":
jobs_list.append(build_config.gen_workflow_job(phase, nightly=True))
return jobs_list
def get_binary_build_jobs():
return get_jobs("binarybuilds", False)
def get_binary_smoke_test_jobs():
return get_jobs("binarysmoketests", True)
| pytorch-master | .circleci/cimodel/data/binary_build_definitions.py |
PHASES = ["build", "test"]
CUDA_VERSIONS = [
"102",
"113",
"116",
"117",
]
ROCM_VERSIONS = [
"4.3.1",
"4.5.2",
]
ROCM_VERSION_LABELS = ["rocm" + v for v in ROCM_VERSIONS]
GPU_VERSIONS = [None] + ["cuda" + v for v in CUDA_VERSIONS] + ROCM_VERSION_LABELS
STANDARD_PYTHON_VERSIONS = [
"3.7",
"3.8",
"3.9",
"3.10"
]
| pytorch-master | .circleci/cimodel/data/dimensions.py |
pytorch-master | .circleci/cimodel/data/__init__.py |
|
from collections import OrderedDict
from dataclasses import dataclass, field
from typing import List, Optional
import cimodel.data.dimensions as dimensions
import cimodel.lib.conf_tree as conf_tree
import cimodel.lib.miniutils as miniutils
from cimodel.data.pytorch_build_data import CONFIG_TREE_DATA, TopLevelNode
from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN
from cimodel.data.simple.util.docker_constants import gen_docker_image
@dataclass
class Conf:
distro: str
parms: List[str]
parms_list_ignored_for_docker_image: Optional[List[str]] = None
pyver: Optional[str] = None
cuda_version: Optional[str] = None
rocm_version: Optional[str] = None
# TODO expand this to cover all the USE_* that we want to test for
    # tensorrt, leveldb, lmdb, redis, opencv, mkldnn, ideep, etc.
# (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259453608)
is_xla: bool = False
is_vulkan: bool = False
is_pure_torch: bool = False
restrict_phases: Optional[List[str]] = None
gpu_resource: Optional[str] = None
dependent_tests: List = field(default_factory=list)
parent_build: Optional["Conf"] = None
is_libtorch: bool = False
is_important: bool = False
parallel_backend: Optional[str] = None
build_only: bool = False
@staticmethod
def is_test_phase(phase):
return "test" in phase
# TODO: Eliminate the special casing for docker paths
# In the short term, we *will* need to support special casing as docker images are merged for caffe2 and pytorch
def get_parms(self, for_docker):
leading = []
# We just don't run non-important jobs on pull requests;
# previously we also named them in a way to make it obvious
# if self.is_important and not for_docker:
# leading.append("AAA")
leading.append("pytorch")
if self.is_xla and not for_docker:
leading.append("xla")
if self.is_vulkan and not for_docker:
leading.append("vulkan")
if self.is_libtorch and not for_docker:
leading.append("libtorch")
if self.is_pure_torch and not for_docker:
leading.append("pure_torch")
if self.parallel_backend is not None and not for_docker:
leading.append(self.parallel_backend)
cuda_parms = []
if self.cuda_version:
cudnn = "cudnn8" if self.cuda_version.startswith("11.") else "cudnn7"
cuda_parms.extend(["cuda" + self.cuda_version, cudnn])
if self.rocm_version:
cuda_parms.extend([f"rocm{self.rocm_version}"])
result = leading + ["linux", self.distro] + cuda_parms + self.parms
if not for_docker and self.parms_list_ignored_for_docker_image is not None:
result = result + self.parms_list_ignored_for_docker_image
return result
def gen_docker_image_path(self):
parms_source = self.parent_build or self
base_build_env_name = "-".join(parms_source.get_parms(True))
image_name, _ = gen_docker_image(base_build_env_name)
return miniutils.quote(image_name)
def gen_docker_image_requires(self):
parms_source = self.parent_build or self
base_build_env_name = "-".join(parms_source.get_parms(True))
_, requires = gen_docker_image(base_build_env_name)
return miniutils.quote(requires)
def get_build_job_name_pieces(self, build_or_test):
return self.get_parms(False) + [build_or_test]
def gen_build_name(self, build_or_test):
return (
("_".join(map(str, self.get_build_job_name_pieces(build_or_test))))
.replace(".", "_")
.replace("-", "_")
)
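    # Illustrative example (not from the original source): a Conf with distro="xenial"
    # and parms=["py3.7", "gcc5.4"] renders the build phase as
    # "pytorch_linux_xenial_py3_7_gcc5_4_build".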
def get_dependents(self):
return self.dependent_tests or []
def gen_workflow_params(self, phase):
parameters = OrderedDict()
build_job_name_pieces = self.get_build_job_name_pieces(phase)
build_env_name = "-".join(map(str, build_job_name_pieces))
parameters["build_environment"] = miniutils.quote(build_env_name)
parameters["docker_image"] = self.gen_docker_image_path()
if Conf.is_test_phase(phase) and self.gpu_resource:
parameters["use_cuda_docker_runtime"] = miniutils.quote("1")
if Conf.is_test_phase(phase):
resource_class = "large"
if self.gpu_resource:
resource_class = "gpu." + self.gpu_resource
if self.rocm_version is not None:
resource_class = "pytorch/amd-gpu"
parameters["resource_class"] = resource_class
if phase == "build" and self.rocm_version is not None:
parameters["resource_class"] = "xlarge"
if hasattr(self, 'filters'):
parameters['filters'] = self.filters
if self.build_only:
parameters['build_only'] = miniutils.quote(str(int(True)))
return parameters
def gen_workflow_job(self, phase):
job_def = OrderedDict()
job_def["name"] = self.gen_build_name(phase)
if Conf.is_test_phase(phase):
# TODO When merging the caffe2 and pytorch jobs, it might be convenient for a while to make a
# caffe2 test job dependent on a pytorch build job. This way we could quickly dedup the repeated
# build of pytorch in the caffe2 build job, and just run the caffe2 tests off of a completed
# pytorch build job (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259452641)
dependency_build = self.parent_build or self
job_def["requires"] = [dependency_build.gen_build_name("build")]
job_name = "pytorch_linux_test"
else:
job_name = "pytorch_linux_build"
job_def["requires"] = [self.gen_docker_image_requires()]
if not self.is_important:
job_def["filters"] = gen_filter_dict()
job_def.update(self.gen_workflow_params(phase))
return {job_name: job_def}
# TODO This is a hack to special case some configs just for the workflow list
class HiddenConf(object):
def __init__(self, name, parent_build=None, filters=None):
self.name = name
self.parent_build = parent_build
self.filters = filters
def gen_workflow_job(self, phase):
return {
self.gen_build_name(phase): {
"requires": [self.parent_build.gen_build_name("build")],
"filters": self.filters,
}
}
def gen_build_name(self, _):
return self.name
class DocPushConf(object):
def __init__(self, name, parent_build=None, branch="master"):
self.name = name
self.parent_build = parent_build
self.branch = branch
def gen_workflow_job(self, phase):
return {
"pytorch_doc_push": {
"name": self.name,
"branch": self.branch,
"requires": [self.parent_build],
"context": "org-member",
"filters": gen_filter_dict(branches_list=["nightly"],
tags_list=RC_PATTERN)
}
}
def gen_docs_configs(xenial_parent_config):
configs = []
configs.append(
HiddenConf(
"pytorch_python_doc_build",
parent_build=xenial_parent_config,
filters=gen_filter_dict(branches_list=["master", "main", "nightly"],
tags_list=RC_PATTERN),
)
)
configs.append(
DocPushConf(
"pytorch_python_doc_push",
parent_build="pytorch_python_doc_build",
branch="site",
)
)
configs.append(
HiddenConf(
"pytorch_cpp_doc_build",
parent_build=xenial_parent_config,
filters=gen_filter_dict(branches_list=["master", "main", "nightly"],
tags_list=RC_PATTERN),
)
)
configs.append(
DocPushConf(
"pytorch_cpp_doc_push",
parent_build="pytorch_cpp_doc_build",
branch="master",
)
)
return configs
def get_root():
return TopLevelNode("PyTorch Builds", CONFIG_TREE_DATA)
def gen_tree():
root = get_root()
configs_list = conf_tree.dfs(root)
return configs_list
def instantiate_configs(only_slow_gradcheck):
config_list = []
root = get_root()
found_configs = conf_tree.dfs(root)
for fc in found_configs:
restrict_phases = None
distro_name = fc.find_prop("distro_name")
compiler_name = fc.find_prop("compiler_name")
compiler_version = fc.find_prop("compiler_version")
is_xla = fc.find_prop("is_xla") or False
is_asan = fc.find_prop("is_asan") or False
is_crossref = fc.find_prop("is_crossref") or False
is_dynamo = fc.find_prop("is_dynamo") or False
is_onnx = fc.find_prop("is_onnx") or False
is_pure_torch = fc.find_prop("is_pure_torch") or False
is_vulkan = fc.find_prop("is_vulkan") or False
is_slow_gradcheck = fc.find_prop("is_slow_gradcheck") or False
parms_list_ignored_for_docker_image = []
if only_slow_gradcheck ^ is_slow_gradcheck:
continue
python_version = None
if compiler_name == "cuda" or compiler_name == "android":
python_version = fc.find_prop("pyver")
parms_list = [fc.find_prop("abbreviated_pyver")]
else:
parms_list = ["py" + fc.find_prop("pyver")]
cuda_version = None
rocm_version = None
if compiler_name == "cuda":
cuda_version = fc.find_prop("compiler_version")
elif compiler_name == "rocm":
rocm_version = fc.find_prop("compiler_version")
restrict_phases = ["build", "test1", "test2", "caffe2_test"]
elif compiler_name == "android":
android_ndk_version = fc.find_prop("compiler_version")
# TODO: do we need clang to compile host binaries like protoc?
parms_list.append("clang5")
parms_list.append("android-ndk-" + android_ndk_version)
android_abi = fc.find_prop("android_abi")
parms_list_ignored_for_docker_image.append(android_abi)
restrict_phases = ["build"]
elif compiler_name:
gcc_version = compiler_name + (fc.find_prop("compiler_version") or "")
parms_list.append(gcc_version)
if is_asan:
parms_list.append("asan")
python_version = fc.find_prop("pyver")
parms_list[0] = fc.find_prop("abbreviated_pyver")
if is_crossref:
parms_list_ignored_for_docker_image.append("crossref")
if is_dynamo:
parms_list_ignored_for_docker_image.append("dynamo")
if is_onnx:
parms_list.append("onnx")
python_version = fc.find_prop("pyver")
parms_list[0] = fc.find_prop("abbreviated_pyver")
restrict_phases = ["build", "ort_test1", "ort_test2"]
if cuda_version:
cuda_gcc_version = fc.find_prop("cuda_gcc_override") or "gcc7"
parms_list.append(cuda_gcc_version)
is_libtorch = fc.find_prop("is_libtorch") or False
is_important = fc.find_prop("is_important") or False
parallel_backend = fc.find_prop("parallel_backend") or None
build_only = fc.find_prop("build_only") or False
shard_test = fc.find_prop("shard_test") or False
# TODO: fix pure_torch python test packaging issue.
if shard_test:
restrict_phases = ["build"] if restrict_phases is None else restrict_phases
restrict_phases.extend(["test1", "test2"])
if build_only or is_pure_torch:
restrict_phases = ["build"]
if is_slow_gradcheck:
parms_list_ignored_for_docker_image.append("old")
parms_list_ignored_for_docker_image.append("gradcheck")
gpu_resource = None
if cuda_version and cuda_version != "10":
gpu_resource = "medium"
c = Conf(
distro_name,
parms_list,
parms_list_ignored_for_docker_image,
python_version,
cuda_version,
rocm_version,
is_xla,
is_vulkan,
is_pure_torch,
restrict_phases,
gpu_resource,
is_libtorch=is_libtorch,
is_important=is_important,
parallel_backend=parallel_backend,
build_only=build_only,
)
# run docs builds on "pytorch-linux-xenial-py3.7-gcc5.4". Docs builds
# should run on a CPU-only build that runs on all PRs.
# XXX should this be updated to a more modern build?
if (
distro_name == "xenial"
and fc.find_prop("pyver") == "3.7"
and cuda_version is None
and parallel_backend is None
and not is_vulkan
and not is_pure_torch
and compiler_name == "gcc"
and fc.find_prop("compiler_version") == "5.4"
):
c.filters = gen_filter_dict(branches_list=r"/.*/",
tags_list=RC_PATTERN)
c.dependent_tests = gen_docs_configs(c)
config_list.append(c)
return config_list
def get_workflow_jobs(only_slow_gradcheck=False):
config_list = instantiate_configs(only_slow_gradcheck)
x = []
for conf_options in config_list:
phases = conf_options.restrict_phases or dimensions.PHASES
for phase in phases:
# TODO why does this not have a test?
if Conf.is_test_phase(phase) and conf_options.cuda_version == "10":
continue
x.append(conf_options.gen_workflow_job(phase))
# TODO convert to recursion
for conf in conf_options.get_dependents():
x.append(conf.gen_workflow_job("test"))
return x
| pytorch-master | .circleci/cimodel/data/pytorch_build_definitions.py |
"""
This module models the tree of configuration variants
for "smoketest" builds.
Each subclass of ConfigNode represents a layer of the configuration hierarchy.
These tree nodes encapsulate the logic for whether a branch of the hierarchy
should be "pruned".
"""
from collections import OrderedDict
from cimodel.lib.conf_tree import ConfigNode
import cimodel.data.dimensions as dimensions
LINKING_DIMENSIONS = [
"shared",
"static",
]
DEPS_INCLUSION_DIMENSIONS = [
"with-deps",
"without-deps",
]
def get_processor_arch_name(gpu_version):
return "cpu" if not gpu_version else (
"cu" + gpu_version.strip("cuda") if gpu_version.startswith("cuda") else gpu_version
)
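# Illustrative mapping (added for clarity): None -> "cpu", "cuda113" -> "cu113",
# "rocm4.3.1" -> "rocm4.3.1".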
CONFIG_TREE_DATA = OrderedDict(
)
# GCC config variants:
#
# All the nightlies (except libtorch with new gcc ABI) are built with devtoolset7,
# which can only build with old gcc ABI. It is better than devtoolset3
# because it understands avx512, which is needed for good fbgemm performance.
#
# Libtorch with new gcc ABI is built with gcc 5.4 on Ubuntu 16.04.
LINUX_GCC_CONFIG_VARIANTS = OrderedDict(
manywheel=['devtoolset7'],
conda=['devtoolset7'],
libtorch=[
"devtoolset7",
"gcc5.4_cxx11-abi",
],
)
WINDOWS_LIBTORCH_CONFIG_VARIANTS = [
"debug",
"release",
]
class TopLevelNode(ConfigNode):
def __init__(self, node_name, config_tree_data, smoke):
super(TopLevelNode, self).__init__(None, node_name)
self.config_tree_data = config_tree_data
self.props["smoke"] = smoke
def get_children(self):
return [OSConfigNode(self, x, c, p) for (x, (c, p)) in self.config_tree_data.items()]
class OSConfigNode(ConfigNode):
def __init__(self, parent, os_name, gpu_versions, py_tree):
super(OSConfigNode, self).__init__(parent, os_name)
self.py_tree = py_tree
self.props["os_name"] = os_name
self.props["gpu_versions"] = gpu_versions
def get_children(self):
return [PackageFormatConfigNode(self, k, v) for k, v in self.py_tree.items()]
class PackageFormatConfigNode(ConfigNode):
def __init__(self, parent, package_format, python_versions):
super(PackageFormatConfigNode, self).__init__(parent, package_format)
self.props["python_versions"] = python_versions
self.props["package_format"] = package_format
def get_children(self):
if self.find_prop("os_name") == "linux":
return [LinuxGccConfigNode(self, v) for v in LINUX_GCC_CONFIG_VARIANTS[self.find_prop("package_format")]]
elif self.find_prop("os_name") == "windows" and self.find_prop("package_format") == "libtorch":
return [WindowsLibtorchConfigNode(self, v) for v in WINDOWS_LIBTORCH_CONFIG_VARIANTS]
else:
return [ArchConfigNode(self, v) for v in self.find_prop("gpu_versions")]
class LinuxGccConfigNode(ConfigNode):
def __init__(self, parent, gcc_config_variant):
super(LinuxGccConfigNode, self).__init__(parent, "GCC_CONFIG_VARIANT=" + str(gcc_config_variant))
self.props["gcc_config_variant"] = gcc_config_variant
def get_children(self):
gpu_versions = self.find_prop("gpu_versions")
# XXX devtoolset7 on CUDA 9.0 is temporarily disabled
# see https://github.com/pytorch/pytorch/issues/20066
if self.find_prop("gcc_config_variant") == 'devtoolset7':
gpu_versions = filter(lambda x: x != "cuda_90", gpu_versions)
# XXX disabling conda rocm build since docker images are not there
if self.find_prop("package_format") == 'conda':
gpu_versions = filter(lambda x: x not in dimensions.ROCM_VERSION_LABELS, gpu_versions)
# XXX libtorch rocm build is temporarily disabled
if self.find_prop("package_format") == 'libtorch':
gpu_versions = filter(lambda x: x not in dimensions.ROCM_VERSION_LABELS, gpu_versions)
return [ArchConfigNode(self, v) for v in gpu_versions]
class WindowsLibtorchConfigNode(ConfigNode):
def __init__(self, parent, libtorch_config_variant):
super(WindowsLibtorchConfigNode, self).__init__(parent, "LIBTORCH_CONFIG_VARIANT=" + str(libtorch_config_variant))
self.props["libtorch_config_variant"] = libtorch_config_variant
def get_children(self):
return [ArchConfigNode(self, v) for v in self.find_prop("gpu_versions")]
class ArchConfigNode(ConfigNode):
def __init__(self, parent, gpu):
super(ArchConfigNode, self).__init__(parent, get_processor_arch_name(gpu))
self.props["gpu"] = gpu
def get_children(self):
return [PyVersionConfigNode(self, v) for v in self.find_prop("python_versions")]
class PyVersionConfigNode(ConfigNode):
def __init__(self, parent, pyver):
super(PyVersionConfigNode, self).__init__(parent, pyver)
self.props["pyver"] = pyver
def get_children(self):
package_format = self.find_prop("package_format")
os_name = self.find_prop("os_name")
has_libtorch_variants = package_format == "libtorch" and os_name == "linux"
linking_variants = LINKING_DIMENSIONS if has_libtorch_variants else []
return [LinkingVariantConfigNode(self, v) for v in linking_variants]
class LinkingVariantConfigNode(ConfigNode):
def __init__(self, parent, linking_variant):
super(LinkingVariantConfigNode, self).__init__(parent, linking_variant)
def get_children(self):
return [DependencyInclusionConfigNode(self, v) for v in DEPS_INCLUSION_DIMENSIONS]
class DependencyInclusionConfigNode(ConfigNode):
def __init__(self, parent, deps_variant):
super(DependencyInclusionConfigNode, self).__init__(parent, deps_variant)
self.props["libtorch_variant"] = "-".join([self.parent.get_label(), self.get_label()])
| pytorch-master | .circleci/cimodel/data/binary_build_data.py |
pytorch-master | .circleci/cimodel/data/simple/__init__.py |
|
from cimodel.data.simple.util.versions import MultiPartVersion
import cimodel.lib.miniutils as miniutils
XCODE_VERSION = MultiPartVersion([12, 5, 1])
class ArchVariant:
def __init__(self, name, custom_build_name=""):
self.name = name
self.custom_build_name = custom_build_name
def render(self):
extra_parts = [self.custom_build_name] if len(self.custom_build_name) > 0 else []
return "_".join([self.name] + extra_parts)
def get_platform(arch_variant_name):
return "SIMULATOR" if arch_variant_name == "x86_64" else "OS"
class IOSJob:
def __init__(self, xcode_version, arch_variant, is_org_member_context=True, extra_props=None):
self.xcode_version = xcode_version
self.arch_variant = arch_variant
self.is_org_member_context = is_org_member_context
self.extra_props = extra_props
def gen_name_parts(self, with_version_dots):
version_parts = self.xcode_version.render_dots_or_parts(with_version_dots)
build_variant_suffix = "_".join([self.arch_variant.render(), "build"])
return [
"pytorch",
"ios",
] + version_parts + [
build_variant_suffix,
]
def gen_job_name(self):
return "_".join(self.gen_name_parts(False))
def gen_tree(self):
platform_name = get_platform(self.arch_variant.name)
props_dict = {
"build_environment": "-".join(self.gen_name_parts(True)),
"ios_arch": self.arch_variant.name,
"ios_platform": platform_name,
"name": self.gen_job_name(),
}
if self.is_org_member_context:
props_dict["context"] = "org-member"
if self.extra_props:
props_dict.update(self.extra_props)
return [{"pytorch_ios_build": props_dict}]
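# Illustrative example (not part of the original file): IOSJob(XCODE_VERSION,
# ArchVariant("arm64", "metal")) yields the job name
# "pytorch_ios_12_5_1_arm64_metal_build" and the build environment
# "pytorch-ios-12.5.1-arm64_metal_build".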
WORKFLOW_DATA = [
IOSJob(XCODE_VERSION, ArchVariant("x86_64"), is_org_member_context=False, extra_props={
"lite_interpreter": miniutils.quote(str(int(True)))}),
IOSJob(XCODE_VERSION, ArchVariant("x86_64", "full_jit"), is_org_member_context=False, extra_props={
"lite_interpreter": miniutils.quote(str(int(False)))}),
IOSJob(XCODE_VERSION, ArchVariant("arm64"), extra_props={
"lite_interpreter": miniutils.quote(str(int(True)))}),
IOSJob(XCODE_VERSION, ArchVariant("arm64", "metal"), extra_props={
"use_metal": miniutils.quote(str(int(True))),
"lite_interpreter": miniutils.quote(str(int(True)))}),
IOSJob(XCODE_VERSION, ArchVariant("arm64", "full_jit"), extra_props={
"lite_interpreter": miniutils.quote(str(int(False)))}),
IOSJob(XCODE_VERSION, ArchVariant("arm64", "custom"), extra_props={
"op_list": "mobilenetv2.yaml",
"lite_interpreter": miniutils.quote(str(int(True)))}),
IOSJob(XCODE_VERSION, ArchVariant("x86_64", "coreml"), is_org_member_context=False, extra_props={
"use_coreml": miniutils.quote(str(int(True))),
"lite_interpreter": miniutils.quote(str(int(True)))}),
IOSJob(XCODE_VERSION, ArchVariant("arm64", "coreml"), extra_props={
"use_coreml": miniutils.quote(str(int(True))),
"lite_interpreter": miniutils.quote(str(int(True)))}),
]
def get_workflow_jobs():
return [item.gen_tree() for item in WORKFLOW_DATA]
| pytorch-master | .circleci/cimodel/data/simple/ios_definitions.py |
class MacOsJob:
def __init__(self, os_version, is_build=False, is_test=False, extra_props=tuple()):
        # extra_props is a tuple because mutable data structures are not
        # recommended as argument defaults.
self.os_version = os_version
self.is_build = is_build
self.is_test = is_test
self.extra_props = dict(extra_props)
def gen_tree(self):
non_phase_parts = ["pytorch", "macos", self.os_version, "py3"]
extra_name_list = [name for name, exist in self.extra_props.items() if exist]
full_job_name_list = non_phase_parts + extra_name_list + [
'build' if self.is_build else None,
'test' if self.is_test else None,
]
full_job_name = "_".join(list(filter(None, full_job_name_list)))
test_build_dependency = "_".join(non_phase_parts + ["build"])
extra_dependencies = [test_build_dependency] if self.is_test else []
job_dependencies = extra_dependencies
        # Yes, we name the job after itself; it needs a non-empty value here
        # for the YAML output to work.
props_dict = {"requires": job_dependencies, "name": full_job_name}
return [{full_job_name: props_dict}]
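# Illustrative example (not part of the original file): MacOsJob("10_13", is_test=True)
# yields the job "pytorch_macos_10_13_py3_test", which requires
# "pytorch_macos_10_13_py3_build".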
WORKFLOW_DATA = [
MacOsJob("10_15", is_build=True),
MacOsJob("10_13", is_build=True),
MacOsJob(
"10_13",
is_build=False,
is_test=True,
),
MacOsJob(
"10_13",
is_build=True,
is_test=True,
extra_props=tuple({
"lite_interpreter": True
}.items()),
)
]
def get_workflow_jobs():
return [item.gen_tree() for item in WORKFLOW_DATA]
| pytorch-master | .circleci/cimodel/data/simple/macos_definitions.py |
from collections import OrderedDict
from cimodel.data.simple.util.branch_filters import gen_filter_dict
from cimodel.lib.miniutils import quote
CHANNELS_TO_PRUNE = ["pytorch-nightly", "pytorch-test"]
PACKAGES_TO_PRUNE = "pytorch torchvision torchaudio torchtext ignite torchcsprng"
def gen_workflow_job(channel: str):
return OrderedDict(
{
"anaconda_prune": OrderedDict(
{
"name": f"anaconda-prune-{channel}",
"context": quote("org-member"),
"packages": quote(PACKAGES_TO_PRUNE),
"channel": channel,
"filters": gen_filter_dict(branches_list=["postnightly"]),
}
)
}
)
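# Illustrative output (added for clarity): gen_workflow_job("pytorch-nightly") produces an
# "anaconda_prune" job named "anaconda-prune-pytorch-nightly", restricted to the
# "postnightly" branch, with the context and package list quoted via miniutils.quote.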
def get_workflow_jobs():
return [gen_workflow_job(channel) for channel in CHANNELS_TO_PRUNE]
| pytorch-master | .circleci/cimodel/data/simple/anaconda_prune_defintions.py |
"""
PyTorch Mobile PR builds (use linux host toolchain + mobile build options)
"""
import cimodel.lib.miniutils as miniutils
import cimodel.data.simple.util.branch_filters
class MobileJob:
def __init__(
self,
docker_image,
docker_requires,
variant_parts,
is_master_only=False):
self.docker_image = docker_image
self.docker_requires = docker_requires
self.variant_parts = variant_parts
self.is_master_only = is_master_only
def gen_tree(self):
non_phase_parts = [
"pytorch",
"linux",
"xenial",
"py3",
"clang5",
"mobile",
] + self.variant_parts
full_job_name = "_".join(non_phase_parts)
build_env_name = "-".join(non_phase_parts)
props_dict = {
"build_environment": build_env_name,
"build_only": miniutils.quote(str(int(True))),
"docker_image": self.docker_image,
"requires": self.docker_requires,
"name": full_job_name,
}
if self.is_master_only:
props_dict["filters"] = cimodel.data.simple.util.branch_filters.gen_filter_dict()
return [{"pytorch_linux_build": props_dict}]
WORKFLOW_DATA = [
]
def get_workflow_jobs():
return [item.gen_tree() for item in WORKFLOW_DATA]
| pytorch-master | .circleci/cimodel/data/simple/mobile_definitions.py |
import cimodel.data.simple.ios_definitions as ios_definitions
import cimodel.lib.miniutils as miniutils
class IOSNightlyJob:
def __init__(self,
variant,
is_full_jit=False,
is_upload=False):
self.variant = variant
self.is_full_jit = is_full_jit
self.is_upload = is_upload
def get_phase_name(self):
return "upload" if self.is_upload else "build"
def get_common_name_pieces(self, with_version_dots):
extra_name_suffix = [self.get_phase_name()] if self.is_upload else []
extra_name = ["full_jit"] if self.is_full_jit else []
common_name_pieces = [
"ios",
] + extra_name + [
] + ios_definitions.XCODE_VERSION.render_dots_or_parts(with_version_dots) + [
"nightly",
self.variant,
"build",
] + extra_name_suffix
return common_name_pieces
def gen_job_name(self):
return "_".join(["pytorch"] + self.get_common_name_pieces(False))
def gen_tree(self):
build_configs = BUILD_CONFIGS_FULL_JIT if self.is_full_jit else BUILD_CONFIGS
extra_requires = [x.gen_job_name() for x in build_configs] if self.is_upload else []
props_dict = {
"build_environment": "-".join(["libtorch"] + self.get_common_name_pieces(True)),
"requires": extra_requires,
"context": "org-member",
"filters": {"branches": {"only": "nightly"}},
}
if not self.is_upload:
props_dict["ios_arch"] = self.variant
props_dict["ios_platform"] = ios_definitions.get_platform(self.variant)
props_dict["name"] = self.gen_job_name()
props_dict["use_metal"] = miniutils.quote(str(int(True)))
props_dict["use_coreml"] = miniutils.quote(str(int(True)))
if self.is_full_jit:
props_dict["lite_interpreter"] = miniutils.quote(str(int(False)))
template_name = "_".join([
"binary",
"ios",
self.get_phase_name(),
])
return [{template_name: props_dict}]
BUILD_CONFIGS = [
IOSNightlyJob("x86_64"),
IOSNightlyJob("arm64"),
]
BUILD_CONFIGS_FULL_JIT = [
IOSNightlyJob("x86_64", is_full_jit=True),
IOSNightlyJob("arm64", is_full_jit=True),
]
WORKFLOW_DATA = BUILD_CONFIGS + BUILD_CONFIGS_FULL_JIT + [
IOSNightlyJob("binary", is_full_jit=False, is_upload=True),
IOSNightlyJob("binary", is_full_jit=True, is_upload=True),
]
def get_workflow_jobs():
return [item.gen_tree() for item in WORKFLOW_DATA]
| pytorch-master | .circleci/cimodel/data/simple/nightly_ios.py |
from collections import OrderedDict
from cimodel.lib.miniutils import quote
from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN
# NOTE: All hardcoded docker image builds have been migrated to GHA
IMAGE_NAMES = [
]
# This entry should be an element from the list above
# This should contain the image matching the "slow_gradcheck" entry in
# pytorch_build_data.py
SLOW_GRADCHECK_IMAGE_NAME = "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
def get_workflow_jobs(images=IMAGE_NAMES, only_slow_gradcheck=False):
"""Generates a list of docker image build definitions"""
ret = []
for image_name in images:
        if image_name.startswith('docker-'):
            # str.lstrip() removes characters, not a prefix, so slice the prefix off instead
            image_name = image_name[len('docker-'):]
        if only_slow_gradcheck and image_name != SLOW_GRADCHECK_IMAGE_NAME:
continue
parameters = OrderedDict({
"name": quote(f"docker-{image_name}"),
"image_name": quote(image_name),
})
if image_name == "pytorch-linux-xenial-py3.7-gcc5.4":
# pushing documentation on tags requires CircleCI to also
# build all the dependencies on tags, including this docker image
parameters['filters'] = gen_filter_dict(branches_list=r"/.*/",
tags_list=RC_PATTERN)
ret.append(OrderedDict(
{
"docker_build_job": parameters
}
))
return ret
| pytorch-master | .circleci/cimodel/data/simple/docker_definitions.py |
AWS_DOCKER_HOST = "308535385114.dkr.ecr.us-east-1.amazonaws.com"
def gen_docker_image(container_type):
return (
"/".join([AWS_DOCKER_HOST, "pytorch", container_type]),
f"docker-{container_type}",
)
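# Illustrative result (not in the original file): gen_docker_image("pytorch-linux-xenial-py3.7-gcc7")
# returns the image URL
# "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.7-gcc7"
# paired with the build-job requirement name "docker-pytorch-linux-xenial-py3.7-gcc7".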
def gen_docker_image_requires(image_name):
return [f"docker-{image_name}"]
DOCKER_IMAGE_BASIC, DOCKER_REQUIREMENT_BASE = gen_docker_image(
"pytorch-linux-xenial-py3.7-gcc5.4"
)
DOCKER_IMAGE_CUDA_10_2, DOCKER_REQUIREMENT_CUDA_10_2 = gen_docker_image(
"pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
)
DOCKER_IMAGE_GCC7, DOCKER_REQUIREMENT_GCC7 = gen_docker_image(
"pytorch-linux-xenial-py3.7-gcc7"
)
def gen_mobile_docker(specifier):
container_type = "pytorch-linux-xenial-py3-clang5-" + specifier
return gen_docker_image(container_type)
DOCKER_IMAGE_ASAN, DOCKER_REQUIREMENT_ASAN = gen_mobile_docker("asan")
DOCKER_IMAGE_NDK, DOCKER_REQUIREMENT_NDK = gen_mobile_docker("android-ndk-r19c")
| pytorch-master | .circleci/cimodel/data/simple/util/docker_constants.py |
pytorch-master | .circleci/cimodel/data/simple/util/__init__.py |
|
NON_PR_BRANCH_LIST = [
"main",
"master",
r"/ci-all\/.*/",
r"/release\/.*/",
]
PR_BRANCH_LIST = [
r"/gh\/.*\/head/",
r"/pull\/.*/",
]
RC_PATTERN = r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"
def gen_filter_dict(
branches_list=NON_PR_BRANCH_LIST,
tags_list=None
):
"""Generates a filter dictionary for use with CircleCI's job filter"""
filter_dict = {
"branches": {
"only": branches_list,
},
}
if tags_list is not None:
filter_dict["tags"] = {"only": tags_list}
return filter_dict
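# Example output (illustrative, added for clarity):
#   gen_filter_dict(branches_list=["nightly"], tags_list=RC_PATTERN) returns
#   {"branches": {"only": ["nightly"]}, "tags": {"only": RC_PATTERN}}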
| pytorch-master | .circleci/cimodel/data/simple/util/branch_filters.py |
class MultiPartVersion:
def __init__(self, parts, prefix=""):
self.parts = parts
self.prefix = prefix
def prefixed_parts(self):
"""
Prepends the first element of the version list
with the prefix string.
"""
if self.parts:
return [self.prefix + str(self.parts[0])] + [str(part) for part in self.parts[1:]]
else:
return [self.prefix]
def render_dots(self):
return ".".join(self.prefixed_parts())
def render_dots_or_parts(self, with_dots):
if with_dots:
return [self.render_dots()]
else:
return self.prefixed_parts()
class CudaVersion(MultiPartVersion):
def __init__(self, major, minor):
self.major = major
self.minor = minor
super().__init__([self.major, self.minor], "cuda")
def __str__(self):
return f"{self.major}.{self.minor}"
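# Illustrative usage (added for clarity, not part of the original module):
#   MultiPartVersion([11, 3], "cuda").prefixed_parts() == ["cuda11", "3"]
#   CudaVersion(11, 3).render_dots() == "cuda11.3"
#   str(CudaVersion(11, 3)) == "11.3"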
| pytorch-master | .circleci/cimodel/data/simple/util/versions.py |
# Documentation: https://docs.microsoft.com/en-us/rest/api/azure/devops/build/?view=azure-devops-rest-6.0
import re
import json
import os
import sys
import requests
import time
AZURE_PIPELINE_BASE_URL = "https://aiinfra.visualstudio.com/PyTorch/"
AZURE_DEVOPS_PAT_BASE64 = os.environ.get("AZURE_DEVOPS_PAT_BASE64_SECRET", "")
PIPELINE_ID = "911"
PROJECT_ID = "0628bce4-2d33-499e-bac5-530e12db160f"
TARGET_BRANCH = os.environ.get("CIRCLE_BRANCH", "main")
TARGET_COMMIT = os.environ.get("CIRCLE_SHA1", "")
build_base_url = AZURE_PIPELINE_BASE_URL + "_apis/build/builds?api-version=6.0"
s = requests.Session()
s.headers.update({"Authorization": "Basic " + AZURE_DEVOPS_PAT_BASE64})
def submit_build(pipeline_id, project_id, source_branch, source_version):
print("Submitting build for branch: " + source_branch)
print("Commit SHA1: ", source_version)
run_build_raw = s.post(build_base_url, json={
"definition": {"id": pipeline_id},
"project": {"id": project_id},
"sourceBranch": source_branch,
"sourceVersion": source_version
})
try:
run_build_json = run_build_raw.json()
except json.decoder.JSONDecodeError as e:
print(e)
print("Failed to parse the response. Check if the Azure DevOps PAT is incorrect or expired.")
sys.exit(-1)
build_id = run_build_json['id']
    print("Submitted build: " + str(build_id))
    print("Build URL: " + run_build_json['url'])
return build_id
def get_build(_id):
get_build_url = AZURE_PIPELINE_BASE_URL + f"/_apis/build/builds/{_id}?api-version=6.0"
get_build_raw = s.get(get_build_url)
return get_build_raw.json()
def get_build_logs(_id):
get_build_logs_url = AZURE_PIPELINE_BASE_URL + f"/_apis/build/builds/{_id}/logs?api-version=6.0"
get_build_logs_raw = s.get(get_build_logs_url)
return get_build_logs_raw.json()
def get_log_content(url):
resp = s.get(url)
return resp.text
def wait_for_build(_id):
build_detail = get_build(_id)
build_status = build_detail['status']
while build_status == 'notStarted':
print('Waiting for run to start: ' + str(_id))
sys.stdout.flush()
try:
build_detail = get_build(_id)
build_status = build_detail['status']
except Exception as e:
print("Error getting build")
print(e)
time.sleep(30)
    print("Build started: ", str(_id))
handled_logs = set()
while build_status == 'inProgress':
try:
print("Waiting for log: " + str(_id))
logs = get_build_logs(_id)
except Exception as e:
print("Error fetching logs")
print(e)
time.sleep(30)
continue
for log in logs['value']:
log_id = log['id']
if log_id in handled_logs:
continue
handled_logs.add(log_id)
print('Fetching log: \n' + log['url'])
try:
log_content = get_log_content(log['url'])
print(log_content)
except Exception as e:
print("Error getting log content")
print(e)
sys.stdout.flush()
build_detail = get_build(_id)
build_status = build_detail['status']
time.sleep(30)
build_result = build_detail['result']
    print("Build status: " + build_status)
    print("Build result: " + build_result)
return build_status, build_result
if __name__ == '__main__':
# Convert the branch name for Azure DevOps
match = re.search(r'pull/(\d+)', TARGET_BRANCH)
if match is not None:
pr_num = match.group(1)
SOURCE_BRANCH = f'refs/pull/{pr_num}/head'
else:
SOURCE_BRANCH = f'refs/heads/{TARGET_BRANCH}'
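    # e.g. a CircleCI branch of "pull/12345" maps to "refs/pull/12345/head", while
    # "main" maps to "refs/heads/main" (illustrative values).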
MAX_RETRY = 2
retry = MAX_RETRY
while retry > 0:
build_id = submit_build(PIPELINE_ID, PROJECT_ID, SOURCE_BRANCH, TARGET_COMMIT)
build_status, build_result = wait_for_build(build_id)
if build_result != 'succeeded':
retry = retry - 1
if retry > 0:
print("Retrying... remaining attempt: " + str(retry))
# Wait a bit before retrying
time.sleep((MAX_RETRY - retry) * 120)
continue
else:
print("No more chance to retry. Giving up.")
sys.exit(-1)
else:
break
| pytorch-master | .circleci/scripts/trigger_azure_pipeline.py |
#!/usr/bin/env python3
import os
import sys
import yaml
# Need to import modules that lie on an upward-relative path
sys.path.append(os.path.join(sys.path[0], '..'))
import cimodel.lib.miniyaml as miniyaml
def regurgitate(depth, use_pyyaml_formatter=False):
data = yaml.safe_load(sys.stdin)
if use_pyyaml_formatter:
output = yaml.dump(data, sort_keys=True)
sys.stdout.write(output)
else:
miniyaml.render(sys.stdout, data, depth)
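# Example invocation (illustrative): pipe a YAML fragment through this script,
#   cat fragment.yml | python normalize_yaml_fragment.py
# and the normalized YAML is written to stdout (rendered by miniyaml at depth 3,
# or by PyYAML's sorted dump when use_pyyaml_formatter is set).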
if __name__ == "__main__":
regurgitate(3)
| pytorch-master | .circleci/codegen_validation/normalize_yaml_fragment.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import distutils.command.clean
import shutil
import glob
import os
import subprocess
from setuptools import setup, find_packages
from torch.utils.cpp_extension import (
CppExtension,
BuildExtension,
)
cwd = os.path.dirname(os.path.abspath(__file__))
version_txt = os.path.join(cwd, 'version.txt')
with open(version_txt, 'r') as f:
version = f.readline().strip()
try:
sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()
except Exception:
sha = 'Unknown'
package_name = 'functorch'
if os.getenv('BUILD_VERSION'):
version = os.getenv('BUILD_VERSION')
elif sha != 'Unknown':
version += '+' + sha[:7]
def write_version_file():
version_path = os.path.join(cwd, 'functorch', 'version.py')
with open(version_path, 'w') as f:
f.write("__version__ = '{}'\n".format(version))
f.write("git_version = {}\n".format(repr(sha)))
# pytorch_dep = 'torch'
# if os.getenv('PYTORCH_VERSION'):
# pytorch_dep += "==" + os.getenv('PYTORCH_VERSION')
requirements = [
# This represents a nightly version of PyTorch.
# It can be installed as a binary or from source.
"torch>=1.13.0.dev",
]
extras = {}
extras["aot"] = ["networkx", ]
class clean(distutils.command.clean.clean):
def run(self):
with open(".gitignore", "r") as f:
ignores = f.read()
for wildcard in filter(None, ignores.split("\n")):
for filename in glob.glob(wildcard):
try:
os.remove(filename)
except OSError:
shutil.rmtree(filename, ignore_errors=True)
# It's an old-style class in Python 2.7...
distutils.command.clean.clean.run(self)
def get_extensions():
extension = CppExtension
# See functorch/csrc/Macros.h
define_macros = [('FUNCTORCH_BUILD_MAIN_LIB', None)]
extra_link_args = []
extra_compile_args = {"cxx": [
"-O3",
"-std=c++14",
"-fdiagnostics-color=always",
]}
debug_mode = os.getenv('DEBUG', '0') == '1'
if debug_mode:
print("Compiling in debug mode")
extra_compile_args = {
"cxx": [
"-O0",
"-fno-inline",
"-g",
"-std=c++14",
"-fdiagnostics-color=always",
]}
extra_link_args = ["-O0", "-g"]
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "functorch", "csrc")
extension_sources = set(
os.path.join(extensions_dir, p)
for p in glob.glob(os.path.join(extensions_dir, "*.cpp"))
)
sources = list(extension_sources)
sources.append(os.path.join(extensions_dir, "dim", "dim.cpp"))
ext_modules = [
extension(
"functorch._C",
sources,
include_dirs=[this_dir],
define_macros=define_macros,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
)
]
return ext_modules
class BuildExtension_(BuildExtension):
def build_extensions(self, *args, **kwargs):
# It turns out for windows this isn't populated?
if hasattr(self.compiler, 'compiler_so'):
if '-Wstrict-prototypes' in self.compiler.compiler_so:
self.compiler.compiler_so.remove('-Wstrict-prototypes')
super().build_extensions(*args, **kwargs)
if __name__ == '__main__':
print("Building wheel {}-{}".format(package_name, version))
write_version_file()
setup(
# Metadata
name=package_name,
version=version,
author='PyTorch Core Team',
url="https://github.com/pytorch/functorch",
description='JAX-like composable function transforms for PyTorch',
license='BSD',
# Package info
packages=find_packages(),
install_requires=requirements,
extras_require=extras,
ext_modules=get_extensions(),
cmdclass={
"build_ext": BuildExtension_.with_options(no_python_abi_suffix=True),
'clean': clean,
})
| pytorch-master | functorch/setup.py |
import yaml
import csv
import torch
from collections import defaultdict
def get_ops_for_key(key):
# Needs modified PyTorch C++ code to work
if key is None:
ops = torch._C._dispatch_get_registrations_for_dispatch_key()
else:
ops = torch._C._dispatch_get_registrations_for_dispatch_key(key)
cleaned_ops = []
for i in ops:
if 'aten::' not in i:
continue
cleaned_ops.append(i[6:].strip())
return set(cleaned_ops)
def gen_data(special_op_lists, analysis_name):
all_ops = get_ops_for_key(None)
composite_ops = get_ops_for_key('CompositeImplicitAutograd')
noncomposite_ops = all_ops - composite_ops
ops = yaml.load(open('../../aten/src/ATen/native/native_functions.yaml', 'r').read(), Loader=yaml.CLoader)
annotated_ops = {a.strip(): b.strip() for a, b in list(csv.reader(open('annotated_ops')))}
from collections import defaultdict
uniq_ops = []
uniq_names = set()
overload_types = defaultdict(list)
cnt = 0
for op in ops:
func_str = op['func']
name = func_str[:func_str.index('(')]
if '.' in name:
uniq_name = name[:name.index('.')]
overload_types[name[name.index('.') + 1:]].append(name)
else:
uniq_name = name
op['name'] = uniq_name
full_name = func_str[:func_str.index('(')]
op['full_name'] = full_name
ret_type = func_str[func_str.index('->') + 3:]
op['ret_type'] = ret_type
cnt += 1
if uniq_name in uniq_names:
continue
uniq_names.add(uniq_name)
uniq_ops.append(op)
def annotate_ops(ops, is_unique):
categorization = defaultdict(int)
for op in ops:
if op['name'][-1] == '_':
categorization['inplace'] += 1
op['meta'] = 'inplace'
continue
if not is_unique and 'a!' in op['func'].lower():
categorization['out'] += 1
op['meta'] = 'out'
continue
if 'conv' in op['name']:
categorization['conv'] += 1
op['meta'] = 'conv'
continue
if 'pool' in op['name']:
categorization['pool'] += 1
op['meta'] = 'pool'
continue
if 'backward' in op['name']:
categorization['backward'] += 1
op['meta'] = 'backward'
continue
if op['name'][0] == '_' and op['name'][1] != '_':
categorization['private'] += 1
op['meta'] = 'private'
continue
if 'batch_norm' in op['name']:
categorization['batch_norm'] += 1
op['meta'] = 'batch_norm'
continue
if 'Tensor' not in op['func'] or 'Tensor' not in op['ret_type']:
categorization['non_tensor'] += 1
op['meta'] = 'non_tensor'
continue
if 'cudnn' in op['name'] or 'mkldnn' in op['name'] or 'miopen' in op['name'] or \
'native' in op['name'] or 'thnn' in op['name'] or 'slow' in op['name']:
categorization['backend'] += 1
op['meta'] = 'backend'
continue
if op['name'] in annotated_ops:
categorization['core'] += 1
op['meta'] = 'core ' + annotated_ops[op['name']]
continue
categorization['core'] += 1
op['meta'] = 'core unknown'
return categorization
annotate_ops(ops, is_unique=False)
with open(f"{analysis_name}", 'w') as f:
for op in ops:
info = [
op['full_name'], op['meta'], not (op['full_name'] in noncomposite_ops)
] + [check(op) for check in special_op_lists]
f.write(','.join([str(i) for i in info]) + '\n')
def name_check(lst):
return lambda x: x['name'] in lst
def full_name_check(lst):
return lambda x: x['full_name'] in lst
# Generates batching rule data
gen_data([full_name_check(get_ops_for_key('FuncTorchBatched'))], 'vmap.txt')
def remove_suffix(input_string, suffix):
if suffix and input_string.endswith(suffix):
return input_string[:-len(suffix)]
return input_string
def remove_prefix(input_string, prefix):
if prefix and input_string.startswith(prefix):
return input_string[len(prefix):]
return input_string
if True:
with open('run_ops.txt', 'r') as f:
opinfo_ops = [remove_suffix(i.strip(), '.default') for i in f.readlines()]
with open('count_ops.txt', 'r') as f:
opinfo_counts = [i.strip() for i in f.readlines()]
opinfo_counts = defaultdict(int, {k: v for k, v in zip(opinfo_ops, opinfo_counts)})
def count_fn(x):
return opinfo_counts[x['full_name']]
with open('run_decompositions.txt', 'r') as f:
decomposed_ops = [remove_suffix(i.strip(), '.default') for i in f.readlines()]
with open('public_api', 'r') as f:
ref_api = [i.strip() for i in f.readlines()]
def has_ref_impl(x):
name = x['name']
for prefix in ["linalg_", "special_"]:
name = remove_prefix(name, prefix)
prefixes = ['nn.functional', 'fft', 'special', 'linalg']
return any(f"{prefix}.{name}" in ref_api for prefix in prefixes) or name in ref_api
gen_data([full_name_check(opinfo_ops), full_name_check(decomposed_ops), count_fn, has_ref_impl], 'decompositions.txt')
| pytorch-master | functorch/op_analysis/gen_data.py |
import argparse
import concurrent.futures
import json
import logging
import os
import subprocess
import sys
import time
from enum import Enum
from typing import Any, List, NamedTuple, Optional, BinaryIO
IS_WINDOWS: bool = os.name == "nt"
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flush=True, **kwargs)
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
def _run_command(
args: List[str],
*,
stdin: BinaryIO,
timeout: int,
) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=IS_WINDOWS, # So batch scripts are found.
timeout=timeout,
check=True,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def run_command(
args: List[str],
*,
stdin: BinaryIO,
retries: int,
timeout: int,
) -> "subprocess.CompletedProcess[bytes]":
remaining_retries = retries
while True:
try:
return _run_command(args, stdin=stdin, timeout=timeout)
except subprocess.TimeoutExpired as err:
if remaining_retries == 0:
raise err
remaining_retries -= 1
logging.warning(
"(%s/%s) Retrying because command failed with: %r",
retries - remaining_retries,
retries,
err,
)
time.sleep(1)
def check_file(
filename: str,
retries: int,
timeout: int,
) -> List[LintMessage]:
try:
with open(filename, "rb") as f:
original = f.read()
with open(filename, "rb") as f:
proc = run_command(
[sys.executable, "-mblack", "--stdin-filename", filename, "-"],
stdin=f,
retries=retries,
timeout=timeout,
)
except subprocess.TimeoutExpired:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="BLACK",
severity=LintSeverity.ERROR,
name="timeout",
original=None,
replacement=None,
description=(
"black timed out while trying to process a file. "
"Please report an issue in pytorch/pytorch with the "
"label 'module: lint'"
),
)
]
except (OSError, subprocess.CalledProcessError) as err:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="BLACK",
severity=LintSeverity.ADVICE,
name="command-failed",
original=None,
replacement=None,
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
"COMMAND (exit code {returncode})\n"
"{command}\n\n"
"STDERR\n{stderr}\n\n"
"STDOUT\n{stdout}"
).format(
returncode=err.returncode,
command=" ".join(as_posix(x) for x in err.cmd),
stderr=err.stderr.decode("utf-8").strip() or "(empty)",
stdout=err.stdout.decode("utf-8").strip() or "(empty)",
)
),
)
]
replacement = proc.stdout
if original == replacement:
return []
return [
LintMessage(
path=filename,
line=None,
char=None,
code="BLACK",
severity=LintSeverity.WARNING,
name="format",
original=original.decode("utf-8"),
replacement=replacement.decode("utf-8"),
description="Run `lintrunner -a` to apply this patch.",
)
]
def main() -> None:
parser = argparse.ArgumentParser(
description="Format files with black.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--retries",
default=3,
type=int,
help="times to retry timed out black",
)
parser.add_argument(
"--timeout",
default=90,
type=int,
help="seconds to wait for black",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
with concurrent.futures.ThreadPoolExecutor(
max_workers=os.cpu_count(),
thread_name_prefix="Thread",
) as executor:
futures = {
executor.submit(check_file, x, args.retries, args.timeout): x
for x in args.filenames
}
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
| pytorch-master | functorch/tools/lint/black_linter.py |
"""
Initializer script that installs the required packages with pip.
"""
import os
import argparse
import logging
import subprocess
import sys
import time
from typing import List
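# Example invocation (illustrative package pins, not from this repo's config):
#   python3 pip_init.py flake8==3.8.2 black==22.3.0 --verbose
# Each package argument needs a version pin; the script raises a RuntimeError otherwise.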
def run_command(args: List[str]) -> "subprocess.CompletedProcess[bytes]":
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(args, check=True)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="pip initializer")
parser.add_argument(
"packages",
nargs="+",
help="pip packages to install",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"--dry-run", help="do not install anything, just print what would be done."
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET if args.verbose else logging.DEBUG,
stream=sys.stderr,
)
for package in args.packages:
package_name, _, version = package.partition("=")
if version == "":
            raise RuntimeError(
                f"Package {package_name} did not have a version specified. "
                "Please specify a version to produce a consistent linting experience."
            )
pip_args = ["pip3", "install"]
# If we are in a global install, use `--user` to install so that you do not
# need root access in order to initialize linters.
#
# However, `pip install --user` interacts poorly with virtualenvs (see:
# https://bit.ly/3vD4kvl) and conda (see: https://bit.ly/3KG7ZfU). So in
# these cases perform a regular installation.
in_conda = os.environ.get("CONDA_PREFIX") is not None
in_virtualenv = os.environ.get("VIRTUAL_ENV") is not None
if not in_conda and not in_virtualenv:
pip_args.append("--user")
pip_args.extend(args.packages)
dry_run = args.dry_run == "1"
if dry_run:
print(f"Would have run: {pip_args}")
sys.exit(0)
run_command(pip_args)
| pytorch-master | functorch/tools/lint/pip_init.py |
import argparse
import json
import logging
import os
import re
import subprocess
import sys
import time
from enum import Enum
from typing import Any, Dict, List, NamedTuple, Optional, Set, Pattern
IS_WINDOWS: bool = os.name == "nt"
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, flush=True, **kwargs)
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
class LintMessage(NamedTuple):
path: Optional[str]
line: Optional[int]
char: Optional[int]
code: str
severity: LintSeverity
name: str
original: Optional[str]
replacement: Optional[str]
description: Optional[str]
def as_posix(name: str) -> str:
return name.replace("\\", "/") if IS_WINDOWS else name
# fmt: off
# https://www.flake8rules.com/
DOCUMENTED_IN_FLAKE8RULES: Set[str] = {
"E101", "E111", "E112", "E113", "E114", "E115", "E116", "E117",
"E121", "E122", "E123", "E124", "E125", "E126", "E127", "E128", "E129",
"E131", "E133",
"E201", "E202", "E203",
"E211",
"E221", "E222", "E223", "E224", "E225", "E226", "E227", "E228",
"E231",
"E241", "E242",
"E251",
"E261", "E262", "E265", "E266",
"E271", "E272", "E273", "E274", "E275",
"E301", "E302", "E303", "E304", "E305", "E306",
"E401", "E402",
"E501", "E502",
"E701", "E702", "E703", "E704",
"E711", "E712", "E713", "E714",
"E721", "E722",
"E731",
"E741", "E742", "E743",
"E901", "E902", "E999",
"W191",
"W291", "W292", "W293",
"W391",
"W503", "W504",
"W601", "W602", "W603", "W604", "W605",
"F401", "F402", "F403", "F404", "F405",
"F811", "F812",
"F821", "F822", "F823",
"F831",
"F841",
"F901",
"C901",
}
# https://pypi.org/project/flake8-comprehensions/#rules
DOCUMENTED_IN_FLAKE8COMPREHENSIONS: Set[str] = {
"C400", "C401", "C402", "C403", "C404", "C405", "C406", "C407", "C408", "C409",
"C410",
"C411", "C412", "C413", "C413", "C414", "C415", "C416",
}
# https://github.com/PyCQA/flake8-bugbear#list-of-warnings
DOCUMENTED_IN_BUGBEAR: Set[str] = {
"B001", "B002", "B003", "B004", "B005", "B006", "B007", "B008", "B009", "B010",
"B011", "B012", "B013", "B014", "B015",
"B301", "B302", "B303", "B304", "B305", "B306",
"B901", "B902", "B903", "B950",
}
# fmt: on
# stdin:2: W802 undefined name 'foo'
# stdin:3:6: T484 Name 'foo' is not defined
# stdin:3:-100: W605 invalid escape sequence '\/'
# stdin:3:1: E302 expected 2 blank lines, found 1
RESULTS_RE: Pattern[str] = re.compile(
r"""(?mx)
^
(?P<file>.*?):
(?P<line>\d+):
(?:(?P<column>-?\d+):)?
\s(?P<code>\S+?):?
\s(?P<message>.*)
$
"""
)
def _test_results_re() -> None:
"""
>>> def t(s): return RESULTS_RE.search(s).groupdict()
>>> t(r"file.py:80:1: E302 expected 2 blank lines, found 1")
... # doctest: +NORMALIZE_WHITESPACE
{'file': 'file.py', 'line': '80', 'column': '1', 'code': 'E302',
'message': 'expected 2 blank lines, found 1'}
>>> t(r"file.py:7:1: P201: Resource `stdout` is acquired but not always released.")
... # doctest: +NORMALIZE_WHITESPACE
{'file': 'file.py', 'line': '7', 'column': '1', 'code': 'P201',
'message': 'Resource `stdout` is acquired but not always released.'}
>>> t(r"file.py:8:-10: W605 invalid escape sequence '/'")
... # doctest: +NORMALIZE_WHITESPACE
{'file': 'file.py', 'line': '8', 'column': '-10', 'code': 'W605',
'message': "invalid escape sequence '/'"}
"""
pass
def _run_command(
args: List[str],
*,
extra_env: Optional[Dict[str, str]],
) -> "subprocess.CompletedProcess[str]":
logging.debug(
"$ %s",
" ".join(
([f"{k}={v}" for (k, v) in extra_env.items()] if extra_env else []) + args
),
)
start_time = time.monotonic()
try:
return subprocess.run(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
encoding="utf-8",
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def run_command(
args: List[str],
*,
extra_env: Optional[Dict[str, str]],
retries: int,
) -> "subprocess.CompletedProcess[str]":
remaining_retries = retries
while True:
try:
return _run_command(args, extra_env=extra_env)
except subprocess.CalledProcessError as err:
if remaining_retries == 0 or not re.match(
r"^ERROR:1:1: X000 linting with .+ timed out after \d+ seconds",
err.stdout,
):
raise err
remaining_retries -= 1
logging.warning(
"(%s/%s) Retrying because command failed with: %r",
retries - remaining_retries,
retries,
err,
)
time.sleep(1)
def get_issue_severity(code: str) -> LintSeverity:
# "B901": `return x` inside a generator
# "B902": Invalid first argument to a method
# "B903": __slots__ efficiency
# "B950": Line too long
# "C4": Flake8 Comprehensions
# "C9": Cyclomatic complexity
# "E2": PEP8 horizontal whitespace "errors"
# "E3": PEP8 blank line "errors"
# "E5": PEP8 line length "errors"
# "F401": Name imported but unused
# "F403": Star imports used
# "F405": Name possibly from star imports
# "T400": type checking Notes
# "T49": internal type checker errors or unmatched messages
if any(
code.startswith(x)
for x in [
"B9",
"C4",
"C9",
"E2",
"E3",
"E5",
"F401",
"F403",
"F405",
"T400",
"T49",
]
):
return LintSeverity.ADVICE
# "F821": Undefined name
# "E999": syntax error
if any(code.startswith(x) for x in ["F821", "E999"]):
return LintSeverity.ERROR
# "F": PyFlakes Error
# "B": flake8-bugbear Error
# "E": PEP8 "Error"
# "W": PEP8 Warning
# possibly other plugins...
return LintSeverity.WARNING
def get_issue_documentation_url(code: str) -> str:
if code in DOCUMENTED_IN_FLAKE8RULES:
return f"https://www.flake8rules.com/rules/{code}.html"
if code in DOCUMENTED_IN_FLAKE8COMPREHENSIONS:
return "https://pypi.org/project/flake8-comprehensions/#rules"
if code in DOCUMENTED_IN_BUGBEAR:
return "https://github.com/PyCQA/flake8-bugbear#list-of-warnings"
return ""
def check_files(
filenames: List[str],
flake8_plugins_path: Optional[str],
severities: Dict[str, LintSeverity],
retries: int,
) -> List[LintMessage]:
try:
proc = run_command(
[sys.executable, "-mflake8", "--exit-zero"] + filenames,
extra_env={"FLAKE8_PLUGINS_PATH": flake8_plugins_path}
if flake8_plugins_path
else None,
retries=retries,
)
except (OSError, subprocess.CalledProcessError) as err:
return [
LintMessage(
path=None,
line=None,
char=None,
code="FLAKE8",
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(
f"Failed due to {err.__class__.__name__}:\n{err}"
if not isinstance(err, subprocess.CalledProcessError)
else (
"COMMAND (exit code {returncode})\n"
"{command}\n\n"
"STDERR\n{stderr}\n\n"
"STDOUT\n{stdout}"
).format(
returncode=err.returncode,
command=" ".join(as_posix(x) for x in err.cmd),
stderr=err.stderr.strip() or "(empty)",
stdout=err.stdout.strip() or "(empty)",
)
),
)
]
return [
LintMessage(
path=match["file"],
name=match["code"],
description="{}\nSee {}".format(
match["message"],
get_issue_documentation_url(match["code"]),
),
line=int(match["line"]),
char=int(match["column"])
if match["column"] is not None and not match["column"].startswith("-")
else None,
code="FLAKE8",
severity=severities.get(match["code"]) or get_issue_severity(match["code"]),
original=None,
replacement=None,
)
for match in RESULTS_RE.finditer(proc.stdout)
]
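# check_files relies on RESULTS_RE (defined earlier in this file) exposing the
# named groups "file", "line", "column", "code" and "message"; when the column
# group is missing or negative, char is reported as None.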
def main() -> None:
parser = argparse.ArgumentParser(
description="Flake8 wrapper linter.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--flake8-plugins-path",
help="FLAKE8_PLUGINS_PATH env value",
)
parser.add_argument(
"--severity",
action="append",
help="map code to severity (e.g. `B950:advice`)",
)
parser.add_argument(
"--retries",
default=3,
type=int,
help="times to retry timed out flake8",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
flake8_plugins_path = (
None
if args.flake8_plugins_path is None
else os.path.realpath(args.flake8_plugins_path)
)
severities: Dict[str, LintSeverity] = {}
if args.severity:
for severity in args.severity:
parts = severity.split(":", 1)
assert len(parts) == 2, f"invalid severity `{severity}`"
severities[parts[0]] = LintSeverity(parts[1])
lint_messages = check_files(
args.filenames, flake8_plugins_path, severities, args.retries
)
for lint_message in lint_messages:
print(json.dumps(lint_message._asdict()), flush=True)
if __name__ == "__main__":
main()
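# Example invocation (a sketch based on the argparse flags above; paths are
# placeholders):
#   python flake8_linter.py --retries 3 --severity B950:advice some_file.py
# Because fromfile_prefix_chars="@", arguments may also come from a response
# file, e.g. `python flake8_linter.py @args.txt`. Output is one JSON-encoded
# LintMessage per line on stdout.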
| pytorch-master | functorch/tools/lint/flake8_linter.py |
# Owner(s): ["module: functorch"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
from torch.testing._internal.common_utils import (
TestCase, run_tests, parametrize, subtest, instantiate_parametrized_tests
)
import torch
import torch.nn as nn
import torch.nn.functional as F
import unittest
import warnings
import math
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCPU
from torch.testing._internal.common_dtype import get_all_fp_dtypes
from torch.testing._internal.common_utils import IS_WINDOWS
from functools import partial
from functorch.experimental import replace_all_batch_norm_modules_
import functorch
from functorch import (
grad, vjp, vmap, jacrev, jacfwd, grad_and_value, hessian,
jvp, make_functional, make_functional_with_buffers,
combine_state_for_ensemble, make_fx
)
from functorch._src.make_functional import (
functional_init, functional_init_with_buffers,
)
from functorch._src.eager_transforms import _argnums_partial, enable_fwd_grad
from functorch.experimental import functionalize
if not IS_WINDOWS:
from functorch._src.custom_function import custom_vjp
# NB: numpy is a testing dependency!
import numpy as np
USE_TORCHVISION = False
try:
import torchvision # noqa: F401
USE_TORCHVISION = True
except ImportError:
warnings.warn("Couldn't import torchvision. Some of our tests use it, try "
"to install it with commands from pytorch.org, post-fixed with "
"`--no-deps` to avoid overwriting the pytorch installation",
UserWarning)
# TestCase for _argnums_partial, an important helper function
class TestArgnumsPartial(TestCase):
def test_invalid_argnum_type(self):
x = torch.randn(3)
args = (x,)
with self.assertRaisesRegex(RuntimeError, "int or Tuple"):
_argnums_partial(torch.sin, args, 0.0)
with self.assertRaisesRegex(RuntimeError, "int or Tuple"):
_argnums_partial(torch.sin, args, [0])
with self.assertRaisesRegex(RuntimeError, "must be int"):
_argnums_partial(torch.sin, args, (0.0,))
args = (0.1, 1.1, 2.1, 3.1, 4.1)
def f(a, b, c, d, e):
return a
with self.assertRaisesRegex(RuntimeError, "must be int"):
_argnums_partial(torch.sin, args, ((0, 1), 2))
def test_out_of_bounds_argnum_values(self):
x = torch.randn(3)
args = (x,)
with self.assertRaisesRegex(RuntimeError, "positional inputs"):
_argnums_partial(torch.sin, args, 1)
with self.assertRaisesRegex(RuntimeError, "positional inputs"):
_argnums_partial(torch.sin, args, -2)
with self.assertRaisesRegex(RuntimeError, "positional inputs"):
_argnums_partial(torch.sin, args, (-2,))
def test_not_enough_argnums(self):
x = torch.randn(3)
args = (x,)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
_argnums_partial(torch.sin, args, ())
def test_duplicate_argnums(self):
x = torch.randn(3)
args = (x, x)
with self.assertRaisesRegex(RuntimeError, "must be unique"):
_argnums_partial(torch.add, args, (0, 0))
with self.assertRaisesRegex(RuntimeError, "must be unique"):
_argnums_partial(torch.add, args, (0, -2))
def test_flat_args_with_positive_int_argnum(self):
args = (0.1, 1.1, 2.1, 3.1, 4.1)
def f(a, b, c, d, e):
return a
f_new, res = _argnums_partial(f, args, 0)
self.assertEqual(res, (0.1,))
self.assertEqual(f_new(*res), 0.1)
f_new, res = _argnums_partial(f, args, 4)
self.assertEqual(res, (4.1,))
self.assertEqual(f_new(*res), 0.1)
def test_flat_args_with_negative_int_argnum(self):
args = (0.1, 1.1, 2.1, 3.1, 4.1)
def f(a, b, c, d, e):
return a
expected = f(*args)
f_new, res = _argnums_partial(f, args, -1)
self.assertEqual(res, (4.1,))
self.assertEqual(f_new(*res), expected)
f_new, res = _argnums_partial(f, args, -5)
self.assertEqual(res, (0.1,))
self.assertEqual(f_new(*res), expected)
def test_flat_args_with_tuple_argnum(self):
args = (0.1, 1.1, 2.1, 3.1, 4.1)
def f(a, b, c, d, e):
return a
f_new, res = _argnums_partial(f, args, (0, 1, 2, 3, 4))
self.assertEqual(f_new(*res), 0.1)
self.assertEqual(res, args)
f_new, res = _argnums_partial(f, args, (0, -3))
self.assertEqual(f_new(*res), 0.1)
self.assertEqual(res, (0.1, 2.1))
def test_pytree_args(self):
args = ((0.1, 1.1), 2.0, [3.1])
def f(a, b, c):
return a[0] + a[1] + b + c[0]
expected = f(*args)
f_new, res = _argnums_partial(f, args, 0)
self.assertEqual(res, args[0:1])
self.assertEqual(f_new(*res), expected)
f_new, res = _argnums_partial(f, args, (0,))
self.assertEqual(res, args[0:1])
self.assertEqual(f_new(*res), expected)
f_new, res = _argnums_partial(f, args, -1)
self.assertEqual(res, args[-1:])
self.assertEqual(f_new(*res), expected)
f_new, res = _argnums_partial(f, args, (0, -2))
self.assertEqual(res, args[0:2])
self.assertEqual(f_new(*res), expected)
def test_argnums_reorders(self):
args = ((0.1, 1.1, 2.1), 3.1, 4.1)
def f(a, b, c):
return a[0] + a[1] + a[2] + b + c
expected = f(*args)
f_new, res = _argnums_partial(f, args, (1, 0))
self.assertEqual(res, (args[1], args[0]))
self.assertEqual(f_new(*res), expected)
def test_function_with_default_args(self):
args = ((0.1, 1.1, 2.1), 3.1)
def f(a, b, c=4.1):
return a[0] + a[1] + a[2] + b + c
expected = f(*args)
f_new, res = _argnums_partial(f, args, -2)
self.assertEqual(res, args[0:1])
self.assertEqual(f_new(*res), expected)
args = ((0.1, 1.1, 2.1), 3.1, 5.1)
expected = f(*args)
f_new, res = _argnums_partial(f, args, -1)
self.assertEqual(res, args[-1:])
self.assertEqual(f_new(*res), expected)
class TestGradTransform(TestCase):
def test_primitive(self, device):
x = torch.randn([], device=device)
result = grad(torch.sin)(x)
self.assertEqual(result, torch.cos(x))
def test_composite_simple(self, device):
x = torch.randn(2, 3, 4, device=device)
result = grad(lambda x: torch.flatten(x).sum())(x)
self.assertEqual(result, torch.ones_like(x))
def test_fn_with_kwargs(self, device):
def foo(x, y):
return (x * y).sum()
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected = grad(foo)(x, y)
result = grad(foo)(x, y=y)
self.assertEqual(result, expected)
def test_composite_complicated(self, device):
x = torch.randn(3, device=device)
y = torch.randn(3, 5, device=device)
def foo(x, y):
result = x @ y
return result.sum()
result = grad(foo)(x, y)
x.requires_grad_()
out = foo(x, y)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_composite_two_ops(self, device):
N, C = 2, 5
y = torch.randn(N, C, device=device)
targets = torch.randint(0, C, (N,), device=device)
def foo(y, targets):
return F.cross_entropy(y, targets)
result = grad(foo)(y, targets)
y.requires_grad_()
expected, = torch.autograd.grad(foo(y, targets), y)
self.assertEqual(result, expected)
def _test_attributes(self, get_attr_lambda, device):
x = torch.randn(2, 3, 5, dtype=torch.double, device=device)
expected = get_attr_lambda(x)
def foo(x):
self.assertEqual(get_attr_lambda(x), expected)
return x.sum()
grad(foo)(x)
def test_shape(self, device):
self._test_attributes(lambda x: x.shape, device)
def test_dtype(self, device):
self._test_attributes(lambda x: x.dtype, device)
def test_is_cuda(self, device):
self._test_attributes(lambda x: x.is_cuda, device)
def test_numel(self, device):
self._test_attributes(lambda x: x.numel(), device)
def test_inplace(self, device):
x = torch.randn([], device=device)
def foo(x):
return x.clone().sin_()
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_inplace_on_view(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y0.sin_()
return y.sum()
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_view_base(self, device):
x = torch.randn(3, device=device)
def foo(x):
y = x.clone()
y0 = y[0]
y.sin_()
return y0
result = grad(foo)(x)
x.requires_grad_()
out = foo(x)
expected, = torch.autograd.grad(out, x)
self.assertEqual(result, expected)
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_nesting_simple(self, device):
x = torch.randn([], device=device)
result = grad(grad(torch.sin))(x)
self.assertEqual(result, -torch.sin(x))
def test_escaped_wrappers_are_marked_as_dead(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
self.assertEqual(functorch._C.dlevel(escaped[0]), -1)
def test_escaped_wrappers_are_ignored(self, device):
x = torch.randn([], device=device)
escaped = []
def foo(x):
y = x.sin()
escaped.append(y)
return y
grad(foo)(x)
something = escaped[0].sum()
self.assertEqual(functorch._C.dlevel(something), 0)
self.assertEqual(something, x.sin().sum())
def test_vjp(self, device):
x = torch.randn([], device=device)
out, vjp_fn = vjp(torch.sin, x)
self.assertEqual(out, x.sin())
v = torch.randn([], device=device)
result, = vjp_fn(v)
self.assertEqual(result, v * x.cos())
def test_vjp_two_outputs(self, device):
def f(x):
return x, x
result, vjp_fn = vjp(f, torch.tensor(1.))
vjp_fn(result)
def test_conj_bit(self):
x = torch.tensor(1 + 1j)
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = grad(foo)(x)
self.assertEqual(res, torch.ones_like(res))
def test_composed_with_autograd(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = grad(torch.sin)(x)
result, = torch.autograd.grad(y, x)
self.assertEqual(result, -x.sin())
def test_grad_of_vjp_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(torch.sin, x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
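    # Why x.cos() above: vjp(torch.sin, x) produces vjp_fn with
    # vjp_fn(y) == (y * x.cos(),), which is linear in y, so differentiating
    # it with respect to y recovers the constant factor x.cos().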
def test_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
out, vjp_fn = vjp(grad(torch.sin), x)
return vjp_fn(y)[0]
result = foo(x, y)
expected = -y * x.sin()
self.assertEqual(result, expected)
def test_grad_of_vjp_of_grad_composition(self, device):
x = torch.randn([], device=device)
y = torch.randn([], device=device)
def foo(x, y):
df, vjp_fn = vjp(grad(lambda x: -torch.cos(x)), x)
return grad(lambda y: vjp_fn(y)[0])(y)
result = foo(x, y)
expected = x.cos()
self.assertEqual(result, expected)
def test_views(self, device):
x = torch.randn([], requires_grad=True, device=device)
y = torch.randn([], requires_grad=True, device=device)
def silly_sin(x):
x = x.view([])
x = x.sin()
return x
def foo(x, y):
z1 = grad(silly_sin)(x)
z2 = torch.cos(y)
return z1 + z2
result = foo(x, y)
grads = torch.autograd.grad(result, [x, y])
self.assertEqual(grads[0], -x.sin())
self.assertEqual(grads[1], -y.sin())
def test_view_inplace_simple(self, device):
def foo(x):
x = x.clone()
x.view([]).sin_()
return x
x = torch.randn([], requires_grad=True, device=device)
result = grad(foo)(x)
self.assertEqual(result, x.cos())
def test_invalid_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=-3)(x, y)
with self.assertRaisesRegex(RuntimeError, 'but only'):
grad(torch.mul, argnums=2)(x, y)
with self.assertRaisesRegex(RuntimeError, 'int or Tuple'):
grad(torch.mul, argnums=[0])(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be int'):
grad(torch.mul, argnums=('0',))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, 0))(x, y)
with self.assertRaisesRegex(RuntimeError, 'must be unique'):
grad(torch.mul, argnums=(0, -2))(x, y)
def test_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=0)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(0,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(0, 1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_out_of_order_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gy, gx = grad(torch.mul, argnums=(1, 0))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_negative_argnums(self, device):
x = torch.randn([])
y = torch.randn([])
gx = grad(torch.mul, argnums=-2)(x, y)
self.assertEqual(gx, y)
gy = grad(torch.mul, argnums=-1)(x, y)
self.assertEqual(gy, x)
gx, = grad(torch.mul, argnums=(-2,))(x, y)
self.assertEqual(gx, y)
gx, gy = grad(torch.mul, argnums=(-2, -1))(x, y)
self.assertEqual(gx, y)
self.assertEqual(gy, x)
def test_grad_pytree_inputs(self, device):
x = torch.randn([], device=device)
def f(a, b):
x, y = a
return 1 * x + 2 * y + 3 * b['foo']
args = ((x, x), {'foo': x})
gx, gy = grad(f)(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), = grad(f, argnums=(0,))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
(gx, gy), gz = grad(f, argnums=(0, 1))(*args)
self.assertEqual(gx, torch.tensor(1., device=device))
self.assertEqual(gy, torch.tensor(2., device=device))
self.assertEqual(gz['foo'], torch.tensor(3., device=device))
def test_grad_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: [t, t], has_aux=True)(x)
with self.assertRaisesRegex(
RuntimeError,
r'grad_and_value\(f\)\(\*args\): output of function f should be a tuple'
):
grad(lambda t: (t, t + 2, t + 3), has_aux=True)(x)
def f(t):
y = t.sin()
return y.sum(), t.cos()
out, aux = grad(f, has_aux=True)(x)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.cos())
def test_grad_aux_pytree(self, device):
def f(x):
y = x.sin()
return y.sum(), {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, aux = grad(f, has_aux=True)(x)
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
self.assertEqual(out, x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = grad(lambda x: (x.sum(), [x, aux]), has_aux=True)(x)
def test_zero_grad(self, device):
def f(x):
return (x['a']**2.0).sum()
inps = ({'a': torch.randn(10, device=device) + 3, 'b': torch.randn(10, device=device)})
grads = grad(f)(inps)
self.assertNotEqual(grads['a'].sum(), 0.0)
self.assertEqual(grads['b'].sum(), 0.0)
def test_unrelated_grad(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
def unrelated(x):
return y
result = grad(unrelated)(x)
self.assertEqual(result, torch.zeros_like(x))
def test_unrelated_vjp(self, device):
x = torch.tensor(1., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(x):
return y
out, vjp_fn = vjp(unrelated, x)
result = vjp_fn(v)
expected = (torch.zeros_like(x),)
self.assertEqual(result, expected)
def test_unrelated_vjp_multiple_inputs_outputs(self, device):
w = torch.tensor(3., device=device)
x = torch.tensor(4., device=device)
y = torch.tensor(2., device=device)
v = torch.tensor(1., device=device)
def unrelated(w, x):
return y, y, x
out, vjp_fn = vjp(unrelated, w, x)
result = vjp_fn((v, v, v))
expected = (torch.zeros_like(x), torch.ones_like(x))
self.assertEqual(result, expected)
# TODO: https://github.com/zou3519/functorch/issues/12
@onlyCPU
def test_unrelated_hessian(self, device):
N = 5
M = 3
W = torch.randn(N, M, device=device)
def f(x):
return W @ x
x = torch.randn(M)
result = jacrev(jacrev(f))(x)
expected = torch.zeros(N, M, M, device=device)
self.assertEqual(result, expected)
def test_vjp_pytree_input(self, device):
def f(x):
return x[0] * x[1][0]
x = torch.randn([], device=device)
v = torch.randn([], device=device)
out, vjp_fn = vjp(f, (x, (x, x)))
self.assertEqual(out, x * x)
result = vjp_fn(v)
self.assertEqual(result, ((x * v, (x * v, 0.)),))
def test_vjp_pytree_output(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
result, = vjp_fn((v1, (v2, v3)))
self.assertEqual(result, v1 + v2 + v3)
def test_vjp_outputs_can_any_pytree(self, device):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
for output in [None, ()]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): Expected f to be a function that has non-empty output"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
RuntimeError, r"vjp\(f, \*primals\): expected f\(\*primals\) to return only tensors"
):
_, vjp_fn = vjp(lambda _: output, x)
vjp_fn(t)
# Check list output
output, vjp_fn = vjp(lambda x: [x, x.sum()], x)
vjp_out, = vjp_fn([t, t.sum()])
assert isinstance(output, list) and len(output) == 2
assert isinstance(vjp_out, torch.Tensor)
# Check dict output
output, vjp_fn = vjp(lambda x: {"x": x, "xsum": x.sum()}, x)
vjp_out, = vjp_fn({"x": t, "xsum": t.sum()})
assert isinstance(output, dict) and len(output) == 2 and "xsum" in output
assert isinstance(vjp_out, torch.Tensor)
def composite_output(x):
out = x.sum()
return [
(out, {"a": x, "out": [x, out]}),
]
output, vjp_fn = vjp(composite_output, x)
vjp_out, = vjp_fn([(t.sum(), {"a": t, "out": [t, t.sum()]}), ])
assert isinstance(output, list)
assert isinstance(output[0], tuple) and isinstance(output[0][1], dict)
assert isinstance(vjp_out, torch.Tensor)
def test_vjp_pytree_error(self, device):
def f(x):
return x, (x, x)
x = torch.randn([], device=device)
v1 = torch.randn([], device=device)
v2 = torch.randn([], device=device)
v3 = torch.randn([], device=device)
_, vjp_fn = vjp(f, x)
with self.assertRaisesRegex(RuntimeError, 'Expected pytree structure'):
result, = vjp_fn(((v1, (v2, v3)),))
def test_vjp_aux_tensor(self, device):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: [t, t], x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r'vjp\(f, \*primals\): output of function f should be a tuple'):
vjp(lambda t: (t, t + 2, t + 3), x, has_aux=True)
def f(t):
y = t.sin()
return y, t.cos()
out, vjp_fn, aux = vjp(f, x, has_aux=True)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.sin())
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
def test_vjp_aux_pytree(self, device):
def f(x):
y = x.sin()
return y, {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
out, vjp_fn, aux = vjp(f, x, has_aux=True)
expected_out, expected_aux = f(x)
self.assertEqual(out, expected_out)
self.assertEqual(aux, expected_aux)
v = torch.randn(3, device=device)
grad_x, = vjp_fn(v)
self.assertEqual(grad_x, v * x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, aux), x, has_aux=True)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = vjp(lambda x: (x, [x, aux]), x, has_aux=True)
def test_functional_init(self, device):
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
B = 10
weights, fn, _ = functional_init(MLPClassifier, (B,), device=device)(32, 2)
inputs = torch.randn(B, 7, 2, device=device)
vmap(fn)(weights, (inputs,))
def test_functional_init_with_buffers(self, device):
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.bn = nn.BatchNorm1d(self.hidden_dim, affine=True)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.bn(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
B = 10
weights, buffers, fn, _, _ = \
functional_init_with_buffers(MLPClassifier, [B], device=device)(32, 2)
inputs = torch.randn(B, 7, 2, device=device)
vmap(fn)(weights, buffers, (inputs,))
def test_advanced_indexing(self, device):
def f(value):
log_prob = torch.ones((), device=device)
val = (torch.zeros(()) > 0)
log_prob[val] = 0
return value
result = grad(f)(torch.randn((), device=device))
self.assertEqual(result, torch.ones_like(result))
def f2(value):
value = value.clone()
value[value > 0] = 0
return value.sum()
x = torch.randn(100, device=device)
result = grad(f2)(x)
self.assertEqual(result, (x <= 0).type_as(x))
def test_tensor_ctor_inside_grad(self, device):
def foo(x):
return x * torch.tensor(2., device=device)
x = torch.tensor(3.14, device=device)
functorch.grad(foo)(x)
@parametrize("op_list_data", [
subtest(([vmap, ], [(4, 2), (64, 3, 32, 32)]), name='vmap'),
subtest(([vmap, vmap], [(4, 3, 2), (64, 3, 32, 32)]), name='vmap_vmap'),
subtest(([grad, ], [(0, ), [], (4, 2), (64, 3, 32, 32)]), name='grad'),
subtest(([grad, grad], [[], ]), name='grad_grad'),
subtest(([vmap, grad], [(4, 2)]), name='vmap_grad'),
])
def test_tensor_print(self, device, op_list_data):
op_list, shapes = op_list_data
for dt in get_all_fp_dtypes():
data = [torch.randn(s, dtype=dt, device=device) for s in shapes]
for x in data:
buf = None
def foo(t):
nonlocal buf
buf = repr(t)
return t.mean()
fn = foo
bdim = 0
for op in reversed(op_list):
if op == vmap:
fn = op(fn, in_dims=bdim)
bdim += 1
else:
fn = op(fn)
expected = f"{repr(x)}"
level = 0
for op in op_list:
level += 1
if op == grad:
expected = f"GradTrackingTensor(lvl={level}, value={expected})"
elif op == vmap:
bdim -= 1
expected = f"BatchedTensor(lvl={level}, bdim={bdim}, value={expected})"
fn(x)
buf = buf.replace("\n", "").replace(" ", "")
expected = expected.replace("\n", "").replace(" ", "")
self.assertEqual(expected, buf)
def test_no_grad_outside(self, device):
x = torch.randn([], device=device, requires_grad=True)
with torch.no_grad():
y = grad(torch.sin)(x)
self.assertEqual(y, x.cos())
self.assertFalse(y.requires_grad)
def test_no_grad_inside(self, device):
def f(x):
with torch.no_grad():
shift = x ** 2
return x ** 2 - shift
x = torch.randn([], device=device)
y = grad(f)(x)
self.assertEqual(y, 2 * x)
y = grad(grad(f))(x)
self.assertEqual(y, 2)
x = torch.randn([], device=device, requires_grad=True)
y = grad(f)(x)
z, = torch.autograd.grad(y, x)
self.assertEqual(z, 2)
def test_no_grad_mixed(self, device):
def f(x):
with torch.no_grad():
shift = x ** 2
return x ** 2 - shift
x = torch.randn([], device=device, requires_grad=True)
with torch.no_grad():
y = grad(f)(x)
self.assertEqual(y, 2 * x)
self.assertFalse(y.requires_grad)
def test_no_grad_nested_simple(self, device):
def h(x):
with torch.no_grad():
shift = grad(lambda x: 0.25 * x ** 4)(x)
return x ** 3 - shift
x = torch.tensor(1.5, device=device, requires_grad=True)
y = grad(h)(x)
self.assertEqual(y, 3 * x ** 2)
z, = torch.autograd.grad(y, x)
self.assertEqual(z, 6 * x)
def test_no_grad_nested_complicated(self, device):
def f(x):
with torch.no_grad():
shift = x ** 3
return x ** 3 - shift
def g(x):
r1 = grad(f)(x)
with torch.no_grad():
shift = grad(f)(x)
return r1 - shift
x = torch.randn([], requires_grad=True, device=device)
y = grad(g)(x)
            # The only differentiable part of g is x ** 3
self.assertEqual(y, 6 * x)
z, = torch.autograd.grad(y, x)
self.assertEqual(z, 6)
def test_no_grad_value(self, device):
def h(x):
with torch.no_grad():
gvalue, value = grad_and_value(lambda x: x ** 3)(x)
return x ** 3 - value
x = torch.tensor(1.6, device=device, requires_grad=True)
y = grad(h)(x)
self.assertEqual(y, 3 * x ** 2)
z, = torch.autograd.grad(y, x)
self.assertEqual(z, 6 * x)
def test_no_grad_outside_vjp(self, device):
def h(x):
return x ** 2
x = torch.tensor(2., requires_grad=True, device=device)
with torch.no_grad():
out, vjp_fn = vjp(h, x)
y, = vjp_fn(torch.tensor(1., device=device))
self.assertEqual(y, 2 * x)
self.assertFalse(y.requires_grad)
self.assertFalse(out.requires_grad)
def test_no_grad_outside_vjp_fn(self, device):
def h(x):
return x ** 2
x = torch.tensor(3.14, requires_grad=True, device=device)
out, vjp_fn = vjp(h, x)
with torch.no_grad():
y, = vjp_fn(torch.tensor(1., device=device))
self.assertEqual(y, 2 * x)
self.assertFalse(y.requires_grad)
self.assertTrue(out.requires_grad)
z, = torch.autograd.grad(out, x)
self.assertEqual(z, 2 * x)
def test_no_grad_outside_vjp_only(self, device):
def h(x):
return x ** 2
x = torch.tensor(3.14, requires_grad=True, device=device)
with torch.no_grad():
out, vjp_fn = vjp(h, x)
y, = vjp_fn(torch.tensor(1., device=device))
self.assertEqual(y, 2 * x)
self.assertFalse(out.requires_grad)
# This one is a little weird...
self.assertTrue(y.requires_grad)
z, = torch.autograd.grad(y, x)
self.assertEqual(z, 2)
class TestVmapOfGrad(TestCase):
def test_per_sample_grads_inplace_view(self, device):
def compute_loss(weight, x, t):
x = x.mm(weight)
y = x.squeeze_(0)
return (y - t).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 1, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
# TODO: Check if the rtol is a problem
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_new_zeros_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_zeros((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_new_empty_materializes_tensor(self, device):
N = 3
C = 5
def foo(y, x):
result = x.new_empty((C,))
result.copy_(y)
return result.sum()
x = torch.randn(N, device=device)
y = torch.randn(N, C, device=device)
result = vmap(grad(foo))(y, x)
self.assertEqual(result, torch.ones_like(y))
def test_per_sample_grads_simple(self, device):
def compute_loss(weight, x, t):
y = x @ weight
return ((y - t) ** 2).sum()
weight = torch.randn(16, 2, device=device)
x = torch.randn(64, 16, device=device)
t = torch.randn(64, 2, device=device)
result = vmap(partial(grad(compute_loss), weight))(x, t)
expected = [grad(compute_loss)(weight, x[i], t[i]) for i in range(64)]
expected = torch.stack(expected)
# TODO: Check if the rtol is a problem
self.assertEqual(result, expected, atol=0, rtol=5e-4)
def test_per_sample_grads_embeddingnet(self, device):
class SampleNet(nn.Module):
def __init__(self, vocab_size: int):
super().__init__()
self.emb = nn.Embedding(vocab_size, 16)
self.fc1 = nn.Linear(16, 16)
self.fc2 = nn.Linear(16, 2)
def forward(self, x):
x = self.emb(x)
x = torch.transpose(x, -1, -2)
x = torch.mean(x, -1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x
def name(self):
return "SampleNet"
# Create our inputs...
vocab_size = 1000
batch_shape = [64]
words_per_sentence = 5
data = torch.randint(0, vocab_size, (*batch_shape, words_per_sentence), device=device)
targets = torch.randint(0, 1, (*batch_shape,), device=device)
# Construct our module
net = SampleNet(vocab_size).to(device=device)
criterion = nn.CrossEntropyLoss()
net_func, weights = make_functional(net)
def compute_loss(weights, data, target):
output = net_func(weights, data)
result = criterion(output, target)
return result
expected = [grad(compute_loss)(weights, data[i], targets[i]) for i in range(64)]
expected = zip(*expected)
expected = tuple(torch.stack(shards) for shards in expected)
result = vmap(partial(grad(compute_loss), weights))(data, targets)
for r, e in zip(result, expected):
# TODO: Check if the rtol is a problem
self.assertEqual(r, e, atol=0, rtol=1e-3)
def test_log_softmax(self, device):
x = torch.randn(3, 5, device=device)
v = torch.randn(5, device=device)
def foo(x, v):
_, vjp_fn = vjp(partial(torch.log_softmax, dim=-1), x)
return vjp_fn(v)[0]
result = vmap(foo, (0, None))(x, v)
v = v.expand_as(x)
x.requires_grad_()
output = torch.log_softmax(x, dim=-1)
output.backward(v)
self.assertEqual(result, x.grad)
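# A minimal per-sample-gradient sketch distilled from the tests above, kept as
# an uncalled module-level helper; names and shapes here are illustrative and
# are not used by the test suite.
def _per_sample_grad_sketch(device="cpu"):
    def compute_loss(weight, x, t):
        return ((x @ weight - t) ** 2).sum()
    weight = torch.randn(16, 2, device=device)
    x = torch.randn(64, 16, device=device)
    t = torch.randn(64, 2, device=device)
    # vmap over the batch dimension of (x, t) while sharing the same weight,
    # yielding one gradient of shape (64, 16, 2) -- one per sample.
    return vmap(partial(grad(compute_loss), weight))(x, t)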
jacrev_and_jacfwd = parametrize("jacapi", [subtest(jacrev, name='jacrev'), subtest(jacfwd, name='jacfwd')])
FIXME_jacrev_only = parametrize("jacapi", [subtest(jacrev, name='jacrev')])
class TestJac(TestCase):
@jacrev_and_jacfwd
def test_simple(self, device, jacapi):
x = torch.randn(3, device=device)
y = jacapi(torch.sin)(x)
expected = torch.diagflat(x.cos())
assert torch.allclose(y, expected)
@jacrev_and_jacfwd
def test_simple_not_flat(self, device, jacapi):
x = torch.randn(2, 3, device=device)
y = jacapi(torch.sin)(x)
expected = torch.diagflat(x.view(-1).cos())
expected = expected.view(2, 3, 2, 3)
assert torch.allclose(y, expected)
@FIXME_jacrev_only
def test_diff_numel(self, device, jacapi):
x = torch.randn(2, 4, device=device)
# Tensor[2, 4] -> Tensor[3, 1]
def f(x):
return x[0, 1:].unsqueeze(-1)
y = jacapi(f)(x)
self.assertEqual(y.shape, (3, 1, 2, 4))
expected = x.new_zeros(3, 1, 2, 4)
expected[0, 0, 0, 1] = 1
expected[1, 0, 0, 2] = 1
expected[2, 0, 0, 3] = 1
self.assertEqual(y, expected)
@FIXME_jacrev_only
def test_vmap_on_jac_simple(self, device, jacapi):
x = torch.randn(2, 3, device=device)
y = vmap(jacapi(torch.sin))(x)
expected = torch.stack([torch.diagflat(x[i].cos()) for i in range(2)])
assert torch.allclose(y, expected)
@FIXME_jacrev_only
def test_nested_jac_simple(self, device, jacapi):
def foo(x):
return x.sin().sum()
x = torch.randn(3, device=device)
y = jacapi(jacapi(foo))(x)
expected = torch.diagflat(-x.sin())
assert torch.allclose(y, expected)
@jacrev_and_jacfwd
def test_multiple_args(self, device, jacapi):
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(torch.multiply, argnums=1)(x, y)
expected = torch.diagflat(x)
assert torch.allclose(z, expected)
@jacrev_and_jacfwd
def test_multiple_outputs_multiple_argnums(self, device, jacapi):
def f(x, y):
return 2 * x + 3 * y, 4 * x + 5 * y
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(f, argnums=(0, 1))(x, y)
expected_out0_x = torch.diagflat(torch.full_like(x, 2))
expected_out0_y = torch.diagflat(torch.full_like(y, 3))
expected_out1_x = torch.diagflat(torch.full_like(x, 4))
expected_out1_y = torch.diagflat(torch.full_like(y, 5))
self.assertEqual(len(z), 2)
self.assertTrue(isinstance(z, tuple))
self.assertEqual(len(z[0]), 2)
self.assertTrue(isinstance(z[0], tuple))
self.assertEqual(z[0][0], expected_out0_x)
self.assertEqual(z[0][1], expected_out0_y)
self.assertEqual(z[1][0], expected_out1_x)
self.assertEqual(z[1][1], expected_out1_y)
@jacrev_and_jacfwd
def test_multiple_outputs_single_argnums(self, device, jacapi):
def f(x, y):
return 2 * x + 3 * y, 4 * x + 5 * y
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
expected_out0_x = torch.diagflat(torch.full_like(x, 2))
expected_out1_x = torch.diagflat(torch.full_like(x, 4))
z = jacapi(f, argnums=0)(x, y)
self.assertEqual(len(z), 2)
self.assertTrue(isinstance(z, tuple))
self.assertEqual(z, (expected_out0_x, expected_out1_x))
z = jacapi(f, argnums=(0,))(x, y)
self.assertEqual(len(z), 2)
self.assertTrue(isinstance(z, tuple))
self.assertTrue(isinstance(z[0], tuple))
self.assertEqual(z, ((expected_out0_x,), (expected_out1_x,)))
@FIXME_jacrev_only
def test_multiple_outputs_pytree(self, device, jacapi):
def f(x, y):
return {'left': 2 * x + 3 * y, 'right': 4 * x + 5 * y}
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(f, argnums=(0, 1))(x, y)
expected_left_x = torch.diagflat(torch.full_like(x, 2))
expected_left_y = torch.diagflat(torch.full_like(y, 3))
expected_right_x = torch.diagflat(torch.full_like(x, 4))
expected_right_y = torch.diagflat(torch.full_like(y, 5))
expected = {
'left': (expected_left_x, expected_left_y),
'right': (expected_right_x, expected_right_y),
}
self.assertTrue(isinstance(z, dict))
self.assertTrue(isinstance(z['left'], tuple))
self.assertTrue(isinstance(z['right'], tuple))
self.assertEqual(z, expected)
@jacrev_and_jacfwd
def test_multiple_inputs_pytree(self, device, jacapi):
def f(a, b, c):
a0, a1 = a
return a0 + a1 * 2 + b * 3 + c * 4
x = torch.randn([], device=device)
args = ((x, x), x, x)
result = jacapi(f, argnums=(0, 1, 2))(*args)
expected = (
(torch.tensor(1., device=device), torch.tensor(2., device=device)),
torch.tensor(3., device=device),
torch.tensor(4., device=device),
)
self.assertEqual(result, expected)
result = jacapi(f, argnums=(0,))(*args)
expected = ((torch.tensor(1., device=device), torch.tensor(2., device=device)),)
self.assertEqual(result, expected)
result = jacapi(f)(*args)
expected = (torch.tensor(1., device=device), torch.tensor(2., device=device))
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_dimensionality(self, device, jacapi):
def f(x):
return x
x = torch.randn([], device=device)
result = jacapi(f)(x)
self.assertEqual(result.dim(), 0)
self.assertEqual(result, torch.ones_like(x))
x = torch.randn([1], device=device)
result = jacapi(f)(x)
self.assertEqual(result.dim(), 2)
self.assertEqual(result, x.new_ones(1, 1))
@FIXME_jacrev_only
def test_aux_tensor(self, device, jacapi):
def f(x):
y = x.clone()
return y, y.cos()
x = torch.randn(3, device=device)
result, aux = jacapi(f, has_aux=True)(x)
self.assertEqual(result, torch.eye(3, 3, device=device))
self.assertEqual(aux, x.cos())
@jacrev_and_jacfwd
def test_aux_pytree(self, device, jacapi):
def f(x):
y = x.clone()
return y, {'a': y.cos(), 'b': [y.tan()]}
x = torch.randn(3, device=device)
result, aux = jacapi(f, has_aux=True)(x)
self.assertEqual(result, torch.eye(3, 3, device=device))
_, expected_aux = f(x)
self.assertEqual(aux, expected_aux)
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = jacapi(lambda x: (x, aux), has_aux=True)(x)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = jacapi(lambda x: (x, [x, aux]), has_aux=True)(x)
@jacrev_and_jacfwd
def test_outputs_can_any_pytree(self, device, jacapi):
x = torch.randn(2, 3, device=device)
for output in [None, ()]:
with self.assertRaisesRegex(
RuntimeError, r"(vjp|jvp).+: Expected f to be a function that has non-empty output"
):
jacapi(lambda _: output)(x)
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
RuntimeError, r"(vjp|jvp).+: expected f\(\*primals\) to return only tensors"
):
jacapi(lambda _: output)(x)
# Check list output
out = jacapi(lambda x: [x, x.sum()])(x)
assert isinstance(out, list) and len(out) == 2
# Check dict output
out = jacapi(lambda x: {"x": x, "xsum": x.sum()})(x)
assert isinstance(out, dict) and len(out) == 2 and "xsum" in out
def composite_output(x):
out = x.sum()
return [
(out, {"a": x, "out": [x, out]}),
]
out = jacapi(composite_output)(x)
assert isinstance(out, list)
assert isinstance(out[0], tuple) and isinstance(out[0][1], dict)
@jacrev_and_jacfwd
def test_multiple_inputs_outputs_pytree(self, device, jacapi):
def f(a, b, c):
a0, a1 = a
return a0 + a1 * 2, {'foo': b * 3 + c * 4}
x = torch.randn([], device=device)
zero = torch.zeros([], device=device)
args = ((x, x), x, x)
result = jacapi(f)(*args)
expected = (
(torch.tensor(1., device=device), torch.tensor(2., device=device)),
{'foo': (zero, zero)},
)
self.assertEqual(result, expected)
result = jacapi(f, argnums=(0,))(*args)
expected = (
((torch.tensor(1., device=device), torch.tensor(2., device=device)),),
{'foo': ((zero, zero),)},
)
self.assertEqual(result, expected)
result = jacapi(f, argnums=(0, 1))(*args)
expected = (
((torch.tensor(1., device=device), torch.tensor(2., device=device)), zero),
{'foo': ((zero, zero), torch.tensor(3., device=device))},
)
self.assertEqual(result, expected)
@FIXME_jacrev_only
def test_multiple_inputs_outputs_pytree_multidim(self, device, jacapi):
def f(dct):
a = dct['a']
b = dct['b']
return {'c': a.sin(), 'd': b.cos()}
x = torch.randn(3, device=device)
args = ({'a': x, 'b': x},)
result = jacapi(f)(*args)
expected = {
'c': {'a': x.cos().diagflat(), 'b': x.new_zeros(3, 3)},
'd': {'a': x.new_zeros(3, 3), 'b': -x.sin().diagflat()},
}
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_unrelated_input(self, device, jacapi):
def f(x, y):
return x
x = torch.randn(2, 3, device=device)
y = torch.randn(2, 3, device=device)
result = jacapi(f, argnums=(0, 1))(x, y)
expected0 = torch.eye(6, 6, device=device).view(2, 3, 2, 3)
expected1 = y.new_zeros(2, 3, 2, 3)
expected = (expected0, expected1)
self.assertTrue(isinstance(result, tuple))
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_unrelated_output(self, device, jacapi):
y = torch.randn(2, 3, device=device)
def f(x):
return y
x = torch.randn(2, 3, device=device)
result = jacapi(f)(x)
expected = x.new_zeros(2, 3, 2, 3)
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_empty_output(self, device, jacapi):
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
def f(x, y):
return ()
with self.assertRaisesRegex(RuntimeError, 'xpected'):
jacapi(f)(x, y)
@jacrev_and_jacfwd
def test_argnums_tuple(self, device, jacapi):
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(torch.multiply, argnums=(0, 1))(x, y)
expected0 = torch.diagflat(y)
expected1 = torch.diagflat(x)
assert len(z) == 2
assert torch.allclose(z[0], expected0)
assert torch.allclose(z[1], expected1)
@jacrev_and_jacfwd
def test_argnums_effect_on_return(self, device, jacapi):
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(torch.multiply, argnums=(0,))(x, y)
expected0 = torch.diagflat(y)
assert isinstance(z, tuple)
assert len(z) == 1
assert torch.allclose(z[0], expected0)
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(torch.multiply, argnums=0)(x, y)
expected0 = torch.diagflat(y)
assert isinstance(z, torch.Tensor)
assert torch.allclose(z, expected0)
@jacrev_and_jacfwd
def test_argnums_defaults_to_zero(self, device, jacapi):
def f(x, y):
return x * 2 + y * 3
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
z = jacapi(f)(x, y)
expected = torch.diagflat(torch.full_like(x, 2))
self.assertEqual(z, expected)
@jacrev_and_jacfwd
def test_empty_argnums(self, device, jacapi):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
jacapi(torch.sin, argnums=())(x)
@jacrev_and_jacfwd
def test_out_of_bounds_argnums(self, device, jacapi):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, "only 1 positional inputs"):
jacapi(torch.sin, argnums=2)(x)
@jacrev_and_jacfwd
def test_negative_argnums(self, device, jacapi):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, "only 1 positional inputs"):
jacapi(torch.sin, argnums=-2)(x)
@jacrev_and_jacfwd
def test_repeated_argnums(self, device, jacapi):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, "must be unique"):
jacapi(torch.sin, argnums=(0, 0))(x)
@jacrev_and_jacfwd
def test_float_argnums(self, device, jacapi):
x = torch.randn(3, device=device)
with self.assertRaisesRegex(RuntimeError, "must be int or Tuple"):
jacapi(torch.sin, argnums=0.0)(x)
with self.assertRaisesRegex(RuntimeError, "must be int"):
jacapi(torch.multiply, argnums=(1, 0.0))(x, x)
def test_hessian_simple(self, device):
def f(x):
return x.sin()
x = torch.randn(3, device=device)
hessian(f)(x)
def _test_against_reference(self, f, inputs, jacapi):
def foo(inputs):
return f(*inputs)
expected = torch.autograd.functional.jacobian(f, inputs)
result = jacapi(foo)(inputs)
self.assertEqual(result, expected)
@jacrev_and_jacfwd
def test_against_reference_simple(self, device, jacapi):
def f(x):
return 3 * x ** 2
x = torch.randn(2, 3, 5, device=device)
self._test_against_reference(f, (x,), jacapi)
@jacrev_and_jacfwd
def test_against_reference_multi_input(self, device, jacapi):
def f(x, y):
return (x.cos() * x) @ y.sin()
x = torch.randn(2, 3, device=device)
y = torch.randn(3, 5, device=device)
self._test_against_reference(f, (x, y), jacapi)
@jacrev_and_jacfwd
def test_against_reference_multi_input_multi_output(self, device, jacapi):
def f(x, y):
return (x * x) @ y, x @ (x.sum(1) * y), y.sum()
x = torch.randn(5, 3, device=device)
y = torch.randn(3, 5, device=device)
self._test_against_reference(f, (x, y), jacapi)
@jacrev_and_jacfwd
def test_against_reference_unrelated_outputs(self, device, jacapi):
def f(x, y):
return x, y, x, y
x = torch.randn(2, device=device)
y = torch.randn(3, device=device)
self._test_against_reference(f, (x, y), jacapi)
@jacrev_and_jacfwd
def test_against_reference_zero_dim(self, device, jacapi):
# zero-dim output
def f(x, y):
return x.sum(), y.sum(), x * y
x = torch.randn(3, device=device)
y = torch.randn(3, device=device)
self._test_against_reference(f, (x, y), jacapi)
# zero-dim input
def g(x):
return torch.stack([x, x, x])
x = torch.randn([], device=device)
self._test_against_reference(g, (x,), jacapi)
# Mixed zero-dim input / zero-dim output
def h(x, y):
return y.sum(), x * y
x = torch.randn([], device=device)
y = torch.randn(1, device=device)
self._test_against_reference(h, (x, y), jacapi)
@jacrev_and_jacfwd
def test_against_reference_correctness_different_devices(self, device, jacapi):
def f(x, y):
return x * y, (x * y).to(device=device)
x = torch.randn(3)
y = torch.randn(3)
self._test_against_reference(f, (x, y), jacapi)
class TestHessian(TestCase):
def _test_against_reference(self, f, inputs):
def foo(inputs):
return f(*inputs)
expected = torch.autograd.functional.hessian(f, inputs)
result = hessian(foo)(inputs)
self.assertEqual(result, expected)
def test_hessian_vectorize_correctness_simple(self, device):
def f(x):
return (3 * x ** 2).sum()
x = torch.randn(2, 3, 5, device=device)
self._test_against_reference(f, (x,))
def test_hessian_vectorize_correctness_multi_input(self, device):
def f(x, y, z):
return ((x.relu() * x) @ y.sin() @ z).sum()
x = torch.randn(2, 3, device=device)
y = torch.randn(3, 5, device=device)
z = torch.randn(5, 5, device=device)
self._test_against_reference(f, (x, y, z))
def test_hessian_vectorize_correctness_unrelated_outputs(self, device):
# output unrelated to one input
def f(x, y):
return (x ** 2).sum()
x = torch.randn(2, device=device)
y = torch.randn(3, device=device)
self._test_against_reference(f, (x, y))
# output unrelated to all inputs
def f(x, y):
return torch.ones([])
x = torch.randn(2, device=device)
y = torch.randn(3, device=device)
self._test_against_reference(f, (x, y))
def test_jacfwd_different_levels(self, device):
# Test case from:
# https://github.com/pytorch/functorch/issues/597
b = 8
n = 100
d = 2
x1 = torch.randn(b, n, d, device=device)
x2 = x1
A = 0.1 * torch.randn(b, d, d, device=device)
def loss(A, x1, x2):
x2_hat = (A @ (x1.T)).T
res = x2 - x2_hat
res_sqr = res**2
return res_sqr.sum()
hess1 = vmap(jacrev(jacrev(loss)))(A, x1, x2)
hess2 = vmap(hessian(loss))(A, x1, x2)
self.assertEqual(hess2, hess1)
class TestJvp(TestCase):
def test_inplace_on_captures(self, device):
x = torch.tensor([1., 2., 3.], device=device)
captured = torch.randn(3, device=device)
def foo(x):
captured.copy_(x)
return (x * captured).sum()
with self.assertRaisesRegex(RuntimeError, 'mutate a captured Tensor'):
grad(foo)(x)
def test_simple(self, device):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
result = jvp(torch.sin, (x,), (t,))
expected = (x.sin(), x.cos() * t)
self.assertTrue(isinstance(result, tuple))
self.assertEqual(result, expected)
def test_multiple_inputs(self, device):
x = torch.randn(2, 3, device=device)
y = torch.randn(2, 3, device=device)
tx = torch.randn(2, 3, device=device)
ty = torch.randn(2, 3, device=device)
def f(x, y):
return x * y
result = jvp(f, (x, y), (tx, ty))
expected = (x * y, y * tx + x * ty)
self.assertTrue(isinstance(result, tuple))
self.assertEqual(result, expected)
def test_pytree_inputs(self, device):
def f(x, y, z):
a, b = x
return a + 2 * b + 3 * y + 4 * z
one = torch.tensor(1., device=device)
primal_outs, tangent_outs = jvp(f, ((one, one), one, one), ((one, one), one, one))
self.assertEqual(primal_outs, one * 10)
self.assertEqual(tangent_outs, one * 10)
def test_pytree_inputs_error_cases(self, device):
def f(x):
return x
one = torch.tensor(1., device=device)
with self.assertRaisesRegex(RuntimeError, 'Expected primals to be a tuple'):
jvp(f, one, one)
with self.assertRaisesRegex(RuntimeError, 'same python structure'):
jvp(f, ((one, one), one), (one, one))
with self.assertRaisesRegex(RuntimeError, 'only contain Tensors'):
jvp(f, ((one, one), 1), ((one, one), one))
with self.assertRaisesRegex(RuntimeError, 'only contain Tensors'):
jvp(f, ((one, one), 1), ((1, one), one))
with self.assertRaisesRegex(RuntimeError, 'at least one Tensor'):
jvp(f, ((),), ((),))
def test_unrelated_input(self, device):
def f(x, y):
return x
x = torch.randn(2, 3, device=device)
y = torch.randn(2, 3, device=device)
tx = torch.randn(2, 3, device=device)
ty = torch.randn(2, 3, device=device)
result = jvp(f, (x, y), (tx, ty))
expected = (x, tx)
self.assertTrue(isinstance(result, tuple))
self.assertEqual(result, expected)
def test_unrelated_output(self, device):
y = torch.randn(2, 3, device=device)
def f(x):
return y
x = torch.randn(2, 3, device=device)
tx = torch.randn(2, 3, device=device)
result = jvp(f, (x,), (tx,))
expected = (y, torch.zeros_like(y))
self.assertTrue(isinstance(result, tuple))
self.assertEqual(result, expected)
def test_strict_mode(self, device):
y = torch.randn(2, 3, device=device)
def f(x):
return x, y
x = torch.randn(2, 3, device=device)
tx = torch.randn(2, 3, device=device)
with self.assertRaisesRegex(RuntimeError, "strict"):
jvp(f, (x,), (tx,), strict=True)
def test_multiple_outputs(self, device):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
def f(x):
return torch.sin(x), torch.cos(x)
result = jvp(f, (x,), (t,))
expected = (f(x), (x.cos() * t, -x.sin() * t))
self.assertTrue(isinstance(result, tuple))
self.assertEqual(result, expected)
def test_multiple_inputs_outputs(self, device):
x = torch.randn(2, 3, device=device)
y = torch.randn(2, 3, device=device)
tx = torch.randn(2, 3, device=device)
ty = torch.randn(2, 3, device=device)
def f(x, y):
return 2 * x + 3 * y, 4 * x + 5 * y
result = jvp(f, (x, y), (tx, ty))
expected = (f(x, y), f(tx, ty))
self.assertTrue(isinstance(result, tuple))
self.assertEqual(result, expected)
def test_primals_tangents_length_mismatch(self, device):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
msg = "same python structure"
with self.assertRaisesRegex(RuntimeError, msg):
jvp(torch.sin, (x,), (t, t))
with self.assertRaisesRegex(RuntimeError, msg):
jvp(torch.sin, (x, x), (t, t, t))
def test_nonempty_primals_and_tangents(self, device):
with self.assertRaisesRegex(RuntimeError, "at least one Tensor"):
jvp(torch.sin, (), ())
def test_inputs_are_tuples_of_tensors(self, device):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
with self.assertRaisesRegex(RuntimeError, 'be a tuple'):
jvp(torch.sin, x, (t,))
with self.assertRaisesRegex(RuntimeError, 'same python structure'):
jvp(torch.sin, (x,), t)
with self.assertRaisesRegex(RuntimeError, 'same python structure'):
jvp(torch.sin, (x,), [t])
with self.assertRaisesRegex(RuntimeError, 'only contain Tensors'):
jvp(torch.sin, (1.,), (t,))
with self.assertRaisesRegex(RuntimeError, 'only contain Tensors'):
jvp(torch.sin, (x,), (1.,))
def test_outputs_can_any_pytree(self, device):
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
for output in [None, ()]:
with self.assertRaisesRegex(
RuntimeError, r"jvp\(f, primals, tangents\): Expected f to be a function that has non-empty output"
):
jvp(lambda _: output, (x,), (t,))
for output in [1, True, 12.2, "abc"]:
with self.assertRaisesRegex(
RuntimeError, r"jvp\(f, primals, tangents\): expected f\(\*primals\) to return only tensors"
):
jvp(lambda _: output, (x,), (t,))
# Check list output
out = jvp(lambda x: [x, x.sum()], (x,), (t,))
for i in range(2):
assert isinstance(out[i], list) and len(out[i]) == 2
# Check dict output
out = jvp(lambda x: {"x": x, "xsum": x.sum()}, (x,), (t,))
for i in range(2):
assert isinstance(out[i], dict) and len(out[i]) == 2 and "xsum" in out[i]
def composite_output(x):
out = x.sum()
return [
(out, {"a": x, "out": [x, out]}),
]
out = jvp(composite_output, (x,), (t,))
for i in range(2):
assert isinstance(out[i], list)
assert isinstance(out[i][0], tuple) and \
isinstance(out[i][0][1], dict)
def test_aux_tensor(self, device):
x = torch.randn(3, device=device)
t = torch.randn(3, device=device)
with self.assertRaisesRegex(
RuntimeError, r'jvp\(f, primals, tangents\): output of function f should be a tuple'
):
jvp(lambda t: [t, t], (x, ), (t, ), has_aux=True)
with self.assertRaisesRegex(
RuntimeError, r'jvp\(f, primals, tangents\): output of function f should be a tuple'
):
jvp(lambda t: (t, t + 2, t + 3), (x, ), (t, ), has_aux=True)
def f(z):
y = z.sin()
return y, z.cos()
out, jvp_out, aux = jvp(f, (x, ), (t, ), has_aux=True)
self.assertEqual(aux, x.cos())
self.assertEqual(out, x.sin())
self.assertEqual(jvp_out, t * x.cos())
def test_aux_pytree(self, device):
def f(x):
y = x.sin()
return y, {'a': x.cos(), 'b': [x.tan()]}
x = torch.randn(3, device=device)
t = torch.randn(3, device=device)
out, jvp_out, aux = jvp(f, (x, ), (t, ), has_aux=True)
expected_out, expected_aux = f(x)
self.assertEqual(out, expected_out)
self.assertEqual(aux, expected_aux)
self.assertEqual(jvp_out, t * x.cos())
for aux in [1, 1.0, "abc"]:
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = jvp(lambda x: (x, aux), (x, ), (t, ), has_aux=True)
with self.assertRaisesRegex(RuntimeError, r"Expected tensors, got unsupported type"):
_ = jvp(lambda x: (x, [x, aux]), (x, ), (t, ), has_aux=True)
def test_fwd_grad_enabled(self, device):
# Tests some private helper functions to enable/disable fwd grad mode
enabled = functorch._C.get_fwd_grad_enabled()
self.assertTrue(enabled)
try:
functorch._C.set_fwd_grad_enabled(False)
enabled = functorch._C.get_fwd_grad_enabled()
self.assertFalse(enabled)
finally:
functorch._C.set_fwd_grad_enabled(True)
enabled = functorch._C.get_fwd_grad_enabled()
self.assertTrue(enabled)
def test_autograd_function_disables_fwd_grad(self, device):
# Sanity check. We don't really assume this anywhere so
# it's fine if this breaks one day.
class MySquare(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
enabled = functorch._C.get_fwd_grad_enabled()
self.assertFalse(enabled)
return x * x
@staticmethod
def backward(ctx, gx):
return gx
x = torch.randn(3, requires_grad=True)
MySquare.apply(x)
def test_enable_fwd_grad(self, device):
# Tests a private helper function
try:
functorch._C.set_fwd_grad_enabled(False)
enabled = functorch._C.get_fwd_grad_enabled()
self.assertFalse(enabled)
with enable_fwd_grad():
enabled = functorch._C.get_fwd_grad_enabled()
self.assertTrue(enabled)
enabled = functorch._C.get_fwd_grad_enabled()
self.assertFalse(enabled)
finally:
functorch._C.set_fwd_grad_enabled(True)
def test_disable_fwd_grad_outside(self, device):
x = torch.randn([], device=device)
t = torch.ones_like(x)
with enable_fwd_grad(False):
_, y = jvp(torch.sin, (x,), (t,))
self.assertEqual(y, x.cos())
def test_disable_fwd_grad_inside(self, device):
def f(x):
with enable_fwd_grad(False):
shift = x ** 2
return x ** 2 - shift
x = torch.randn([], device=device)
t = torch.ones_like(x)
_, y = jvp(f, (x,), (t,))
self.assertEqual(y, 2 * x)
_, y = jvp(lambda x: jvp(f, (x,), (t,))[1], (x,), (t,))
self.assertEqual(y, 2)
def test_disable_fwd_grad_mixed(self, device):
def f(x):
with enable_fwd_grad(False):
shift = x ** 2
return x ** 2 - shift
x = torch.randn([], device=device)
t = torch.ones_like(x)
with enable_fwd_grad():
_, y = jvp(f, (x,), (t,))
self.assertEqual(y, 2 * x)
def test_jvp_inside_autograd_function(self, device):
class MySin(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
t = torch.ones_like(x)
_, neg_sin_x = jvp(torch.cos, (x,), (t,))
ctx.save_for_backward(x)
return -neg_sin_x
@staticmethod
def backward(ctx, gx):
x, = ctx.saved_tensors
t = torch.ones_like(x)
_, cos_x = jvp(torch.sin, (x,), (t,))
return gx * cos_x
x = torch.randn([], device=device, requires_grad=True)
y = MySin.apply(x)
self.assertEqual(y, x.sin())
gx, = torch.autograd.grad(y, x)
self.assertEqual(gx, x.cos())
def test_zerotensor_vmapjvp_interaction(self, device):
dummy = torch.ones(4, 1)
x = torch.randn(4, 2)
x_tangent = torch.randn(2)
def push_jvp(dummy, x):
result = jvp(torch.cov, (x,), (x_tangent,))
return result
# Should not error
vmap(vmap(push_jvp, (0, None)))(dummy, x)
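# Summary of the jvp calling convention exercised above (comments only; see the
# individual tests for the authoritative behavior):
#   jvp(f, primals, tangents)                -> (primal_out, tangent_out)
#   jvp(f, primals, tangents, has_aux=True)  -> (primal_out, tangent_out, aux)
# primals and tangents must be tuples with matching pytree structure whose
# leaves are Tensors, and must contain at least one Tensor.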
class TestCustomFunction(TestCase):
@unittest.skipIf(IS_WINDOWS, "Prototype of custom_vjp doesn't link on windows")
@onlyCPU
def test_basic(self, device):
called_impl = False
called_vjp = False
def my_sin_impl(args):
x, = args
nonlocal called_impl
called_impl = True
return x.sin(), x
def my_sin_vjp(args):
grad_y, result, x = args
nonlocal called_vjp
called_vjp = True
return (grad_y * 3 * x.cos(),)
def filter_fn(args):
return args[0]
my_sin = custom_vjp('my_sin', filter_fn, my_sin_impl, my_sin_vjp)
x = torch.tensor([1., 2.], requires_grad=True, device=device)
y = my_sin(x)
self.assertTrue(called_impl)
y.sum().backward()
self.assertTrue(called_vjp)
assert torch.allclose(x.grad, 3 * x.cos())
class TestComposability(TestCase):
def test_grad_grad(self, device):
x = torch.randn([], device=device)
y = grad(grad(torch.sin))(x)
self.assertEqual(y, -x.sin())
def test_grad_vmap(self, device):
def foo(x):
y = vmap(torch.sin)(x)
return y.sum()
x = torch.randn(3, device=device)
y = grad(foo)(x)
self.assertEqual(y, x.cos())
def test_grad_vjp(self, device):
x = torch.randn(3, device=device)
def foo(x):
_, vjp_fn = vjp(torch.sin, x)
return vjp_fn(x)[0].sum()
y = grad(foo)(x)
expected = grad(lambda x: (x * x.cos()).sum())(x)
self.assertEqual(y, expected)
def test_vmap_grad(self, device):
x = torch.randn(3, device=device)
y = vmap(grad(torch.sin))(x)
self.assertEqual(y, x.cos())
def test_vmap_vmap(self, device):
x = torch.randn(2, 3, device=device)
y = vmap(vmap(torch.sin))(x)
self.assertEqual(y, x.sin())
def test_vmap_vjp(self, device):
x = torch.randn(3, device=device)
_, vjp_fn = vjp(torch.sin, x)
def foo(x):
_, vjp_fn = vjp(torch.sin, x)
return vjp_fn(x)
y = vmap(foo)(x)
self.assertEqual(y, vjp_fn(x))
# TODO: there's a very interesting error message when the following
# is on CPU
xs = torch.randn(5, 3, device=device)
expected = torch.stack([vjp_fn(x)[0] for x in xs])
result = vmap(lambda x: vjp_fn(x)[0])(xs)
self.assertEqual(result, expected)
def test_vjp_grad(self, device):
x = torch.randn([], device=device)
y, vjp_fn = vjp(grad(torch.sin), x)
self.assertEqual(y, x.cos())
v = torch.randn([])
self.assertEqual(vjp_fn(v)[0], -x.sin() * v)
def test_vjp_vmap(self, device):
x = torch.randn(3, device=device)
y, vjp_fn = vjp(vmap(torch.sin), x)
self.assertEqual(y, x.sin())
v = torch.randn(3, device=device)
self.assertEqual(vjp_fn(v)[0], x.cos() * v)
def test_vjp_vjp(self, device):
x = torch.randn(3, device=device)
y, vjp_fn = vjp(torch.sin, x)
self.assertEqual(y, x.sin())
y, vjp_fn = vjp(lambda x: vjp_fn(x)[0], x)
self.assertEqual(y, x * x.cos())
y = vjp_fn(x)[0]
        # The exact value of this second application isn't checked; this just verifies it runs.
def test_make_fx_vmap(self, device):
def f(x):
return torch.sin(x)
inp = torch.randn(5, 3)
f = vmap(f)
fx_f = make_fx(f)(inp)
new_inp = torch.randn(5, 3)
self.assertEqual(fx_f(new_inp), f(new_inp))
def test_make_fx_jacrev(self, device):
def f(x):
return x.sin().sum()
inp = torch.randn(3)
f = jacrev(jacrev(f))
fx_f = make_fx(f)(inp)
new_inp = torch.randn(3)
self.assertEqual(fx_f(new_inp), f(new_inp))
def test_make_fx_vjp(self, device):
def f(x):
return torch.sin(x).sum()
primals = torch.randn(3)
_, vjp_fn = vjp(f, primals)
cotangent = torch.randn(())
fx_f = make_fx(vjp_fn)(cotangent, True, True)
new_cotangent = torch.randn(())
self.assertEqual(fx_f(new_cotangent, True, True), vjp_fn(new_cotangent))
def test_requires_grad_inside_transform(self, device):
def f(x):
x.requires_grad_()
return x.sin().sum()
x = torch.randn(3)
with self.assertRaisesRegex(RuntimeError, "Tensor.requires_grad_()"):
vmap(f)(x)
with self.assertRaisesRegex(RuntimeError, "Tensor.requires_grad_()"):
grad(f)(x)
with self.assertRaisesRegex(RuntimeError, "Tensor.requires_grad_()"):
vmap(grad(f))(x)
x = torch.randn([])
with self.assertRaisesRegex(RuntimeError, "Tensor.requires_grad_()"):
grad(grad(f))(x)
def test_retain_grad_inside_transform(self, device):
def f(x):
y = x.sin()
y.retain_grad()
return y.sum()
x = torch.randn(3)
with self.assertRaisesRegex(RuntimeError, "Tensor.retain_grad()"):
grad(f)(x)
def test_autograd_functional_jacrev_inside_transform(self, device):
def f(x):
y = torch.autograd.functional.jacobian(lambda x: x.sin().sum(), x)
return y
B = 5
x = torch.randn(B, 3)
with self.assertRaisesRegex(RuntimeError, "torch.autograd.functional"):
vmap(f)(x)
x = torch.randn([])
with self.assertRaisesRegex(RuntimeError, "torch.autograd.functional"):
grad(f)(x)
def test_autograd_functional_vjp_inside_transform(self, device):
def f(x):
y = torch.autograd.functional.vjp(lambda x: x.sin().sum(), x)
return y
B = 5
x = torch.randn(B, 3)
with self.assertRaisesRegex(RuntimeError, "torch.autograd.functional"):
vmap(f)(x)
x = torch.randn([])
with self.assertRaisesRegex(RuntimeError, "torch.autograd.functional"):
grad(f)(x)
def test_autograd_functional_jvp_inside_transform(self, device):
def f(x):
t = torch.ones_like(x)
y = torch.autograd.functional.jvp(lambda x: x.sin().sum(), (x,), (t,))
return y
B = 5
x = torch.randn(B, 3)
with self.assertRaisesRegex(RuntimeError, "torch.autograd.functional"):
vmap(f)(x)
x = torch.randn([])
with self.assertRaisesRegex(RuntimeError, "torch.autograd.functional"):
grad(f)(x)
def test_autograd_functional_jacfwd_inside_transform(self, device):
def f(x):
y = torch.autograd.functional.jacobian(
lambda x: x.sin().sum(), x, strategy='forward-mode', vectorize=True)
return y
B = 5
x = torch.randn(B, 3)
with self.assertRaises(RuntimeError):
vmap(f)(x)
x = torch.randn([])
with self.assertRaises(RuntimeError):
grad(f)(x)
class TestMakeFunctional(TestCase):
@parametrize('disable_autograd_tracking', [True, False])
def test_disable_autograd_tracking(self, disable_autograd_tracking):
class Foo(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
mod = Foo()
_, params = make_functional(mod, disable_autograd_tracking=disable_autograd_tracking)
self.assertEqual(len(params), 2)
for param in params:
self.assertEqual(param.requires_grad, not disable_autograd_tracking)
def test_parameter_tying(self):
class Foo(nn.Module):
def __init__(self):
super().__init__()
self.bias = nn.Parameter(torch.randn(3))
self.linear = nn.Linear(3, 3)
self.linear.bias = self.bias
self.linear_tied = self.linear
def forward(self, x):
x = self.linear(x)
x = self.linear_tied(x)
x = x + self.bias
return x
torch.manual_seed(1)
mod = Foo()
func, _ = make_functional(mod)
torch.manual_seed(0)
mod = Foo()
_, params = make_functional(mod)
self.assertEqual(len(params), 2)
x = torch.randn(2, 3)
result = func(params, x)
expected = mod(x)
self.assertEqual(result, expected)
def test_buffer_tying(self):
class Foo(nn.Module):
def __init__(self):
super().__init__()
self.bias = nn.Parameter(torch.randn(3))
self.linear = nn.Linear(3, 3)
self.register_buffer('buffer', torch.randn(3))
self.register_buffer('buffer_tied', self.buffer)
def forward(self, x):
x = self.linear(x)
x = x + self.bias
x = x + self.buffer
x = x + self.buffer_tied
return x
torch.manual_seed(1)
mod = Foo()
func, _, _ = make_functional_with_buffers(mod)
torch.manual_seed(0)
mod = Foo()
_, params, buffers = make_functional_with_buffers(mod)
self.assertEqual(len(params), 3)
self.assertEqual(len(buffers), 1)
x = torch.randn(2, 3)
result = func(params, buffers, x)
expected = mod(x)
self.assertEqual(result, expected)
@parametrize('disable_autograd_tracking', [True, False])
def test_with_buffers_disable_autograd_tracking(self, disable_autograd_tracking):
class Foo(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 3)
self.register_buffer('buffer', torch.randn(3))
def forward(self, x):
x = self.linear(x)
x = x + self.buffer
return x
mod = Foo()
_, params, buffers = make_functional_with_buffers(mod, disable_autograd_tracking=disable_autograd_tracking)
self.assertEqual(len(params), 2)
self.assertEqual(len(buffers), 1)
for param in params:
self.assertEqual(param.requires_grad, not disable_autograd_tracking)
def test_parameter_tying_grad(self):
class Foo(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 3)
self.weight = self.linear.weight
self.bias = self.linear.bias
def forward(self, x):
x = self.linear(x)
x = F.linear(x, self.weight, self.bias)
return x
x = torch.randn(2, 3)
torch.manual_seed(0)
mod = Foo()
loss = mod(x).sum()
expected = torch.autograd.grad(loss, mod.parameters())
mod = Foo()
fmod, _, _ = make_functional_with_buffers(mod)
torch.manual_seed(0)
mod = Foo()
_, params, buffers = make_functional_with_buffers(mod)
def compute_loss(params, buffers, x):
return fmod(params, buffers, x).sum()
result = grad(compute_loss)(params, buffers, x)
self.assertEqual(result, expected)
def test_parameter_tying_ensemble(self):
class Foo(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 3)
self.weight = self.linear.weight
self.bias = self.linear.bias
self.register_buffer('buffer', torch.randn(3))
self.register_buffer('buffer_tied', self.buffer)
def forward(self, x):
x = self.linear(x)
x = F.linear(x, self.weight, self.bias)
x = x + self.buffer
x = x + self.buffer_tied
return x
num_models = 2
xs = torch.randn(num_models, 64, 3)
models = [Foo() for _ in range(num_models)]
fmodel, _, _ = combine_state_for_ensemble(models)
torch.manual_seed(0)
models = [Foo() for _ in range(num_models)]
_, params, buffers = combine_state_for_ensemble(models)
result = vmap(fmodel)(params, buffers, xs)
torch.manual_seed(0)
models = [Foo() for _ in range(num_models)]
expected = torch.stack([model(x) for model, x in zip(models, xs)])
self.assertEqual(result, expected)
def test_correctness_mnist(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
                return F.log_softmax(x, dim=1)
x = torch.randn(64, 1, 32, 32)
torch.manual_seed(301)
fnet, _ = make_functional(Net())
torch.manual_seed(0)
_, params = make_functional(Net())
result = fnet(params, x)
torch.manual_seed(0)
net = Net()
expected = net(x)
self.assertEqual(result, expected)
def test_combine_state_for_ensemble_error(self):
in_features = 2
out_features = 2
models = []
with self.assertRaisesRegex(RuntimeError, "Expected at least one model"):
_ = combine_state_for_ensemble(models)
num_models = 3
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
models[1].eval()
with self.assertRaisesRegex(RuntimeError, "same training/eval mode"):
_ = combine_state_for_ensemble(models)
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
models[1] = torch.nn.Conv2d(3, 3, (3, 3))
with self.assertRaisesRegex(RuntimeError, "models to be of the same class"):
_ = combine_state_for_ensemble(models)
def test_combine_state_for_ensemble_smoke(self):
in_features = 2
out_features = 2
num_models = 3
models = [torch.nn.Linear(in_features, out_features) for i in range(num_models)]
_ = combine_state_for_ensemble(models)
class TestExamplesCorrectness(TestCase):
def test_maml_regression(self, device):
class ThreeLayerNet(nn.Module):
def __init__(self):
super(ThreeLayerNet, self).__init__()
self.fc1 = nn.Linear(1, 40)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(40, 40)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(40, 1)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
x = self.relu2(x)
x = self.fc3(x)
return x
# TODO: should replace with F.mse_loss
def mse_loss(x, y):
return torch.mean((x - y) ** 2)
net, params = make_functional(ThreeLayerNet().to(device))
K = 20
num_tasks = 4
alpha = 0.1
def sample_tasks(outer_batch_size, inner_batch_size):
# Select amplitude and phase for the task
As = []
phases = []
for _ in range(outer_batch_size):
As.append(np.random.uniform(low=0.1, high=.5))
phases.append(np.random.uniform(low=0., high=np.pi))
def get_batch():
xs, ys = [], []
for A, phase in zip(As, phases):
x = np.random.uniform(low=-5., high=5., size=(inner_batch_size, 1))
y = A * np.sin(x + phase)
xs.append(x)
ys.append(y)
return torch.tensor(xs, dtype=torch.float, device=device), \
torch.tensor(ys, dtype=torch.float, device=device)
x1, y1 = get_batch()
x2, y2 = get_batch()
return x1, y1, x2, y2
def get_loss_for_task(use_transform, x1, y1, x2, y2):
def inner_loss(params, x1, y1):
f = net(params, x1)
loss = mse_loss(f, y1)
return loss
if use_transform:
grads = grad(inner_loss)(params, x1, y1)
else:
loss = inner_loss(params, x1, y1)
grads = torch.autograd.grad(loss, params, create_graph=True)
new_params = [(params[i] - alpha * grads[i]) for i in range(len(params))]
v_f = net(new_params, x2)
return mse_loss(v_f, y2)
task = sample_tasks(num_tasks, K)
# Compute with vmap+grad
inner_losses = vmap(partial(get_loss_for_task, True))(task[0], task[1], task[2], task[3])
loss2 = sum(inner_losses) / len(inner_losses)
result_grads = torch.autograd.grad(loss2, params)
# Compute without vmap+grad
inner_losses = [
get_loss_for_task(False, task[0][i], task[1][i], task[2][i], task[3][i])
for i in range(num_tasks)
]
loss2 = sum(inner_losses) / len(inner_losses)
expected_grads = torch.autograd.grad(loss2, params)
self.assertEqual(result_grads, expected_grads)
def test_maml_omniglot(self, device):
# TODO: there appears to be precision issues for float32
dtype = torch.double
# TODO: We don't support inplace relu?
inplace_relu = False
n_way = 5
n_inner_iter = 2
num_tasks = 2
        # The real example uses batch norm, but it's numerically unstable in the first
        # iteration (when values are near 0) and won't produce the same gradients, so use group norm instead.
net = nn.Sequential(
nn.Conv2d(1, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(64, n_way)).to(device).to(dtype)
fnet, params, buffers = make_functional_with_buffers(net)
net = (params, buffers, fnet)
def loss_for_task(net, n_inner_iter, use_transform, x_spt, y_spt, x_qry, y_qry):
params, buffers, fnet = net
querysz = x_qry.size(0)
def compute_loss(new_params, buffers, x, y):
logits = fnet(new_params, buffers, x)
loss = F.cross_entropy(logits, y)
return loss
new_params = params
for _ in range(n_inner_iter):
if use_transform:
grads = grad(compute_loss)(new_params, buffers, x_spt, y_spt)
else:
res = compute_loss(new_params, buffers, x_spt, y_spt)
grads = torch.autograd.grad(res, new_params, create_graph=True)
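                # Manual SGD step on the fast weights (inner-loop learning rate 1e-1).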
                new_params = [p - g * 1e-1 for p, g in zip(new_params, grads)]
qry_logits = fnet(new_params, buffers, x_qry)
qry_loss = F.cross_entropy(qry_logits, y_qry)
            qry_acc = (qry_logits.argmax(dim=1) == y_qry).sum() / querysz
return qry_loss, qry_acc
# Get some sample inputs...
x_spt = torch.randn(num_tasks, 25, 1, 28, 28, dtype=dtype, device=device)
y_spt = torch.randint(0, 5, (num_tasks, 25), device=device)
x_qry = torch.randn(num_tasks, 75, 1, 28, 28, dtype=dtype, device=device)
y_qry = torch.randint(0, 5, (num_tasks, 75), device=device)
# compute with vmap + grad
compute_loss = partial(loss_for_task, net, n_inner_iter, True)
qry_losses, _ = vmap(compute_loss)(x_spt, y_spt, x_qry, y_qry)
result_grads = torch.autograd.grad(qry_losses.sum(), params)
# compute without vmap + grad
compute_loss = partial(loss_for_task, net, n_inner_iter, False)
losses = [compute_loss(x_spt[i], y_spt[i], x_qry[i], y_qry[i])[0]
for i in range(num_tasks)]
expected_grads = torch.autograd.grad(sum(losses), params)
self.assertEqual(result_grads, expected_grads)
@parametrize('originally_track_running_stats', [True, False])
def test_update_batch_norm(self, device, originally_track_running_stats):
dtype = torch.double
inplace_relu = False
classes = 5
num_batches = 2
net = nn.Sequential(
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=originally_track_running_stats),
nn.ReLU(inplace=inplace_relu),
nn.Flatten(),
nn.Linear(43264, classes)).to(device).to(dtype)
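        # Patch all batch norm modules in-place so they stop tracking running stats,
        # which lets them be used under the grad/vmap transforms below.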
replace_all_batch_norm_modules_(net)
transformed_net = net
fnet, params, buffers = make_functional_with_buffers(transformed_net)
net = (params, buffers, fnet)
criterion = nn.CrossEntropyLoss()
def compute_loss(x, y, params, buffers):
return criterion(fnet(params, buffers, x), y)
# Get some sample inputs...
x = torch.randn(num_batches, 1, 64, 28, 28, device=device, dtype=dtype)
y = torch.randint(0, classes, (num_batches, 1), device=device)
# compute some per sample grads with vmap + grad
result_grads = vmap(grad(compute_loss, argnums=2), in_dims=(0, 0, None, None))(x, y, params, buffers)
# compute some per sample grads without vmap + grad
fnet, params, buffers = make_functional_with_buffers(transformed_net)
expected_grads = [
torch.autograd.grad(compute_loss(x[i], y[i], params, buffers), params)
for i in range(num_batches)
]
expected_grads = [torch.stack(shards) for shards in zip(*expected_grads)]
self.assertEqual(result_grads, expected_grads)
@parametrize('jac', ['jacfwd', 'jacrev'])
def test_lennard_jones_batched_jac(self, device, jac):
sigma = 0.5
epsilon = 4.
jac = getattr(functorch, jac)
def lennard_jones(r):
return epsilon * ((sigma / r)**12 - (sigma / r)**6)
def lennard_jones_force(r):
"""Get magnitude of LJ force"""
return \
-epsilon * ((-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7))
r = torch.linspace(0.5, 2 * sigma, steps=100, requires_grad=True, device=device)
drs = torch.outer(r, torch.tensor([1.0, 0, 0], device=device))
norms = torch.norm(drs, dim=1).reshape(-1, 1)
training_energies = \
torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1)
training_forces = torch.stack(
[force * dr
for force, dr in zip(map(lennard_jones_force, norms), drs)])
model = nn.Sequential(
nn.Linear(1, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 1)
).to(device)
def make_prediction(model, drs, use_functorch):
norms = torch.norm(drs, dim=1).reshape(-1, 1)
energies = model(norms)
if use_functorch:
network_derivs = vmap(jac(model))(norms).squeeze(-1)
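                # Force is the negative derivative of the predicted energy w.r.t. r,
                # directed along dr / |dr|.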
forces = -network_derivs * drs / norms
else:
forces = []
for r, dr in zip(norms, drs):
network_deriv = torch.autograd.functional.jacobian(
model, r, create_graph=True)
force = -network_deriv * dr / r
forces.append(force)
forces = torch.cat(forces)
return energies, forces
def loss_fn(energies, forces, predicted_energies, predicted_forces):
return F.mse_loss(energies, predicted_energies) + \
0.01 * F.mse_loss(forces, predicted_forces) / 3
energies, forces = make_prediction(model, drs, use_functorch=True)
loss = loss_fn(training_energies, training_forces, energies, forces)
result = torch.autograd.grad(loss, model.parameters())
energies, forces = make_prediction(model, drs, use_functorch=False)
loss = loss_fn(training_energies, training_forces, energies, forces)
expected = torch.autograd.grad(loss, model.parameters())
self.assertEqual(result, expected)
def test_ensemble_regression(self, device):
def make_spirals(n_samples, noise_std=0., rotations=1.):
ts = torch.linspace(0, 1, n_samples)
rs = ts ** 0.5
thetas = rs * rotations * 2 * math.pi
signs = torch.randint(0, 2, (n_samples,)) * 2 - 1
labels = (signs > 0).to(torch.long)
xs = rs * signs * torch.cos(thetas) + torch.randn(n_samples) * noise_std
ys = rs * signs * torch.sin(thetas) + torch.randn(n_samples) * noise_std
points = torch.stack([xs, ys], dim=1)
return points.to(device), labels.to(device)
points, labels = make_spirals(100, noise_std=0.05)
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
loss_fn = nn.NLLLoss()
func_model, weights = make_functional(MLPClassifier().to(device))
def train_step_fn(use_transform, weights, batch, targets, lr=0.2):
def compute_loss(weights, batch, targets):
output = func_model(weights, batch)
loss = loss_fn(output, targets)
return loss
if use_transform:
grad_weights, loss = grad_and_value(compute_loss)(weights, batch, targets)
else:
loss = compute_loss(weights, batch, targets)
grad_weights = torch.autograd.grad(loss, weights)
new_weights = []
with torch.no_grad():
for grad_weight, weight in zip(grad_weights, weights):
new_weights.append(weight - grad_weight * lr)
# NB: return looks weird because torch.vmap must return Tensors
return (loss, *new_weights)
def unpack(train_result):
return train_result[0], train_result[1:]
def init_fn(num_models):
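            # Construct `num_models` independently initialized models and stack their
            # corresponding parameters along a new leading model dimension for vmap.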
models = tuple(MLPClassifier().to(device) for _ in range(num_models))
weights = tuple(make_functional(model)[1] for model in models)
weights = tuple(zip(*weights))
weights = tuple(torch.stack(shards).detach() for shards in weights)
return weights
def slice_weights(batched_weights, index):
return tuple(weight[index].detach().requires_grad_() for weight in batched_weights)
batched_weights = init_fn(num_models=2)
parallel_train_step_fn = vmap(partial(train_step_fn, True), in_dims=(0, None, None))
result_loss, result_weights = unpack(parallel_train_step_fn(batched_weights, points, labels))
loss0, weights0 = unpack(train_step_fn(False, slice_weights(batched_weights, 0), points, labels))
loss1, weights1 = unpack(train_step_fn(False, slice_weights(batched_weights, 1), points, labels))
expected_loss = torch.stack([loss0, loss1])
expected_weights = tuple(torch.stack([w0, w1]) for w0, w1 in zip(weights0, weights1))
self.assertEqual(result_loss, expected_loss)
self.assertEqual(result_weights, expected_weights)
@parametrize("dropout_layer", [nn.Dropout, nn.AlphaDropout, nn.FeatureAlphaDropout])
def test_find_learning_rate_ensembling(self, device, dropout_layer):
        # This example mimics what a user might do when trying to find the optimal learning rate: run a
        # bunch of models with identical behavior (including identical dropout masks!) but each with a
        # different learning rate. Specifically, this is an example of using the same randomness with vmap.
points, labels = torch.randn(100, 2, 2, 2, 2, device=device), torch.randint(0, 2, (100,), device=device)
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.dropout = dropout_layer()
self.fc1 = nn.Linear(16, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.dropout(x)
x = torch.flatten(x, start_dim=1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
loss_fn = nn.NLLLoss()
func_model, weights = make_functional(MLPClassifier().to(device))
def train_step_fn(weights, batch, targets, lr):
def compute_loss(weights, batch, targets):
output = func_model(weights, batch)
loss = loss_fn(output, targets)
return loss
grad_weights, loss = grad_and_value(compute_loss)(weights, batch, targets)
new_weights = []
with torch.no_grad():
for grad_weight, weight in zip(grad_weights, weights):
new_weights.append(weight - grad_weight * lr)
# NB: return looks weird because torch.vmap must return Tensors
return (loss, *new_weights)
def unpack(train_result):
return train_result[0], train_result[1:]
def init_fn(num_models):
og_model = MLPClassifier().to(device)
models = tuple(copy.deepcopy(og_model) for _ in range(num_models)) # have same initialization
weights = tuple(make_functional(model)[1] for model in models)
weights = tuple(zip(*weights))
weights = tuple(torch.stack(shards).detach() for shards in weights)
return weights
batched_weights = init_fn(num_models=2)
parallel_train_step_fn = vmap(train_step_fn, in_dims=(0, None, None, 0), randomness="same")
lrs = torch.tensor([0.2, 0.4], device=device)
result_loss, result_weights = unpack(parallel_train_step_fn(batched_weights, points, labels, lrs))
self.assertEqual(result_loss[0], result_loss[1])
self.assertNotEqual(tuple(weight[0] for weight in result_weights),
tuple(weight[1] for weight in result_weights))
@unittest.skipIf(not USE_TORCHVISION, "test requires torchvision")
def test_resnet18_per_sample_grads(self, device):
import torchvision.models as models
model = models.__dict__['resnet18'](
pretrained=False, norm_layer=(lambda c: nn.GroupNorm(min(32, c), c))
).to(device)
        criterion = nn.CrossEntropyLoss(reduction='sum')  # avoid cross-batch reductions for the for-loop comparison
func_model, weights = make_functional(model)
        def compute_loss(weights, image, target):
            # add a leading batch dimension so a single sample works with the model
            images = image.unsqueeze(0)
            targets = target.unsqueeze(0)
            output = func_model(weights, images)
            loss = criterion(output, targets)
            return loss
batch_size = 3
images = torch.randn(batch_size, 3, 32, 32, device=device)
targets = torch.randint(0, 10, (batch_size,), device=device)
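        # vmap over the per-sample dimension of images/targets (weights are shared via
        # in_dims=None) to compute one gradient per sample in a single call.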
result_grads = vmap(grad(compute_loss), in_dims=(None, 0, 0))(weights, images, targets)
expected_grads = [
            torch.autograd.grad(compute_loss(weights, images[i], targets[i]), weights)
for i in range(batch_size)
]
expected_grads = [torch.stack(shards) for shards in zip(*expected_grads)]
self.assertEqual(result_grads, expected_grads, atol=1e-3, rtol=1.)
def normalize_devices(fx_g):
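    # Rewrite any torch.device arguments/keyword arguments in the traced FX graph to 'cpu'
    # so the expected-inline graph strings below are device-independent.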
for node in fx_g.graph.nodes:
args = list(node.args)
for idx, arg in enumerate(args):
if isinstance(arg, torch.device):
args[idx] = 'cpu'
node.args = tuple(args)
new_kwargs = {}
for k, v in node.kwargs.items():
if isinstance(v, torch.device):
v = 'cpu'
new_kwargs[k] = v
node.kwargs = new_kwargs
fx_g.recompile()
return fx_g
class TestFunctionalize(TestCase):
def _check_functionalize_correctness(self, f, inpt):
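        # Run f three ways: directly, under vmap(functionalize(f)), and under
        # functionalize(f, remove='mutations_and_views'); all three must agree on the
        # outputs and on any mutations they apply to their inputs.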
inpt1 = inpt.clone()
inpt2 = inpt.clone()
inpt3 = inpt.clone()
expected_outputs = f(inpt1)
actual_outputs = vmap(functionalize(f))(inpt2.unsqueeze(0))[0].squeeze()
# Right now the flavor of functionalize that also removes view ops
# isn't being used with vmap
# That's because {view}_copy ops don't have batching rules yet
# (although we should probably fix that)
actual_outputs_view_copy = functionalize(f, remove='mutations_and_views')(inpt3)
# Check that outputs are the same
self.assertEqual(actual_outputs, expected_outputs)
self.assertEqual(actual_outputs_view_copy, expected_outputs)
# Inputs might have been mutated by f: check that they were mutated properly
self.assertEqual(inpt1, inpt2)
self.assertEqual(inpt1, inpt3)
def test_simple_view(self, device):
def f(x: torch.Tensor) -> torch.Tensor:
tmp = torch.ones(2, device=device)
y = x.view(4, 2)
y.add_(tmp)
return x
self._check_functionalize_correctness(f, torch.zeros(4, 2, device=device))
def test_multioutput_view(self, device):
def f(x: torch.Tensor) -> torch.Tensor:
tmp = torch.ones(2, device=device)
y1, y2 = x.split(2)
y1_view = y1.diagonal()
y1_view.add_(tmp)
return x
self._check_functionalize_correctness(f, torch.zeros(4, 2, device=device))
def test_inplace_view(self, device):
def f(x: torch.Tensor) -> torch.Tensor:
tmp = torch.ones(4, device=device)
y = x + x
y2 = y.transpose(1, 0)
z = y2[0]
z.add_(tmp)
return y
self._check_functionalize_correctness(f, torch.zeros(4, 2, device=device))
# See https://github.com/pytorch/functorch/issues/780
def test_linear(self, device):
def f(x, y, z) -> torch.Tensor:
return torch._C._nn.linear(x, y, z)
x = torch.randn(14, 1, 384, device=device)
y = torch.randn(96, 384, device=device)
z = torch.randn(96, device=device)
out_expected = f(x, y, z)
out_actual = functionalize(f)(x, y, z)
self.assertEqual(out_expected, out_actual)
def test_multioutput_inplace_slice_view(self, device):
def f(x: torch.Tensor) -> torch.Tensor:
tmp = torch.ones(2, 2, device=device)
y = x.view(8)
z0 = y.reshape(2, 4)
z1 = z0.transpose(1, 0)
z1.unsqueeze_(0)
z1.squeeze_()
z2, z3 = z1.split(2)
z2.add_(tmp)
return x
self._check_functionalize_correctness(f, torch.zeros(4, 2, device=device))
# Ensure functionalize works with List[Optional[Tensor]] arguments.
# See the fix / discussion at https://github.com/pytorch/pytorch/pull/76085
def test_functionalize_opt_tensor_list(self, device):
def f(x: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:
return x[indices]
inpta = torch.ones(4, device=device)
inptb = torch.arange(2, device=device)
out1 = f(inpta, inptb)
out2 = functionalize(f)(inpta, inptb)
self.assertEqual(out1, out2)
out = make_fx(functionalize(f))(inpta, inptb)
self.assertExpectedInline((out.code), """\
def forward(self, x_1, indices_1) -> torch.Tensor:
index_tensor = torch.ops.aten.index.Tensor(x_1, [indices_1]); x_1 = indices_1 = None
return index_tensor
""")
# Ensure grad(functionalize(f)) works
def test_functionalize_grad(self, device):
def f(x: torch.Tensor) -> torch.Tensor:
tmp = torch.ones(2, device=device)
y = x + x
z = y.view(4, 2)
y.add_(tmp)
return z.sum()
inpt1 = torch.ones(4, 2, device=device)
inpt2 = torch.ones(4, 2, device=device)
out1 = grad(f)(inpt1)
out2 = grad(functionalize(f))(inpt2)
self.assertEqual(out1, out2)
self.assertEqual(inpt1, inpt2)
def test_vmap_functionalize_jvp(self, device):
def f(x: torch.Tensor) -> torch.Tensor:
y = x + x
z = y.view(-1)
y.add_(1)
return z
def jvp_wrapper(x, t):
return jvp(f, (x,), (t,),)
x = torch.randn(2, 3, device=device)
t = torch.randn(2, 3, device=device)
out1 = vmap(jvp_wrapper)(x, t)
out2 = vmap(functionalize(jvp_wrapper))(x, t)
self.assertEqual(out1, out2)
def test_functionalize_fx_simple(self, device):
def f(x: torch.Tensor) -> torch.Tensor:
tmp = torch.ones(2, device=device)
y = x.view(4, 2)
y.add_(tmp)
return x
# There's a copy_ in the graph, because the input (x) was mutated.
# To preserve semantics, functionalize() needs to propagate the mutation.
fn = make_fx(functionalize(f, remove='mutations_and_views'))
out = fn(torch.zeros(4, 2, device=device))
out = normalize_devices(out)
self.assertExpectedInline((out.code), """\
def forward(self, x_1) -> torch.Tensor:
ones = torch.ops.aten.ones.default([2], device = 'cpu', pin_memory = False)
view_copy_default = torch.ops.aten.view_copy.default(x_1, [4, 2])
add_tensor = torch.ops.aten.add.Tensor(view_copy_default, ones); view_copy_default = ones = None
view_copy_default_1 = torch.ops.aten.view_copy.default(add_tensor, [4, 2]); add_tensor = None
copy__default = torch.ops.aten.copy_.default(x_1, view_copy_default_1); x_1 = None
return view_copy_default_1
""")
def test_functionalize_fx_transpose_simple(self, device):
def f(x: torch.Tensor) -> torch.Tensor:
return x.transpose(1, 0)
fn = make_fx(functionalize(f, remove='mutations_and_views'))
out = fn(torch.zeros(4, 2, device=device))
out = normalize_devices(out)
self.assertExpectedInline(out.code, """\
def forward(self, x_1) -> torch.Tensor:
transpose_copy_int = torch.ops.aten.transpose_copy.int(x_1, 1, 0); x_1 = None
return transpose_copy_int
""")
def test_functionalize_fx_out_op(self, device):
def f(inpt: torch.Tensor) -> torch.Tensor:
out = torch.empty((), dtype=torch.float32)
torch.add(inpt, inpt, out=out)
out_view = out.view(4)
out_view.add_(1)
return out
fn = make_fx(functionalize(f, remove='mutations_and_views'))
out = fn(torch.arange(4, device=device, dtype=torch.float32))
out = normalize_devices(out)
self.assertExpectedInline(out.code, """\
def forward(self, inpt_1) -> torch.Tensor:
empty = torch.ops.aten.empty.memory_format([], dtype = torch.float32, device = 'cpu', pin_memory = False)
add_tensor = torch.ops.aten.add.Tensor(inpt_1, inpt_1); inpt_1 = None
view_copy_default = torch.ops.aten.view_copy.default(add_tensor, [4])
view_copy_default_1 = torch.ops.aten.view_copy.default(add_tensor, [4]); add_tensor = None
add_tensor_1 = torch.ops.aten.add.Tensor(view_copy_default_1, 1); view_copy_default_1 = None
view_copy_default_2 = torch.ops.aten.view_copy.default(add_tensor_1, [4]); add_tensor_1 = None
return view_copy_default_2
""")
def test_functionalize_fx_multi_out_op(self, device):
def f(inpt: torch.Tensor) -> torch.Tensor:
mins = torch.empty(4, dtype=torch.float32)
maxs = torch.empty(2, 2, dtype=torch.float32)
maxs_view = maxs.view(4)
inpt_view = inpt.view(2, 4)
torch.aminmax(inpt_view, dim=0, out=(mins, maxs_view))
return (maxs, mins)
fn = make_fx(functionalize(f, remove='mutations_and_views'))
out = fn(torch.arange(8, device=device, dtype=torch.float32))
out = normalize_devices(out)
self.assertExpectedInline(out.code, """\
def forward(self, inpt_1) -> torch.Tensor:
empty = torch.ops.aten.empty.memory_format([4], dtype = torch.float32, device = 'cpu', pin_memory = False)
empty_1 = torch.ops.aten.empty.memory_format([2, 2], dtype = torch.float32, device = 'cpu', pin_memory = False)
view_copy_default = torch.ops.aten.view_copy.default(empty_1, [4]); empty_1 = None
view_copy_default_1 = torch.ops.aten.view_copy.default(inpt_1, [2, 4]); inpt_1 = None
aminmax_default = torch.ops.aten.aminmax.default(view_copy_default_1, dim = 0); view_copy_default_1 = None
getitem = aminmax_default[0]
getitem_1 = aminmax_default[1]; aminmax_default = None
view_copy_default_2 = torch.ops.aten.view_copy.default(getitem_1, [2, 2]); getitem_1 = None
return (view_copy_default_2, getitem)
""")
def test_functionalize_fx_reapply_views_simple(self, device):
def f(x: torch.Tensor) -> torch.Tensor:
tmp = torch.ones(2, device=device)
y = x.view(4, 2)
y.add_(tmp)
return x
out = make_fx(functionalize(f))(torch.zeros(4, 2, device=device))
out = normalize_devices(out)
self.assertExpectedInline(out.code, """\
def forward(self, x_1) -> torch.Tensor:
ones = torch.ops.aten.ones.default([2], device = 'cpu', pin_memory = False)
view_default = torch.ops.aten.view.default(x_1, [4, 2])
add_tensor = torch.ops.aten.add.Tensor(view_default, ones); view_default = ones = None
view_default_1 = torch.ops.aten.view.default(add_tensor, [4, 2]); add_tensor = None
copy__default = torch.ops.aten.copy_.default(x_1, view_default_1); x_1 = None
return view_default_1
""")
def test_functionalize_nonfunctional_output(self, device):
global_out = torch.ones(2, device=device)
def f() -> torch.Tensor:
return global_out
out = make_fx(functionalize(f))()
out = normalize_devices(out)
self.assertExpectedInline(out.code, """\
def forward(self) -> torch.Tensor:
_tensor_constant0 = self._tensor_constant0
return _tensor_constant0
""")
def test_functionalize_optional_tensorlist1(self, device):
def f(a, b) -> torch.Tensor:
# at::index has OptionalTensorList arguments,
# test that here
return a[b]
a = torch.arange(4).reshape(2, 2)
b = torch.ones(2, dtype=torch.long)
out = make_fx(functionalize(f))(a, b)
out = normalize_devices(out)
self.assertExpectedInline(out.code, """\
def forward(self, a_1, b_1) -> torch.Tensor:
index_tensor = torch.ops.aten.index.Tensor(a_1, [b_1]); a_1 = b_1 = None
return index_tensor
""")
def test_functionalize_optional_tensorlist2(self, device):
def f(a, b) -> torch.Tensor:
# See https://github.com/pytorch/pytorch/pull/77846
return torch.ops.aten.index(a, b)
a = torch.arange(4).reshape(2, 2)
b = torch.ones(2, dtype=torch.long)
out = make_fx(functionalize(f))(a, b)
self.assertExpectedInline(out.code, """\
def forward(self, a_1, b_1) -> torch.Tensor:
unbind_int = torch.ops.aten.unbind.int(b_1); b_1 = None
getitem = unbind_int[0]
getitem_1 = unbind_int[1]; unbind_int = None
index_tensor = torch.ops.aten.index.Tensor(a_1, [getitem, getitem_1]); a_1 = getitem = getitem_1 = None
return index_tensor
""")
only_for = ("cpu", "cuda")
instantiate_device_type_tests(
TestGradTransform,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestVmapOfGrad,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestJac,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestJvp,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestHessian,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestComposability,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestExamplesCorrectness,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestCustomFunction,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestFunctionalize,
globals(),
only_for=only_for,
)
instantiate_parametrized_tests(
TestMakeFunctional,
)
if __name__ == '__main__':
run_tests()
| pytorch-master | functorch/test/test_eager_transforms.py |
# Owner(s): ["module: functorch"]
import torch
from functorch.compile import minifier
from functorch._src.compile_utils import get_placeholders, get_outputs
from functorch import make_fx
from torch.testing._internal.common_utils import TestCase, run_tests
class TestMinifier(TestCase):
def test_has_mul_minifier(self):
def failing_f(x, y):
y = y / 3
x = x + 3
x = x * y
return x + y
inps = [torch.randn(3), torch.randn(3)]
failing_f = make_fx(failing_f)(*inps)
def has_mul(fx_g, inps):
            return torch.ops.aten.mul.Tensor in {i.target for i in fx_g.graph.nodes}
min_f, inps = minifier(failing_f, inps, has_mul)
self.assertEqual(len(min_f.graph.nodes), 4)
self.assertEqual(len(inps), 2)
def test_has_add_mul(self):
def failing_f(x):
x = x * 3
x = x + 5
x = x.cos()
zero = x - x
result = zero / zero
result = result + 3
return (result * 2,)
inps = [torch.randn(3)]
failing_f = make_fx(failing_f)(*inps)
def has_nans(fx_g, inps):
# Basically, make sure none of the nodes are computing nans
for i in inps:
if torch.isnan(i).any():
return False
return torch.isnan(fx_g(*inps)[0]).any()
min_f, inps = minifier(failing_f, inps, has_nans)
self.assertEqual(len(min_f.graph.nodes), 3)
self.assertEqual(len(inps), 1)
def test_input_returned(self):
def f(a, b, c):
a = a.sin()
c = c.cos()
d = a * c
return (a, b, c, d)
inps = [torch.randn(3) for _ in range(3)]
def inputs_returned(fx_g, inps):
inps = set(get_placeholders(fx_g.graph))
outs = set(get_outputs(fx_g.graph))
return len(inps & outs) > 0
failing_f = make_fx(f)(*inps)
min_f, inps = minifier(failing_f, inps, inputs_returned)
self.assertEqual(len(min_f.graph.nodes), 2)
self.assertEqual(len(inps), 1)
if __name__ == "__main__":
run_tests()
| pytorch-master | functorch/test/test_minifier.py |
# Owner(s): ["module: functorch"]
import functorch
from unittest.mock import patch
import functools
from torch.testing._internal.common_utils import run_tests
import test_compile_cache
import test_pythonkey
def make_functionalize_fn(fn):
@functools.wraps(fn)
def _fn(*args, **kwargs):
with patch.object(functorch.compile.config, "use_functionalize", True):
return fn(*args, **kwargs)
return _fn
def make_functionalize_test(cls):
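    # Dynamically derive a TestCase subclass whose test_* methods run with
    # functorch.compile.config.use_functionalize patched to True.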
class FunctionalizeTest(cls):
pass
FunctionalizeTest.__name__ = f"Functionalize{cls.__name__}"
for name in dir(cls):
if name.startswith("test_"):
fn = getattr(cls, name)
if not callable(fn):
continue
new_name = f"{name}_functionalize"
fn = make_functionalize_fn(fn)
fn.__name__ = new_name
setattr(FunctionalizeTest, name, None)
setattr(FunctionalizeTest, new_name, fn)
return FunctionalizeTest
FunctionalizeTestCompileCache = make_functionalize_test(test_compile_cache.TestCompileCache)
FunctionalizeTestCompileCacheStaticArgs = make_functionalize_test(test_compile_cache.TestCompileCacheStaticArgs)
FunctionalizeTestPythonKeyAOT = make_functionalize_test(test_pythonkey.TestAOTAutograd)
FunctionalizeTestPythonKeyContiguous = make_functionalize_test(test_pythonkey.TestContiguous)
FunctionalizeTestPythonKeyRandom = make_functionalize_test(test_pythonkey.TestRandom)
FunctionalizeTestPythonKeyPartitioning = make_functionalize_test(test_pythonkey.TestPartitioning)
if __name__ == "__main__":
run_tests()
| pytorch-master | functorch/test/test_functionalize.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from functorch.dim import dims, dimlists, softmax, cat
import math
class Linear(nn.Linear):
def forward(self, input):
ci, co = dims()
b = dimlists()
result = (input[b, ci] * self.weight[co, ci]).sum(ci) + self.bias[co]
return result.order(b, co)
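# Illustrative (hypothetical) usage of the first-class-dims Linear above, kept as a comment
# so importing this module stays side-effect free:
#   layer = Linear(3, 4)
#   out = layer(torch.randn(2, 5, 3))  # the dimlist `b` captures the leading (2, 5) batch dims
#   assert out.shape == (2, 5, 4)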
class BertSelfAttention(nn.Module):
def __init__(self, hidden_size, num_attention_heads,
attention_probs_dropout_prob, position_embedding_type=None,
max_position_embeddings=None, linear=Linear):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
f"The hidden size ({hidden_size}) is not a multiple of the number of attention "
f"heads ({num_attention_heads})"
)
self.num_attention_heads = num_attention_heads
self.attention_head_size = int(hidden_size / num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = linear(hidden_size, self.all_head_size)
self.key = linear(hidden_size, self.all_head_size)
self.value = linear(hidden_size, self.all_head_size)
self.dropout_prob = attention_probs_dropout_prob
self.position_embedding_type = position_embedding_type
if self.position_embedding_type is not None:
assert max_position_embeddings is not None
self.max_position_embeddings = max_position_embeddings
self.distance_embedding = nn.Embedding(2 * max_position_embeddings - 1, self.attention_head_size)
def forward(
self,
hidden_states,
past_key_value=None,
):
# first run the encoding linear layers for q, k, v normally
# the meaning of a linear layer is well understood, so no need to use explicit dimensions
q = self.query(hidden_states)
k = self.key(hidden_states)
v = self.value(hidden_states)
        # introduce values that represent each dimension. dimensions are 'first class'
        # because they are actual python values introduced here
batch, query_sequence, key_sequence, heads, features = dims()
heads.size = self.num_attention_heads
# bind the positional dimensions in k, q, and v against
# our values. the sizes of each dimension are determined by this binding
# and when a dimension is used twice (e.g. batch), its size against both
# uses is checked for consistency.
# The group (heads, features) splits apart a single positional dimension
# into two dimensions. Since heads.size*features.size == q.size(2)
# and we specified heads.size, features.size is inferred here.
q = q[batch, query_sequence, [heads, features]]
k = k[batch, key_sequence, [heads, features]]
v = v[batch, key_sequence, [heads, features]]
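        # For example (hypothetical sizes): if hidden_states is (B, S, hidden_size), then after
        # these bindings q has dims (batch, query_sequence, heads, features) with batch.size == B,
        # query_sequence.size == S, and features.size == hidden_size // num_attention_heads
        # (inferred from heads.size).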
        # this option allows the model to attend not just to the elements of the current sequence
        # but also to previous elements as well as additional tokens.
if past_key_value is not None:
extended_key_sequence = dims()
key_past = past_key_value[0][batch, heads, key_sequence, features]
value_past = past_key_value[1][batch, heads, key_sequence, features]
            # cat introduces a new dimension extended_key_sequence, because it is twice as long
            # as the original key_sequence
k = cat([key_past, k], key_sequence, extended_key_sequence)
v = cat([value_past, v], key_sequence, extended_key_sequence)
# for the rest of the function, we will just use extended_key_sequence in lieu of
# key_sequence
key_sequence = extended_key_sequence
# Take the dot product between "query" and "key" to get the raw attention scores.
# The actual outer-product and summation are explicitly represented here,
# and like einsum, will be pattern matched to an efficient matrix multiply op.
attention_scores = (q * k).sum(features) / math.sqrt(features.size)
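        # (Up to dimension ordering, this is roughly the conventional positional form
        #  torch.einsum('bqhf,bkhf->bhqk', q, k) / math.sqrt(head_dim), with head_dim == features.size.)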
        # relative positional embeddings give a unique embedding based on the distance between
        # query and key tokens in the sequence, e.g.
# 0 1 2 3
# -1 0 1 2
# -2 -1 0 1
# -3 -2 -1 0
if self.position_embedding_type is not None:
# the value of a dimension object when used as a tensor is the indices along its dimension
# so we can directly subtract the two dimensions to get a 2D tensor of (query_sequence x key_sequence)
# with the distance between them
distance = query_sequence - key_sequence
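            # For example, with query_sequence.size == key_sequence.size == 4, `distance` is the
            # 4x4 matrix sketched above; adding (max_position_embeddings - 1) below shifts it into
            # the valid embedding-index range [0, 2 * max_position_embeddings - 2].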
assert key_sequence.size <= self.max_position_embeddings
            # we can then use that as an indirect index into the embedding table to look up the features for that index.
            # this is just a `gather` primitive op. The resulting tensor will
            # have all the dimensions of the indexing expression (query_sequence x key_sequence),
            # plus all the dimensions of the embedding weight that were not indirectly accessed (here, `features`).
            # this form of indirect indexing is more straightforward than either advanced indexing or torch.gather, which both
            # have a lot of dependencies on the positions of indexing tensors.
positional_embedding = self.distance_embedding.weight[self.max_position_embeddings - 1 + distance, features]
if self.position_embedding_type == "relative_key":
                # these were einsum ops in the positional code because they are not easy to fit to existing matmul operators,
                # even though they are degenerate matmuls
relative_position_scores = (q * positional_embedding).sum(features)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = (q * positional_embedding).sum(features)
relative_position_scores_key = (k * positional_embedding).sum(features)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_probs = attention_scores
# Normalize the attention scores to probabilities.
attention_probs = softmax(attention_scores, dim=key_sequence)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = torch.nn.functional.dropout(attention_probs, p=self.dropout_prob)
# similarly, we can replace the matmul with a direct listing of the outer product, which makes it clear
# we are weighting the values v across all keys with the attention scores.
context_layer = (attention_probs * v).sum(key_sequence)
        # finally, we convert back to a standard tensor by describing the layout of dimensions.
        # working in reverse of the earlier binding, the (heads, features) group flattens the two dimensions into a single one.
return context_layer.order(batch, query_sequence, [heads, features])
| pytorch-master | functorch/test/attn_ft.py |
import torch
import copy
from torch.testing._internal.common_methods_invocations import op_db
from functorch_additional_op_db import additional_op_db
from enum import Enum
import functorch._src.top_operators_github_usage as top_ops
import pprint
import unittest
import enum
from torch.testing._internal.common_device_type import toleranceOverride
# Importing these files make modifications to the op_db that we need
import test_ops # noqa: F401
import test_vmap # noqa: F401
all_overridable = list(torch.overrides.get_testing_overrides().keys())
public_docs = [
(torch.nn.functional, 'torch.nn.functional', 'docs/source/nn.functional.rst'),
(torch.fft, 'torch.fft', 'docs/source/fft.rst'),
(torch.special, 'torch.special', 'docs/source/special.rst'),
(torch.linalg, 'torch.linalg', 'docs/source/linalg.rst'),
(torch, 'torch', 'docs/source/torch.rst'),
(torch.Tensor, 'torch.Tensor', 'docs/source/tensors.rst'),
]
# torch.abs, Tensor.abs, Tensor.abs_ are all considered to be different
def get_public_overridable_apis(pytorch_root='/raid/rzou/pt/debug-cpu'):
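    # Scan the documented public API surface (the .rst sources listed in public_docs) and
    # keep only the names that torch.overrides reports as overridable.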
results = {}
all_overridable_apis = set(torch.overrides.get_testing_overrides().keys())
for module, module_name, src in public_docs:
with open(f'{pytorch_root}/{src}') as f:
lines = f.readlines()
            # APIs either begin with 4 spaces or ".. autofunction::"
api_lines1 = [line.strip() for line in lines if line.startswith(' ' * 4)]
api_lines2 = [line.strip()[len('.. autofunction:: '):]
for line in lines if line.startswith('.. autofunction::')]
lines = api_lines1 + api_lines2
lines = [line[7:] if line.startswith('Tensor.') else line for line in lines]
lines = [line for line in lines if hasattr(module, line)]
for line in lines:
api = getattr(module, line)
if api in all_overridable_apis:
results[f'{module_name}.{line}'] = api
return results
denylist = {
'torch.Tensor.data_ptr',
'torch.Tensor.dim',
'torch.Tensor.element_size',
'torch.Tensor.backward',
'torch.Tensor.as_strided',
'torch.Tensor.register_hook',
'torch.Tensor.record_stream',
'torch.Tensor.qscheme',
'torch.Tensor.ndimension',
'torch.Tensor.smm',
'torch.Tensor.sspaddmm',
'torch.Tensor.retain_grad',
'torch.Tensor.sparse_mask',
'torch.Tensor.sparse_dim',
'torch.Tensor.dense_dim',
'torch.Tensor.values',
'torch.Tensor.indices',
'torch.Tensor.numel',
'torch.Tensor.size',
'torch.Tensor.nelement',
'torch.Tensor.q_scale',
'torch.Tensor.q_zero_point',
'torch.Tensor.q_per_channel_scales',
'torch.Tensor.q_per_channel_zero_points',
'torch.Tensor.q_per_channel_axis',
'torch.Tensor.int_repr',
'torch.Tensor.to_sparse',
'torch.Tensor.is_inference',
'torch.Tensor.storage',
'torch.Tensor.storage_type',
}
def get_method_only_ops_we_care_about():
apis = get_public_overridable_apis()
result = []
for key, _ in apis.items():
if not key.startswith('torch.Tensor'):
continue
if key in denylist:
continue
api = key.split('.')[2]
# filter out in-place
if api.endswith('_'):
continue
if f'torch.{api}' not in apis.keys():
result.append(api)
return result
# Deduplicates torch.abs and Tensor.abs
def get_public_overridable_ops():
results = get_public_overridable_apis()
cpy = copy.deepcopy(results)
for key, _ in cpy.items():
if not key.startswith('torch.Tensor'):
continue
api = key.split('.')[2]
if f'torch.{api}' in results.keys():
del results[key]
return results
def get_public_overridable_outplace_ops():
results = get_public_overridable_ops()
cpy = copy.deepcopy(results)
for key, _ in cpy.items():
        # NB: there are no dunder methods because we don't document those
if key.endswith('_'):
del results[key]
return results
def get_public_overridable_outplace_we_care_about():
results = get_public_overridable_outplace_ops()
cpy = copy.deepcopy(results)
for key, _ in cpy.items():
# quantization
if 'quant' in key or '.q_' in key:
del results[key]
# is_cpu, etc. It doesn't make sense to have OpInfos for these
if '.is_' in key:
del results[key]
if key in denylist and key in results:
del results[key]
return results
# e.g. nn.functional.softmax
def get_op(dotted_name):
names = dotted_name.split('.')
mod = torch
for name in names:
if not hasattr(mod, name):
return None
mod = getattr(mod, name)
return mod
# Maps function -> [OpInfo]
def get_ops_covered_by_opinfos():
ops = {}
def safe_append(dct, key, val):
if key in dct:
dct[key].append(val)
else:
dct[key] = [val]
for opinfo in op_db:
func_op = get_op(opinfo.name)
if func_op:
safe_append(ops, func_op, opinfo)
if opinfo.method_variant:
safe_append(ops, opinfo.method_variant, opinfo)
if opinfo.inplace_variant:
safe_append(ops, opinfo.inplace_variant, opinfo)
for alias in opinfo.aliases:
safe_append(ops, alias.op, opinfo)
return ops
factory_fns = {
'tensor', 'zeros', 'ones', 'randn', 'arange', 'rand', 'empty', 'randperm',
'linspace', 'logspace', 'hann_window', 'full', 'eye', 'blackman_window',
    'bartlett_window', 'randint', 'range', 'arange',
}
def get_top_ops(torch_threshold, nn_fn_threshold, with_counts=False):
denylist = set({
# These are either not real "operators", factory functions
# that trivially work, or not-documented ops.
'load', 'no_grad', 'save', 'from_numpy',
'manual_seed', 'set_grad_enabled',
'set_default_tensor_type', 'set_num_threads',
'set_printoptions', 'numel',
'set_default_dtype', 'sparse_coo_tensor', 'set_rng_state',
'get_rng_state', 'get_default_dtype', 'initial_seed',
'get_num_threads', 'quantize_per_tensor',
'hann_window', 'is_tensor', 'as_tensor',
'equal', 'enable_grad', 'seed', 'is_storage',
'is_floating_point', 'nn.functional.torch',
'set_flush_denormal', 'set_num_interop_threads', 'dequantize',
'get_num_interop_threads', 'nn.functional.math',
'nn.functional.threshold_',
'nn.functional.selu_',
'nn.functional.elu_',
'nn.functional.rrelu_',
'nn.functional.leaky_relu_',
'nn.functional.hardtanh_',
'nn.functional.has_torch_function',
'nn.functional.has_torch_function_unary',
'nn.functional.has_torch_function_variadic',
'nn.functional.handle_torch_function',
'nn.functional.adaptive_max_pool1d_with_indices',
'nn.functional.adaptive_max_pool2d_with_indices',
'nn.functional.adaptive_max_pool3d_with_indices',
'nn.functional.fractional_max_pool2d_with_indices',
'nn.functional.fractional_max_pool3d_with_indices',
'is_complex',
'grad',
'quantize_per_channel',
'nn.functional.max_pool2d_with_indices',
'nn.functional.max_pool3d_with_indices',
'nn.functional.max_pool1d_with_indices',
'nn.functional.celu_',
'nn.functional.grad',
'nn.functional.relu_',
'nn.functional.boolean_dispatch',
'nn.functional.assert_int_or_pair',
'fft', # is namespace
})
torch_ops = top_ops.top_torch
nn_fn_ops = top_ops.get_nn_functional_top_list()
torch_ops = [op for op in torch_ops if op[0] not in denylist]
nn_fn_ops = [op for op in nn_fn_ops if op[0] not in denylist]
ops = torch_ops[:torch_threshold] + nn_fn_ops[:nn_fn_threshold]
# Now, sort by priority
ops.sort(reverse=True, key=lambda op: op[1])
if not with_counts:
ops = [op[0] for op in ops]
return ops
def get_ops_percentage(torch_threshold, nn_fn_threshold):
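    # Estimate the fraction of usage-weighted operator calls that fall within the chosen
    # torch / nn.functional thresholds.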
data = top_ops.top_torch + top_ops.get_nn_functional_top_list()
def get_num_usages(opname):
# Ignore this, this is heavily inflated
if opname == 't':
return 0
result = [op[1] for op in data if op[0] == opname]
assert len(result) == 1
return result[0]
# get all operators that are not in the denylist
all_ops = get_top_ops(999999, 999999)
total_op_usages = sum([get_num_usages(op) for op in all_ops])
# get subset of all operators
subset_ops = get_top_ops(torch_threshold, nn_fn_threshold)
subset_op_usages = sum([get_num_usages(op) for op in subset_ops])
return subset_op_usages / total_op_usages
def get_top_ops_not_covered_by_opinfo(torch_threshold=0, nn_fn_threshold=0):
ops = get_top_ops(torch_threshold, nn_fn_threshold)
ops_with_opinfo = []
for op in op_db:
ops_with_opinfo.append(op.name)
ops_with_opinfo.extend([op.name for op in op.aliases])
ops_with_opinfo = set(ops_with_opinfo)
result = [op for op in ops if op not in ops_with_opinfo]
result = [op for op in result if op not in denylist]
result = [op for op in result if op not in factory_fns]
return result
def get_covered_ops(ops_list, invert=False):
ops_covered_by_opinfo = get_ops_covered_by_opinfos()
overridable_outplace_ops = ops_list
results = {}
for key, op in overridable_outplace_ops.items():
cond = op in ops_covered_by_opinfo
if invert:
cond = not cond
if cond:
results[key] = op
return results
class Status(Enum):
Correct = 0
Fast = 1
tests = {
'test_vmap_exhaustive',
'test_op_has_batch_rule',
'test_vjp',
'test_vmapvjp',
'test_vmapvjp_has_batch_rule',
'test_jvp',
'test_vmapjvp',
}
def is_decorateinfo_skip_or_xfail(decorateinfo):
assert len(decorateinfo.decorators) == 1
actual_decorator = decorateinfo.decorators[0]
if isinstance(actual_decorator, toleranceOverride):
return False
if actual_decorator == unittest.expectedFailure:
return True
# Assume the rest are skips
return True
def get_all_tested_ops():
overridable_outplace_we_care_about = get_public_overridable_outplace_we_care_about()
op_to_opinfo = get_ops_covered_by_opinfos()
result = set({})
for name, op in get_covered_ops(overridable_outplace_we_care_about).items():
opinfos = op_to_opinfo[op]
for opinfo in opinfos:
result.add(opinfo.name)
return result
def get_skipped_or_xfailed_ops_for(test_name):
overridable_outplace_we_care_about = get_public_overridable_outplace_we_care_about()
op_to_opinfo = get_ops_covered_by_opinfos()
result = set({})
for name, op in get_covered_ops(overridable_outplace_we_care_about).items():
opinfos = op_to_opinfo[op]
for opinfo in opinfos:
for decorator in opinfo.decorators:
if not hasattr(decorator, 'test_name'):
continue
if decorator.test_name != test_name:
continue
if is_decorateinfo_skip_or_xfail(decorator):
result.add(opinfo.name)
return result
def get_statuses(for_subset=None, invert=False):
overridable_outplace_we_care_about = get_public_overridable_outplace_we_care_about()
if for_subset is not None:
overridable_outplace_we_care_about = {
k: v
for k, v in overridable_outplace_we_care_about.items()
# Removes "torch."
if k[6:] in for_subset
}
op_to_opinfo = get_ops_covered_by_opinfos()
result = {}
_ = get_covered_ops(overridable_outplace_we_care_about)
def get_covered_tests(op):
opinfos = op_to_opinfo[op]
result = copy.deepcopy(tests)
for opinfo in opinfos:
for decorator in opinfo.decorators:
if not hasattr(decorator, 'test_name'):
continue
if decorator.test_name in tests and decorator.test_name in result:
result.remove(decorator.test_name)
return result
def get_all_aliases(op):
opinfos = op_to_opinfo[op]
result = []
for opinfo in opinfos:
result.append(opinfo.name)
result.extend(opinfo.aliases)
return set(result)
for name, op in get_covered_ops(overridable_outplace_we_care_about).items():
successful_tests = get_covered_tests(op)
failed_tests = tests - successful_tests
result[name] = failed_tests if invert else successful_tests
return result
def transpose_statuses(for_subset=None, invert=False):
statuses = get_statuses(for_subset, invert=invert)
result = {}
for test in tests:
result[test] = set({})
for op, supported in statuses.items():
for test in supported:
result[test].add(op)
return result
overridable_apis = get_public_overridable_apis()
overridable_ops = get_public_overridable_ops()
overridable_outplace_ops = get_public_overridable_outplace_ops()
overridable_outplace_we_care_about = get_public_overridable_outplace_we_care_about()
tested_overridable_outplace_ops = get_covered_ops(overridable_outplace_we_care_about)
untested_overridable_outplace_ops = get_covered_ops(overridable_outplace_we_care_about, invert=True)
# print("List of OpInfos we need:")
# for key in untested_overridable_outplace_ops.keys():
# print(key)
# print("-" * 80)
# print("")
print(f'Overridable public APIs: {len(overridable_apis)}')
print(f'Overridable public ops: {len(overridable_ops)}')
print(f'Overridable public outplace ops: {len(overridable_outplace_ops)}')
print(f'Overridable public outplace ops we care about: {len(overridable_outplace_we_care_about)}')
print(f'OpInfo-tested overridable public outplace ops: {len(tested_overridable_outplace_ops)}')
def remove_torch(name):
assert name[:6] == 'torch.'
return name[6:]
def get_list_of_all_tests():
all_tests = list(tested_overridable_outplace_ops.keys())
return set([remove_torch(test) for test in all_tests])
mytest = {
'test_vmap_exhaustive',
'test_op_has_batch_rule',
'test_vjp',
'test_vmapvjp',
'test_vmapvjp_has_batch_rule',
}
print('*' * 80)
all_tests = get_list_of_all_tests()
for test in mytest:
result = get_skipped_or_xfailed_ops_for(test)
diff = len(all_tests - result)
print(f'{test}: {diff}')
def get_jvp_coverage(subset=None):
# - number that support autograd
# - number that support forward_ad (in pytorch core)
# - number that support functorch.jvp
op_to_opinfo = get_ops_covered_by_opinfos()
ops_dct = tested_overridable_outplace_ops
if subset is not None:
ops_dct = {name: op for name, op in ops_dct.items()
if remove_torch(name) in subset}
supports_autograd_ops_dct = {name: op_to_opinfo[fn] for name, fn in ops_dct.items()
if op_to_opinfo[fn][0].supports_autograd}
supports_forwardad_ops_dct = {name: op_to_opinfo[fn] for name, fn in ops_dct.items()
if op_to_opinfo[fn][0].supports_forward_ad}
ops = set([remove_torch(test) for test in list(ops_dct.keys())])
supports_autograd = set([remove_torch(test)
for test in list(supports_autograd_ops_dct.keys())])
supports_forward_ad = set([remove_torch(test)
for test in list(supports_forwardad_ops_dct.keys())])
assert supports_forward_ad.issubset(supports_autograd)
assert supports_autograd.issubset(ops)
failed_ops = get_skipped_or_xfailed_ops_for('test_jvp')
coverage = len(supports_forward_ad - failed_ops)
no_forward_ad = len(supports_autograd) - len(supports_forward_ad)
print(f'test_jvp, {coverage}, {no_forward_ad}, {len(ops)}')
get_jvp_coverage()
get_jvp_coverage(get_top_ops(100, 25))
for op in get_top_ops(100, 25):
print(op)
print('*' * 80)
# result = get_skipped_or_xfailed_ops_for('test_vmap_exhaustive')
# result = get_skipped_or_xfailed_ops_for('test_op_has_batch_rule')
# result = get_skipped_or_xfailed_ops_for('test_vjp')
# result = get_skipped_or_xfailed_ops_for('test_vmapvjp')
# result = get_skipped_or_xfailed_ops_for('test_vmapvjp_has_batch_rule')
# import pdb; pdb.set_trace()
statuses = transpose_statuses()
for test in tests:
print(f'{test} coverage {len(statuses[test])}')
method_only_ops = get_method_only_ops_we_care_about()
# for op in method_only_ops:
# print(f' {op},')
top_ops_not_covered_by_opinfo = get_top_ops_not_covered_by_opinfo(100, 25)
print('=' * 80)
for op in top_ops_not_covered_by_opinfo:
print(f'{op}, {top_ops.usage_count[op]}')
# print("top ops not covered by opinfo: ")
# top_ops_not_covered_by_opinfo = get_top_ops_not_covered_by_opinfo(200, 50)
# for op in top_ops_not_covered_by_opinfo:
# print(f'{op}, {top_ops.usage_count[op]}')
# print("top ops not covered by opinfo: ")
# top_ops_not_covered_by_opinfo = get_top_ops_not_covered_by_opinfo(220, 92)
# for op in top_ops_not_covered_by_opinfo:
# print(f'{op}, {top_ops.usage_count[op]}')
# print("top ops not covered by opinfo: ")
# top_ops_not_covered_by_opinfo = get_top_ops_not_covered_by_opinfo(999, 999)
# for op in top_ops_not_covered_by_opinfo:
# print(f'{op}, {top_ops.usage_count[op]}')
def remove_from_set(parent, to_remove):
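    """Remove, in place, every element of to_remove that is present in parent."""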
for to_remove_elt in to_remove:
if to_remove_elt in parent:
parent.remove(to_remove_elt)
def print_coverage_info(th=100, nn=25):
print('=' * 80)
print(f"top {th}, {nn} coverage")
statuses = transpose_statuses(get_top_ops(th, nn), invert=True)
top_ops_not_covered_by_opinfo = get_top_ops_not_covered_by_opinfo(th, nn)
# testing problems
exemptions = {
'torch.nn.functional.dropout', # randomness
}
# Allowed exemptions
vmap_exemptions = {
'torch.randn_like', # randomness
'torch.rand_like', # randomness
'torch.allclose', # number output
'torch.unique', # dynamic
'torch.nonzero', # dynamic
'torch.masked_select', # dynamic
'torch.prod', # dynamic (backward)
'torch.norm', # norm with nuc is not commonly used; we support the other cases.
'torch.svd', # There isn't a bug, it is just nondeterministic so we can't test it.
'torch.nn.functional.embedding', # We support everything except the sparse option.
}
remove_from_set(statuses['test_vmap_exhaustive'], vmap_exemptions)
remove_from_set(statuses['test_vmapvjp'], vmap_exemptions)
remove_from_set(statuses['test_vmapvjp_has_batch_rule'], vmap_exemptions)
remove_from_set(statuses['test_op_has_batch_rule'], vmap_exemptions)
remove_from_set(statuses['test_vmapjvp'], vmap_exemptions)
for test in tests:
remove_from_set(statuses[test], exemptions)
print(f"total ops in set: {th + nn}")
print(f"tested by OpInfo: {th + nn - len(top_ops_not_covered_by_opinfo)}")
for test in tests:
if test in {'test_jvp', 'test_vmapjvp'}:
continue
print(f'{test} failing coverage {len(statuses[test])}')
# We don't care about these yet
del statuses['test_jvp']
del statuses['test_vmapjvp']
pprint.pprint(statuses)
def get_name_to_opinfo_map():
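    """Map each op name (and each of its aliases) to the list of matching OpInfos."""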
dct = {}
for op in (op_db + additional_op_db):
def add(name, op):
if name not in dct:
dct[name] = []
dct[name].append(op)
add(op.name, op)
for alias in op.aliases:
add(alias.name, op)
return dct
NAME_TO_OPINFO = get_name_to_opinfo_map()
class Support(enum.Enum):
NO = 0
YES = 1
UNKNOWN = 2
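# Factory functions have no OpInfo coverage; the support queries below simply assume they work with every transform.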
FACTORY_FNS = {
'tensor', 'zeros', 'ones', 'randn', 'arange', 'rand', 'empty', 'range',
'full', 'randperm', 'eye', 'randint', 'linspace', 'logspace',
}
VJP_EXEMPTIONS = {
    'nn.functional.dropout',  # not actually a problem, randomness testing artifact
    'nn.functional.dropout2d',  # not actually a problem, randomness testing artifact
    'nn.functional.rrelu',  # not actually a problem, randomness testing artifact
    'bernoulli',  # not actually a problem, randomness testing artifact
    'normal',  # not actually a problem, randomness testing artifact
}
VMAP_EXEMPTIONS = {
'randn_like', # randomness
'rand_like', # randomness
'allclose', # number output
'unique', # dynamic
'nonzero', # dynamic
'masked_select', # dynamic
'prod', # dynamic (backward)
'norm', # norm with nuc is not commonly used; we support the other cases.
'svd', # There isn't a bug, it is just nondeterministic so we can't test it.
'nn.functional.embedding', # We support everything except the sparse option.
'nn.functional.dropout', # randomness
'nn.functional.dropout2d', # randomness
'bernoulli', # randomness
'multinomial', # randomness
'normal', # randomness
}
JVP_EXEMPTIONS = {
    'nn.functional.dropout',  # not actually a problem, randomness testing artifact
    'nn.functional.dropout2d',  # not actually a problem, randomness testing artifact
    'nn.functional.rrelu',  # not actually a problem, randomness testing artifact
    'normal',  # not actually a problem, randomness testing artifact
    'bernoulli',  # not actually a problem, randomness testing artifact
}
class Operator:
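    """A single operator, identified by name, together with its OpInfo entries (if any exist)."""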
def __init__(self, name):
self.name = name
self.opinfos = NAME_TO_OPINFO.get(name, None)
assert self.opinfos is None or len(self.opinfos) > 0
def has_opinfo(self):
return self.opinfos is not None
def __repr__(self):
return f'Operator("{self.name}")'
def __hash__(self):
return hash(self.name)
def no_opinfos_skip_test(self, test_name):
"""Returns NO if any opinfos have a skip or xfail for the test"""
if not self.has_opinfo():
return Support.UNKNOWN
for opinfo in self.opinfos:
for decorator in opinfo.decorators:
if not hasattr(decorator, 'test_name'):
continue
if decorator.test_name != test_name:
continue
if is_decorateinfo_skip_or_xfail(decorator):
return Support.NO
return Support.YES
def any_opinfo_attr(self, attr):
if not self.has_opinfo():
raise RuntimeError()
return any([getattr(opinfo, attr) for opinfo in self.opinfos])
def all_opinfo_attr(self, attr):
if not self.has_opinfo():
raise RuntimeError()
return all([getattr(opinfo, attr) for opinfo in self.opinfos])
def supports_vjp(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in VJP_EXEMPTIONS:
return Support.YES
return self.no_opinfos_skip_test('test_vjp')
def supports_vmap(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in VMAP_EXEMPTIONS:
return Support.YES
return self.no_opinfos_skip_test('test_vmap_exhaustive')
def supports_fast_vmap(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in VMAP_EXEMPTIONS:
return Support.YES
return self.no_opinfos_skip_test('test_op_has_batch_rule')
def supports_vmapvjp(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in VMAP_EXEMPTIONS:
return Support.YES
return self.no_opinfos_skip_test('test_vmapvjp')
def supports_fast_vmapvjp(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in VMAP_EXEMPTIONS:
return Support.YES
return self.no_opinfos_skip_test('test_vmapvjp_has_batch_rule')
def supports_jvp(self):
if self.name in FACTORY_FNS:
return Support.YES
if self.name in JVP_EXEMPTIONS:
return Support.YES
if not self.has_opinfo():
return Support.UNKNOWN
if self.any_opinfo_attr('supports_autograd') and \
not self.all_opinfo_attr('supports_forward_ad'):
return Support.NO
return self.no_opinfos_skip_test('test_jvp')
def supports_jvpvjp(self):
if self.name in FACTORY_FNS:
return Support.YES
exemptions = {
# we have support (see OpInfo), testing artifact
'nn.functional.dropout2d',
'nn.functional.dropout',
            # exception: we don't even support double backward for this
'nn.functional.hardswish',
'bernoulli', # this isn't differentiable
'normal', # not differentiable
}
if self.name in exemptions:
return Support.YES
return self.no_opinfos_skip_test('test_jvpvjp')
def _supports_vmapjvp_base(self, test):
if self.name in FACTORY_FNS:
return Support.YES
VMAPJVP_EXEMPTIONS = {
'prod', # dynamic (backward)
'nn.functional.batch_norm', # testing problem
            'normal',  # not actually a problem, randomness testing artifact
            'bernoulli',  # not actually a problem, randomness testing artifact
            'nn.functional.dropout2d',  # not actually a problem, randomness testing artifact
            'nn.functional.dropout',  # not actually a problem, randomness testing artifact
# Not a problem.
# It's just that the max_norm testing mutates inputs...
# (we have our own functorch variant of the OpInfo without max_norm)
'nn.functional.embedding',
}
if self.name in VMAPJVP_EXEMPTIONS:
return Support.YES
if not self.has_opinfo():
return Support.UNKNOWN
if self.any_opinfo_attr('supports_autograd') and \
not self.all_opinfo_attr('supports_forward_ad'):
return Support.NO
return self.no_opinfos_skip_test(test)
def supports_vmapjvp(self):
return self._supports_vmapjvp_base('test_vmapjvpall')
def supports_fast_vmapjvp(self):
return self._supports_vmapjvp_base('test_vmapjvpall_has_batch_rule')
class OperatorSet:
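    """A collection of Operators with helpers for querying and summarizing transform support."""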
def __init__(self, operators):
self.data = set(operators)
@classmethod
def from_names(cls, names):
return OperatorSet([Operator(name) for name in names])
@classmethod
def from_top_ops_threshold(cls, torch_threshold, nn_fn_threshold):
names = get_top_ops(torch_threshold, nn_fn_threshold)
return cls.from_names(names)
@classmethod
def from_top125(cls):
return cls.from_top_ops_threshold(100, 25)
@classmethod
def from_top160(cls):
return cls.from_top_ops_threshold(107, 53)
@classmethod
def all(cls):
dct = get_public_overridable_outplace_we_care_about()
names = dct.keys()
names_sanitized = []
for n in names:
torch_tensor = 'torch.Tensor.'
torch_dot = 'torch.'
if n.startswith(torch_tensor):
names_sanitized.append(n[len(torch_tensor):])
elif n.startswith(torch_dot):
names_sanitized.append(n[len(torch_dot):])
else:
raise AssertionError()
return cls.from_names(names_sanitized)
def query(self, operator_method, filter=(Support.NO, Support.YES, Support.UNKNOWN)):
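        """Bucket the ops in this set by the Support value returned by operator_method, keeping only statuses in filter."""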
result = {}
for key in filter:
result[key] = set([])
for op in self.data:
support_status = operator_method(op)
if support_status in filter:
result[support_status].add(op)
return result
def summary(self):
checks = [
'supports_vjp',
'supports_vmap',
'supports_fast_vmap',
'supports_vmapvjp',
'supports_fast_vmapvjp',
'supports_jvp',
'supports_vmapjvp',
'supports_fast_vmapjvp',
'supports_jvpvjp',
]
result = ['test, yes, no, unknown']
for check in checks:
accessor = getattr(Operator, check)
all_results = self.query(accessor)
yes_amt = len(all_results[Support.YES])
no_amt = len(all_results[Support.NO])
unknown_amt = len(all_results[Support.UNKNOWN])
result.append(f'{check}, {yes_amt}, {no_amt}, {unknown_amt}')
return '\n'.join(result)
opset = OperatorSet.all()
has_no_opinfo = opset.query(Operator.has_opinfo, (False,))
print("=" * 30 + " Summary " + "=" * 30)
print(f'% of usages on github: {get_ops_percentage(99999, 99999)}')
print(opset.summary())
# sanity checks
result = opset.query(Operator.supports_vjp, (Support.NO, Support.UNKNOWN))
# pprint.pprint(result)
print("=" * 30 + " Top 60 Summary " + "=" * 30)
print(f'% of usages on github: {get_ops_percentage(35, 25)}')
opset = OperatorSet.from_top_ops_threshold(35, 25)
# result = opset.query(Operator.supports_vmapjvp, (Support.NO, Support.UNKNOWN))
# pprint.pprint(result)
# result = opset.query(Operator.supports_jvp, (Support.NO, Support.UNKNOWN))
# pprint.pprint(result)
# kresult = opset.query(Operator.supports_jvpvjp, (Support.NO, Support.UNKNOWN))
# kpprint.pprint(result)
# result = opset.query(Operator.supports_vmapjvp, (Support.NO, Support.UNKNOWN))
# pprint.pprint(result)
# result = opset.query(Operator.supports_fast_vmapjvp, (Support.NO, Support.UNKNOWN))
# pprint.pprint(result)
# pprint.pprint(result)
print(opset.summary())
print("=" * 30 + " Top 125 Summary " + "=" * 30)
print(f'% of usages on github: {get_ops_percentage(100, 25)}')
opset = OperatorSet.from_top125()
# result = opset.query(Operator.supports_vmap, (Support.NO, Support.UNKNOWN))
# pprint.pprint(result)
# result = opset.query(Operator.supports_jvpvjp, (Support.NO, Support.UNKNOWN))
# pprint.pprint(result)
print("supports_vjp")
result = opset.query(Operator.supports_vjp, (Support.NO, Support.UNKNOWN))
pprint.pprint(result)
print("supports_jvp")
result = opset.query(Operator.supports_jvp, (Support.NO, Support.UNKNOWN))
pprint.pprint(result)
print("supports_vmapjvp")
result = opset.query(Operator.supports_vmapjvp, (Support.NO, Support.UNKNOWN))
pprint.pprint(result)
print("supports_jvpvjp")
result = opset.query(Operator.supports_jvpvjp, (Support.NO, Support.UNKNOWN))
pprint.pprint(result)
# result = opset.query(Operator.supports_fast_vmapjvp, (Support.NO, Support.UNKNOWN))
# pprint.pprint(result)
# pprint.pprint(result)
print(opset.summary())
# print("=" * 30 + " Top 160 Summary " + "=" * 30)
# opset = OperatorSet.from_top160()
# result = opset.query(Operator.supports_jvpvjp, (Support.NO, Support.UNKNOWN))
# pprint.pprint(result)
# print(opset.summary())
# Print list of everything in order
# all_ops = get_top_ops(999999, 999999, with_counts=True)
# for op, count in all_ops:
# print(f'{op}, {count}')
| pytorch-master | functorch/test/discover_coverage.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
import math
class BertSelfAttention(nn.Module):
def __init__(self, hidden_size, num_attention_heads,
attention_probs_dropout_prob,
position_embedding_type=None, max_position_embeddings=None):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
f"The hidden size ({hidden_size}) is not a multiple of the number of attention "
f"heads ({num_attention_heads})"
)
self.num_attention_heads = num_attention_heads
self.attention_head_size = int(hidden_size / num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(hidden_size, self.all_head_size)
self.key = nn.Linear(hidden_size, self.all_head_size)
self.value = nn.Linear(hidden_size, self.all_head_size)
self.dropout = nn.Dropout(attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type
if self.position_embedding_type is not None:
assert max_position_embeddings is not None
self.max_position_embeddings = max_position_embeddings
self.distance_embedding = nn.Embedding(2 * max_position_embeddings - 1, self.attention_head_size)
def transpose_for_scores(self, x):
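        # Split the hidden dimension into heads: (batch, seq, hidden) -> (batch, num_heads, seq, head_size).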
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
past_key_value=None,
):
q = self.query(hidden_states)
k = self.key(hidden_states)
v = self.value(hidden_states)
q = self.transpose_for_scores(q)
k = self.transpose_for_scores(k)
v = self.transpose_for_scores(v)
if past_key_value is not None:
k = torch.cat([past_key_value[0], k], dim=2)
v = torch.cat([past_key_value[1], v], dim=2)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(q, k.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if self.position_embedding_type is not None:
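            # Relative position embeddings: embed each pairwise token distance and fold its score into the attention logits.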
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=q.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", q, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", q, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", k, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, v)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
| pytorch-master | functorch/test/attn_positional.py |
import re
import torch
"""
Instructions:
1. pytest -n 8 test/test_vmap.py test/test_ops.py test/test_pythonkey.py > result.txt
2. python test/xfail_suggester.py
"""
with open('result.txt') as f:
lines = f.readlines()
failed = [line for line in lines if line.startswith('FAILED')]
p = re.compile(r'FAILED test/test_\w+.py::\w+::(\S+)')
def get_failed_test(line):
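    """Return the test name captured from a FAILED line, or None if the line doesn't match."""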
m = p.match(line)
if m is None:
return None
return m.group(1)
base_names = {
'test_grad_',
'test_vjp_',
'test_vmapvjp_',
'test_vmapvjp_has_batch_rule_',
'test_vjpvmap_',
'test_jvp_',
'test_vmapjvp_',
'test_vmapjvpall_has_batch_rule_',
'test_vmapjvpall_',
'test_jvpvjp_',
'test_vjpvjp_',
'test_decomposition_',
'test_make_fx_exhaustive_',
'test_vmap_exhaustive_',
'test_op_has_batch_rule_',
'test_vmap_autograd_grad_',
}
failed_tests = [get_failed_test(line) for line in lines]
failed_tests = [match for match in failed_tests if match is not None]
failed_tests = sorted(failed_tests)
suggested_xfails = {}
def remove_device_dtype(test):
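    """Strip the trailing _<device>_<dtype> suffix (e.g. _cpu_float32) from a test name."""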
return '_'.join(test.split('_')[:-2])
def belongs_to_base(test, base):
if not test.startswith(base):
return False
candidates = [try_base for try_base in base_names if len(try_base) > len(base)]
for candidate in candidates:
if test.startswith(candidate):
return False
return True
def parse_namespace(base):
mappings = {
'nn_functional_': 'nn.functional',
'fft_': 'fft',
'linalg_': 'linalg',
'_masked_': '_masked',
'sparse_': 'sparse',
        'special_': 'special',
}
for heading in mappings.keys():
if base.startswith(heading):
return mappings[heading], base[len(heading):]
return None, base
def get_torch_module(namespace):
if namespace is None:
return torch
if namespace == 'nn.functional':
return torch.nn.functional
return getattr(torch, namespace)
def parse_base(base):
namespace, rest = parse_namespace(base)
apis = dir(get_torch_module(namespace))
apis = sorted(apis, key=lambda x: -len(x))
api = rest
variant = ''
for candidate in apis:
if rest.startswith(candidate):
api = candidate
variant = rest[len(candidate) + 1:]
break
print(base, namespace, api, variant)
return namespace, api, variant
def any_starts_with(strs, thing):
for s in strs:
if s.startswith(thing):
return True
return False
def get_suggested_xfails(base, tests):
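    """Suggest xfail()/skip() entries for failing tests under base, merging the cpu and cuda variants when both fail."""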
result = []
tests = [test[len(base):] for test in tests if
belongs_to_base(test, base)]
base_tests = set([remove_device_dtype(test) for test in tests])
tests = set(tests)
for base in base_tests:
cpu_variant = base + '_cpu_float32'
cuda_variant = base + '_cuda_float32'
namespace, api, variant = parse_base(base)
if namespace is None:
api = api
else:
api = f'{namespace}.{api}'
if cpu_variant in tests and cuda_variant in tests:
result.append(f"xfail('{api}', '{variant}'),")
continue
if cpu_variant in tests:
result.append(f"xfail('{api}', '{variant}', device_type='cpu'),")
continue
if cuda_variant in tests:
result.append(f"xfail('{api}', '{variant}', device_type='cuda'),")
continue
result.append(f"skip('{api}', '{variant}',")
return result
result = {base: get_suggested_xfails(base, failed_tests) for base in base_names}
for k, v in result.items():
print('=' * 50)
print(k)
print('=' * 50)
print('\n'.join(v))
| pytorch-master | functorch/test/xfail_suggester.py |
# Owner(s): ["module: functorch"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn as nn
import torch.utils._pytree as pytree
import unittest
import warnings
import itertools
from functools import partial
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_methods_invocations import op_db
from functorch import (
grad, vjp, vmap, jacrev,
make_fx
)
from functorch._src.aot_autograd import aot_module_simplified
from functorch.compile import (
nnc_jit, compiled_function, compiled_module,
min_cut_rematerialization_partition, aot_function, aot_module, decomposition_table, nop,
num_of_recompilations, default_partition, default_decompositions, memory_efficient_fusion, clear_compile_cache
)
from torch.testing._internal.common_device_type import ops
from functorch_additional_op_db import additional_op_db
from common_utils import (
xfail,
skip,
skipOps,
)
USE_TORCHVISION = False
try:
import torchvision
USE_TORCHVISION = True
except ImportError:
warnings.warn("Couldn't import torchvision. Some of our tests use it, try "
"to install it with commands from pytorch.org, post-fixed with "
"`--no-deps` to avoid overwriting the pytorch installation",
UserWarning)
USE_NETWORKX = False
try:
import networkx # noqa: F401
USE_NETWORKX = True
except ImportError:
warnings.warn("Some tests use networkx but it was not installed",
UserWarning)
# NB: numpy is a testing dependency!
class TestPythonKey(TestCase):
def test_make_fx(self, device):
def f(x):
return torch.sin(x)
inp = torch.randn(3)
fx_f = make_fx(f)(inp)
new_inp = torch.randn(3)
self.assertEqual(fx_f(new_inp), f(new_inp))
def test_make_fx_grad(self, device):
def f(x):
return torch.sin(x).sum()
inp = torch.randn(3)
f = grad(f)
fx_f = make_fx(f)(inp)
new_inp = torch.randn(3)
self.assertEqual(fx_f(new_inp), f(new_inp))
def test_scalar_device(self, device):
def f(a, b):
return a + b
inps = [torch.randn(3, device=device), torch.tensor(5)]
fx_f = make_fx(f)(*inps)
self.assertEqual(fx_f(*inps), f(*inps))
def test_make_fx_vmap(self, device):
def f(x):
return torch.sin(x)
inp = torch.randn(5, 3)
f = vmap(f)
fx_f = make_fx(f)(inp)
new_inp = torch.randn(5, 3)
self.assertEqual(fx_f(new_inp), f(new_inp))
def test_make_fx_jacrev(self, device):
def f(x):
return x.sin().sum()
inp = torch.randn(3)
f = jacrev(jacrev(f))
fx_f = make_fx(f)(inp)
new_inp = torch.randn(3)
self.assertEqual(fx_f(new_inp), f(new_inp))
def test_make_fx_vjp(self, device):
def f(x):
return torch.sin(x).sum()
primals = torch.randn(3)
_, vjp_fn = vjp(f, primals)
cotangent = torch.randn(())
fx_f = make_fx(vjp_fn)(cotangent, True, True)
new_cotangent = torch.randn(())
self.assertEqual(fx_f(new_cotangent, True, True), vjp_fn(new_cotangent))
def test_make_fx_no_decompose(self, device):
# FIXME
return self.skipTest("error: maximum recursion reached")
def f(x):
return torch.tanh(x).sum()
fx_f = make_fx(grad(f))(torch.randn(5))
ops = set([i.target for i in fx_f.graph.nodes])
self.assertEqual(torch.ops.aten.tanh_backward in ops, True)
fx_f = make_fx(grad(f), decomposition_table)(torch.randn(5))
ops = set([i.target for i in fx_f.graph.nodes])
self.assertEqual(torch.ops.aten.tanh_backward in ops, False)
def test_nnc_jit(self, device):
def f(x):
return torch.sin(x)
jit_f = nnc_jit(f)
inp = torch.randn(3)
self.assertEqual(jit_f(inp), f(inp))
def test_nnc_scalar(self, device):
def f(x):
return torch.sin(x)
jit_f = nnc_jit(f)
inp = torch.randn(())
self.assertEqual(jit_f(inp), f(inp))
def test_nnc_pytrees(self, device):
def f(x):
return [torch.sin(x[0])]
jit_f = nnc_jit(f)
inp = [torch.randn(3)]
self.assertEqual(jit_f(inp), f(inp))
def test_external_calls(self, device):
def f(a, b):
return torch.mv(a, b)
jit_f = nnc_jit(f)
inp = [torch.randn(3, 3), torch.randn(3)]
self.assertEqual(jit_f(*inp), f(*inp))
def test_nnc_passthrough(self, device):
def f(x, y):
return x + y, y
inp = (torch.randn(3), torch.randn(3))
jit_f = nnc_jit(f)
self.assertEqual(jit_f(*inp), f(*inp))
def f(x):
x['a'] = x['a'] * 2
return x
inp = ({'a': torch.randn(3), 'b': torch.randn(3)},)
jit_f = nnc_jit(f)
self.assertEqual(jit_f(*inp), f(*inp))
@unittest.skipIf(not USE_TORCHVISION, "test requires torchvision")
def test_resnet18_backward_trace(self, device):
mod = torchvision.models.resnet18()
def f(x):
out = mod(x)
out.sum().backward()
return [a.grad for a in mod.parameters()]
inp = torch.randn(3, 3, 250, 250, requires_grad=True)
grads = f(inp)
mod.zero_grad()
mod(inp).sum().backward()
grads2 = [a.grad for a in mod.parameters()]
self.assertEqual(grads, grads2)
def _outs_and_grads(fn, inps):
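    """Run fn on inps, backprop the sum of each differentiable output, and return (outputs, input gradients)."""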
outs = fn(*inps)
for out in pytree.tree_flatten(outs)[0]:
if isinstance(out, torch.Tensor) and out.requires_grad:
out.sum().backward(retain_graph=True)
grads = [inp.grad for inp in pytree.tree_flatten(inps)[0]]
for inp in pytree.tree_flatten(inps)[0]:
inp.grad = None
return outs, grads
class TestAOTAutograd(TestCase):
def verify_aot_autograd(self, f, inp):
if isinstance(f, nn.Module):
compiled_f = aot_module(f, nop)
else:
compiled_f = aot_function(f, nop)
ref_out, ref_grad = _outs_and_grads(f, inp)
test_out, test_grad = _outs_and_grads(compiled_f, inp)
self.assertEqual(ref_out, test_out)
self.assertEqual(ref_grad, test_grad)
def test_single_output(self):
def f(a, b):
return a + b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output(self):
def f(a, b):
return a + b, a - b
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_multi_output_list(self):
def f(a, b):
return [a + b, a - b]
inp = [torch.randn(3, 3, requires_grad=True), torch.randn(3, 3)]
self.verify_aot_autograd(f, inp)
def test_no_grad_input_output(self):
def f(a, b):
return a.cos(), b.cos(), a * b
inp_thunks = [lambda: torch.randn(5, requires_grad=True), lambda: torch.randn(5, requires_grad=False)]
for inps in itertools.product(inp_thunks, repeat=2):
inps = [i() for i in inps]
self.verify_aot_autograd(f, inps)
def test_inner_grad(self):
def foo(x):
y = torch.exp(x)
z = torch.autograd.grad(y, x)
return z
inps = [torch.randn((), requires_grad=True)]
self.verify_aot_autograd(foo, inps)
def test_grad_context(self):
def foo(x):
return x * 2
inps = [torch.randn((), requires_grad=True)]
graph_size = None
def get_graph_size(fx_g, _):
nonlocal graph_size
graph_size = len(fx_g.graph.nodes)
return fx_g
start_recompilations = num_of_recompilations()
f = aot_function(foo, nop, get_graph_size)
with torch.set_grad_enabled(False):
f(*inps)
self.assertIsNone(graph_size)
with torch.set_grad_enabled(True):
f(*inps)
self.assertTrue(graph_size > 2)
self.assertEqual(num_of_recompilations() - start_recompilations, 2)
def test_output_dict(self):
def f(x):
return {'a': x, 'b': x}
inp = [torch.randn(3, 3, requires_grad=True)]
self.verify_aot_autograd(f, inp)
def f(x, y):
return {'a': x, 'b': y + x}
inp = [torch.randn(3, requires_grad=True), torch.randn(3)]
self.verify_aot_autograd(f, inp)
def f(x):
new_d = {}
for k in x:
new_d[k] = x[k] * 2
return new_d
inp = [{'a': torch.randn(3, requires_grad=True), 'b': torch.randn(3, requires_grad=True)}]
self.verify_aot_autograd(f, inp)
def test_module(self):
mod = nn.Sequential(nn.Linear(32, 32), nn.ReLU())
compiled_mod = compiled_module(mod, nop, nop)
inp = torch.randn(32, 32)
ref_out = mod(inp)
ref_out.sum().backward()
ref_grads = sorted([(name, p.grad) for name, p in mod.named_parameters()])
out = compiled_mod(inp)
out.sum().backward()
grads = sorted([(name, p.grad) for name, p in mod.named_parameters()])
self.assertEqual((out, grads), (ref_out, ref_grads))
def test_batchnorm(self):
mod = compiled_module(nn.BatchNorm2d(4), nop, nop)
x = torch.ones(1, 4, 2, 2)
mod(x).sum().backward()
def test_list_codegen(self):
def list_nop(f, _):
def g(inps):
return f(*inps)
g._boxed_call = True
return g
def f(a, b, c):
return a.sin() * b.cos() * c.sin()
f = aot_function(f, list_nop)
inp = [torch.randn(5, requires_grad=True) for _ in range(3)]
f(*inp).sum().backward()
class TestEagerFusionOpInfo(TestCase):
@ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
    # entries in here don't work and need to be fixed.
# Each one of these is a bug (or needs to be investigated)
@skipOps('TestEagerFusionOpInfo', 'test_aot_autograd_exhaustive', {
xfail('linalg.cholesky'),
skip('msort'),
xfail('nn.functional.dropout'),
xfail('to_sparse'),
xfail('addcdiv'),
xfail('cholesky'),
xfail('cumulative_trapezoid'),
xfail('diag_embed'),
xfail('linalg.householder_product'),
xfail('logit'),
xfail('trapezoid'),
xfail('trapz'),
xfail('corrcoef'),
xfail('cov'),
xfail('chalf'), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf'
skip('nn.functional.binary_cross_entropy_with_logits'), # seems to fail sometimes?
skip('nn.functional.margin_ranking_loss'), # seems flaky
})
@unittest.skip("Currently flaky on master for unclear reasons. Skipping for now")
def test_aot_autograd_exhaustive(self, device, dtype, op):
def f(args, kwargs):
return op.op(*args, **kwargs)
if not op.supports_autograd:
return
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=True)
for sample_input in sample_inputs_itr:
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
if not all([isinstance(i, torch.Tensor) and i.dtype == torch.float for i in args]):
self.skipTest("not all inputs are float tensors")
if not all([isinstance(i, torch.Tensor) and i.dtype == torch.float for i in kwargs.values()]):
self.skipTest("not all inputs are float tensors")
continue
t = f(args, kwargs)
if isinstance(t, tuple):
self.skipTest("output is a tuple")
continue
def reset_grads():
def f(x):
x.grad = None
pytree.tree_map(f, args)
def get_grads(args):
return pytree.tree_map(lambda x: x.grad, args)
# NB: We cache on function id, which is unreliable
# Can fix by using weakrefs, but not sure if it matters
clear_compile_cache()
compiled_f = compiled_function(f, nop, nop)
reset_grads()
compiled_f(args, kwargs).sum().backward()
compiled_grad = get_grads(args)
reset_grads()
f(args, kwargs).sum().backward()
orig_grad = get_grads(args)
self.assertEqual(orig_grad, compiled_grad)
def create_new_arg(x):
return x.detach().uniform_(0, 1).requires_grad_(x.requires_grad)
args = pytree.tree_map(create_new_arg, args)
reset_grads()
compiled_f(args, kwargs).sum().backward()
compiled_grad = get_grads(args)
reset_grads()
f(args, kwargs).sum().backward()
orig_grad = get_grads(args)
self.assertEqual(orig_grad, compiled_grad)
def extract_graph(fx_g, _, graph_cell):
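    """Compiler stub that stashes the traced FX graph in graph_cell and returns it unchanged."""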
graph_cell[0] = fx_g
return fx_g
def get_ins_outs(fx_g):
ins = []
outs = []
for n in fx_g.graph.nodes:
if n.op == 'placeholder':
ins.append(n)
elif n.op == 'output':
outs = tuple(n.args[0])
return ins, outs
def get_num_ins_outs(fx_g):
return tuple(len(i) for i in get_ins_outs(fx_g))
def get_fw_bw_graph(f, inps, partitioner=min_cut_rematerialization_partition):
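    """Trace f through aot_function with graph-capturing compilers and return the (forward, backward) FX graphs."""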
fw_graph_cell = [None]
bw_graph_cell = [None]
aot_function(f,
fw_compiler=partial(extract_graph, graph_cell=fw_graph_cell),
bw_compiler=partial(extract_graph, graph_cell=bw_graph_cell),
partition_fn=partitioner,
decompositions=default_decompositions)(*inps)
return (fw_graph_cell[0], bw_graph_cell[0])
class TestPartitioning(TestCase):
@unittest.skipIf(not USE_NETWORKX, "networkx not available")
def test_recompute_partitioning(self):
def fn(a, b):
return torch.sin(torch.sin(a)) + b
# Reference calculation
ref_a = torch.rand(10, 10, requires_grad=True)
ref_b = torch.rand(10, 10, requires_grad=True)
ref = fn(ref_a, ref_b)
ref.sum().backward()
# Compiled function calculation
res_a = ref_a.clone().detach().requires_grad_(True)
res_b = ref_b.clone().detach().requires_grad_(True)
def compile_fn(x, _):
return x
compiled_fn = compiled_function(fn, compile_fn, compile_fn, min_cut_rematerialization_partition)
res = compiled_fn(res_a, res_b)
res.sum().backward()
assert torch.allclose(ref, res, atol=1e-3, rtol=1e-3)
assert torch.allclose(ref_a.grad, res_a.grad, atol=1e-3, rtol=1e-3)
assert torch.allclose(ref_b.grad, res_b.grad, atol=1e-3, rtol=1e-3)
def test_meta_tensor_inplace_op(self):
# Following module results in inplace ops while tracing. The test checks
# that the meta tensor information is stored for inplace ops.
class MockModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.randn(3072, 768, requires_grad=True))
self.bias = torch.nn.Parameter(torch.randn(3072, requires_grad=True))
def forward(self, add_4):
linear_4 = torch.nn.functional.linear(add_4, self.weight, bias=self.bias)
gelu = torch.nn.functional.gelu(linear_4)
return gelu
def check_meta_tensor(fx_g, _):
for node in fx_g.graph.nodes:
if node.op != 'output':
assert 'tensor_meta' in node.meta
return fx_g
inp0 = torch.randn(16, 128, 768, requires_grad=True)
inputs = [inp0, ]
mod = MockModule().to(device="cpu")
aot_mod = aot_module(mod, fw_compiler=check_meta_tensor)
aot_mod(*inputs)
def test_default_partitioner_getitem(self):
mod = nn.LayerNorm([10])
def f(x, mod_weight, mod_bias):
return torch.nn.functional.layer_norm(x, [10], mod_weight, mod_bias, eps=1e-6)
fw_graph, bw_graph = get_fw_bw_graph(f, [torch.randn(3, 10, requires_grad=True), mod.weight, mod.bias],
partitioner=default_partition)
self.assertEqual(get_num_ins_outs(fw_graph), (3, 6))
self.assertEqual(get_num_ins_outs(bw_graph), (6, 3))
@unittest.skipIf(not USE_NETWORKX, "networkx not available")
def test_min_cut_partitioner(self):
def f(x):
return x.cos().cos().cos()
fw_graph, bw_graph = get_fw_bw_graph(f, [torch.randn(3, requires_grad=True)])
self.assertEqual(get_num_ins_outs(fw_graph), (1, 2))
self.assertEqual(get_num_ins_outs(bw_graph), (2, 1))
def f(a, b, c, d):
x = a + b + c + d
return x.cos().cos()
fw_graph, bw_graph = get_fw_bw_graph(f, [torch.randn(3, requires_grad=True) for _ in range(4)])
self.assertEqual(get_num_ins_outs(fw_graph), (4, 2))
self.assertEqual(get_num_ins_outs(bw_graph), (2, 4))
def f(x):
return torch.mm(x, torch.ones(x.shape)).tanh().tanh()
fw_graph, bw_graph = get_fw_bw_graph(f, [torch.randn(5, 5, requires_grad=True)])
self.assertEqual(get_num_ins_outs(fw_graph), (1, 3))
ins, outs = get_ins_outs(fw_graph)
self.assertEqual(outs[1].target, torch.ops.aten.mm.default)
class TestContiguous(TestCase):
def test_contiguous(self):
# The test simulates the condition where transpose followed by view
# happens in the backward pass.
# https://discuss.pytorch.org/t/error-on-transpose-and-view/434
def f(x):
return x.view(2, 3).t()
inp = torch.randn(6, requires_grad=True)
out = aot_function(f, nop)(inp)
torch.autograd.grad(out, inp, torch.randn(3, 2))
class TestAOTModuleSimplified(TestCase):
def test_aot_module_simplified(self):
class MockModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(20, 30)
def forward(self, x, y):
return (self.linear(x) + y, )
mod = MockModule()
mod.zero_grad()
x = torch.randn(128, 20, requires_grad=True)
y = torch.randn(128, 30, requires_grad=True)
inputs = [x, y]
cloned_inputs = [x.detach().clone().requires_grad_(True) for x in inputs]
ref = mod(*inputs)
ref[0].sum().backward()
aot_mod = aot_module_simplified(mod, nop)
aot_mod.zero_grad()
res = aot_mod(*cloned_inputs)
res[0].sum().backward()
assert torch.allclose(ref[0], res[0])
assert torch.allclose(inputs[0].grad, cloned_inputs[0].grad)
assert torch.allclose(inputs[1].grad, cloned_inputs[1].grad)
def test_aot_module_simplified_preserves_stack_trace(self):
class MockModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(20, 30)
def forward(self, x, y):
return (self.linear(x) + y, )
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(MockModule())
mod = torch.fx.GraphModule(tracer.root, graph)
for node in mod.graph.nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_pythonkey.py' in node.stack_trace
def assert_compiler(gm: torch.fx.GraphModule, _):
for node in gm.graph.nodes:
if node.op == 'output' or node.op == 'placeholder':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_pythonkey.py' in node.stack_trace
return gm.forward # return a python callable
aot_mod = aot_module_simplified(mod, fw_compiler=assert_compiler, bw_compiler=nop)
x = torch.randn(128, 20, requires_grad=True)
y = torch.randn(128, 30, requires_grad=True)
inputs = [x, y]
res = aot_mod(*inputs)
class TestRandom(TestCase):
def test_preserve_random(self):
def fn(x):
return torch.nn.functional.dropout(x, 0.5) + x
x = torch.randn(4)
torch.manual_seed(0)
ref = fn(x)
torch.manual_seed(0)
aot_fn = aot_function(fn, nop)
res = aot_fn(x)
assert torch.allclose(ref, res)
class TestAutocast(TestCase):
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is unavailable")
@unittest.skipIf(not USE_TORCHVISION, "test requires torchvision")
def test_autocast(self):
mod = torchvision.models.resnet18().cuda()
mod.train()
x = torch.randn(16, 3, 32, 32, device="cuda")
aot_mod = memory_efficient_fusion(mod)
# Ensure that AOT Autograd works with AMP
with torch.cuda.amp.autocast(True):
res = aot_mod(x)
res.sum().backward()
only_for = ("cpu")
instantiate_device_type_tests(
TestPythonKey,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(TestEagerFusionOpInfo, globals(), only_for=only_for)
if __name__ == '__main__':
run_tests()
| pytorch-master | functorch/test/test_pythonkey.py |
from functools import partial
import itertools
import unittest
import torch
from torch.testing import \
(floating_types, floating_types_and, all_types_and_complex_and)
from torch.testing._internal.common_utils import make_tensor
from torch.testing._internal.common_methods_invocations import OpInfo, SampleInput, DecorateInfo
# List of OpInfos that aren't in PyTorch Core yet.
# They are here because we wanted a fast way of writing OpInfos and may not be
# 100% correct (w.r.t. dtypes and other options).
# TODO: Figure out how to upstream these, delete them when they're upstreamed
additional_op_db = []
# https://github.com/pytorch/pytorch/pull/61068
def sample_inputs_conv2d(has_bias, self, device, dtype, requires_grad, extra_args=(), groups=1):
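    """Build one conv2d SampleInput; extra_args are appended positionally after (weight, bias), e.g. stride/padding/dilation/groups."""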
in_ch, out_ch = 6, 4
inp = make_tensor((2, in_ch * groups, 7, 5), device=device, dtype=dtype,
requires_grad=requires_grad, low=-1, high=1)
weight = make_tensor((out_ch * groups, in_ch, 3, 2), device=device, dtype=dtype,
requires_grad=requires_grad, low=-1, high=1)
bias = None
if has_bias:
bias = make_tensor((out_ch * groups,), device=device, dtype=dtype,
requires_grad=requires_grad, low=-1, high=1)
return [SampleInput(inp, args=((weight, bias) + extra_args))]
additional_op_db.extend([
OpInfo('nn.functional.conv2d',
aten_name="conv2d",
variant_test_name='no_bias',
supports_autograd=True,
supports_forward_ad=True,
sample_inputs_func=partial(sample_inputs_conv2d, False),
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
supports_out=False),
OpInfo('nn.functional.conv2d',
aten_name="conv2d",
variant_test_name='with_bias',
supports_autograd=True,
supports_forward_ad=True,
sample_inputs_func=partial(sample_inputs_conv2d, True),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
dtypes=floating_types(),
supports_out=False),
OpInfo('nn.functional.conv2d',
aten_name="conv2d",
variant_test_name='stride_with_bias',
supports_autograd=True,
supports_forward_ad=True,
           sample_inputs_func=partial(sample_inputs_conv2d, True, extra_args=((2, 2),)),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
dtypes=floating_types(),
supports_out=False),
OpInfo('nn.functional.conv2d',
aten_name="conv2d",
variant_test_name='stride_no_bias',
supports_autograd=True,
supports_forward_ad=True,
           sample_inputs_func=partial(sample_inputs_conv2d, False, extra_args=((2, 2),)),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
dtypes=floating_types(),
supports_out=False),
OpInfo('nn.functional.conv2d',
aten_name="conv2d",
variant_test_name='stride_padding_with_bias',
supports_autograd=True,
supports_forward_ad=True,
sample_inputs_func=partial(sample_inputs_conv2d, True, extra_args=((2, 2), (1, 1))),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
dtypes=floating_types(),
supports_out=False),
OpInfo('nn.functional.conv2d',
aten_name="conv2d",
variant_test_name='stride_padding_no_bias',
supports_autograd=True,
supports_forward_ad=True,
sample_inputs_func=partial(sample_inputs_conv2d, False, extra_args=((2, 2), (1, 1))),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
dtypes=floating_types(),
supports_out=False),
OpInfo('nn.functional.conv2d',
aten_name="conv2d",
variant_test_name='strided_padding_dilation_with_bias',
supports_autograd=True,
supports_forward_ad=True,
sample_inputs_func=partial(sample_inputs_conv2d, True, extra_args=((2, 2), (1, 1), (2, 2))),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
dtypes=floating_types(),
supports_out=False),
OpInfo('nn.functional.conv2d',
aten_name="conv2d",
variant_test_name='strided_padding_dilation_no_bias',
supports_autograd=True,
supports_forward_ad=True,
           sample_inputs_func=partial(sample_inputs_conv2d, False, extra_args=((2, 2), (1, 1), (2, 2))),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
dtypes=floating_types(),
supports_out=False),
OpInfo('nn.functional.conv2d',
aten_name="conv2d",
variant_test_name='stride_groups_with_bias',
supports_autograd=True,
supports_forward_ad=True,
sample_inputs_func=partial(sample_inputs_conv2d, True, extra_args=((2, 3), 0, 1, 2), groups=2),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
dtypes=floating_types(),
supports_out=False),
OpInfo('nn.functional.conv2d',
aten_name="conv2d",
variant_test_name='stride_depthwise_with_bias',
supports_autograd=True,
supports_forward_ad=True,
sample_inputs_func=partial(sample_inputs_conv2d, True, extra_args=((2, 3), 0, 1, 6), groups=6),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
dtypes=floating_types(),
supports_out=False),
])
# TODO: PyTorch core has a check for whether requires_grad=True or not.
# We actually want to test more things for backward here, which is why we have our own.
def sample_inputs_embedding(op_info, device, dtype, requires_grad, **kwargs):
def make_input(shape):
return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)
def make_long_input(shape, *, low, high):
return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high)
M = 20
S = 5
def generator():
# 0-D index tensor
idx = make_long_input((), low=0, high=M)
yield SampleInput(make_input((M, S)), args=(idx,),)
# 1-D index tensor
idx = make_long_input((S,), low=0, high=M)
yield SampleInput(make_input((M, S)), args=(idx,),)
# 2-D index tensor
idx = make_long_input((S, S), low=0, high=M)
yield SampleInput(make_input((M, S)), args=(idx,),)
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 2
idx[1, 1] = 2
yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': 2},)
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 4
idx[1, 1] = 4
yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1},)
# Scale the gradient based on the inverse frequency of a particular index.
idx = make_long_input((2, 2), low=0, high=S)
idx[0, 0] = 1
idx[0, 1] = 1
weights = make_input((S, S))
yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True},)
return list(generator())
additional_op_db.append(
OpInfo(
"nn.functional.embedding",
variant_test_name="functorch",
# We use lambda to reshuffle the positional arguments.
# This is because currently only the `input` field of SampleInput
# is tested in gradient tests.
op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(idx, weight, **kwargs),
dtypes=floating_types_and(torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_embedding,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
supports_out=False,
))
def sample_inputs_mse_loss(op_info, device, dtype, requires_grad, **kwargs):
def make_input(shape, requires_grad=requires_grad):
return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)
rhs_requires_grad = kwargs.get('rhs_requires_grad', requires_grad)
S = 5
shapes = ((S, S), (S, S, S), (S, S, S, S))
reductions = ("none", "mean", "sum")
for shape, reduction in itertools.product(shapes, reductions):
yield SampleInput(make_input(shape),
args=(make_input(shape, requires_grad=rhs_requires_grad),),
kwargs={"reduction": reduction})
additional_op_db.append(
OpInfo(
"nn.functional.mse_loss",
variant_test_name="functorch",
sample_inputs_func=sample_inputs_mse_loss,
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.float16),
backward_dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
backward_dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
))
# TODO: upstream sample inputs to pytorch/pytorch.
# We are more comprehensive.
def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):
# Short for "advanced index"
adv_idx = torch.LongTensor([[0, 1], [2, 3]])
S = 5
# self_dim, indices
test_args = [
(3, ([1, 2],)),
(3, (slice(0, 3),)),
(3, ([slice(0, 3), 1],)),
(3, ([[0, 2, 3], [1, 3, 3], [0, 0, 2]],)),
(3, ([[0, 0, 3], [1, 1, 3], [0, 0, 2]],)),
(3, ([slice(None), slice(None), [0, 3]],)),
(3, ([slice(None), [0, 3], slice(None)],)),
(3, ([[0, 3], slice(None), slice(None)],)),
(3, ([[0, 3], [1, 2], slice(None)],)),
(3, ([[0, 3], ],)),
(3, ([[0, 3], slice(None)],)),
(3, ([[0, 3], Ellipsis],)),
(3, ([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])],)),
(4, ([slice(None), adv_idx, adv_idx, slice(None)],)),
(4, ([slice(None), adv_idx, slice(None), adv_idx],)),
(4, ([adv_idx, slice(None), slice(None), adv_idx],)),
(4, ([slice(None), slice(None), adv_idx, adv_idx],)),
(4, ([Ellipsis, adv_idx, adv_idx],)),
(5, ([slice(None), slice(None), adv_idx, slice(None), adv_idx],)),
(5, ([slice(None), slice(None), adv_idx, adv_idx, slice(None)],)),
(5, ([slice(None), slice(None), adv_idx, None, adv_idx, slice(None)],)),
(6, ([slice(None), slice(None), slice(None), adv_idx, adv_idx],)),
(6, ([slice(None), slice(None), adv_idx, adv_idx, adv_idx],)),
(6, ([slice(None), slice(None), None, adv_idx, adv_idx, adv_idx],)),
]
def get_shape(dim):
return tuple(S + i for i in range(dim))
return tuple(SampleInput(
make_tensor(get_shape(self_dim), device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad),
args=args)
for self_dim, args in test_args)
# TODO: split PyTorch's __getitem__. The problem is we don't support indexing
# with masks with vmap.
additional_op_db.append(
OpInfo('__getitem__',
variant_test_name='functorch',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_inplace_autograd=False,
supports_scripting=False,
op=torch.Tensor.__getitem__,
assert_jit_shape_analysis=False, # TODO: support index.Tensor()
supports_forward_ad=True,
sample_inputs_func=sample_inputs_getitem,))
# Turns out at::index_put is different from torch.index_put...
# TODO: figure out how to upstream this
def sample_inputs_aten_index_put(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
inputs = []
adv_idx = torch.LongTensor([[0, 1], [2, 3]])
# self_shape, indices
additional = [
((5, 6, 7, 8), [None, adv_idx, adv_idx, None]),
((5, 6, 7, 8), [None, adv_idx, None, adv_idx]),
((5, 6, 7, 8), [adv_idx, None, None, adv_idx]),
((5, 6, 7, 8), [None, None, adv_idx, adv_idx]),
((5, 6, 7, 8, 9), [None, None, adv_idx, None, adv_idx]),
((5, 6, 7, 8, 9), [None, None, adv_idx, adv_idx, None]),
((5, 6, 7, 8, 9, 10), [None, None, None, adv_idx, adv_idx]),
((5, 6, 7, 8, 9, 10), [None, None, adv_idx, adv_idx, adv_idx]),
]
for self_shape, indices in additional:
for broadcast_value in [False, True]:
inp = make_arg(self_shape)
tmp_indices = [slice(None) if idx is None else idx for idx in indices]
values_shape = inp[tmp_indices].shape
if broadcast_value:
values_shape = values_shape[3:]
values = make_arg(values_shape)
inputs.append(SampleInput(inp, args=(tuple(indices), values)))
return inputs
def sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
make_idx = partial(make_tensor, dtype=torch.long, device=device, requires_grad=False)
S = 5
inputs = []
for accumulate in [False, True]:
# putting vectors at indexed locations
inputs.append(SampleInput(
make_arg((S, S)),
args=((make_idx((2,), low=0, high=4),), make_arg((2, S))),
kwargs=dict(accumulate=accumulate)))
# putting multi-dim tensors at indexed locations
inputs.append(SampleInput(
make_arg((S, S, 2)),
args=((make_idx((3,), low=0, high=4),), make_arg((3, S, 2))),
kwargs=dict(accumulate=accumulate)))
# value with size `0` dim
inputs.append(SampleInput(
make_arg((S, 0)),
args=((make_idx((3,), low=0, high=4),), make_arg((3, 0))),
kwargs=dict(accumulate=accumulate)))
# scalar value
inputs.append(SampleInput(
make_arg((S,)),
args=((make_idx((), low=0, high=S),), make_arg(())),
kwargs=dict(accumulate=accumulate)))
# cuda and accumulate don't work well
# Reference: https://github.com/pytorch/pytorch/issues/72053
if not accumulate and device == 'cuda':
# Broadcast `values`
inputs.append(SampleInput(
make_arg((S, S)),
args=((make_idx((2,), low=0, high=S),), make_arg((S,))),
kwargs=dict(accumulate=accumulate)))
return inputs
additional_op_db.append(
OpInfo(
"index_put",
variant_test_name='functorch',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_index_put,
supports_forward_ad=True,
))
additional_op_db.append(
OpInfo(
"ops.aten.index_put",
variant_test_name='functorch',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
sample_inputs_func=sample_inputs_aten_index_put,
supports_forward_ad=True,
))
def sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs):
S = 3
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10))
yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10))
yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10))
yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10))
yield SampleInput(make_arg((S,)),
args=(torch.randn(S, S, device=device) > 0, 10),
broadcasts_input=True)
additional_op_db.append(
OpInfo('masked_fill',
variant_test_name='functorch_Scalar_only',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf),
sample_inputs_func=sample_inputs_masked_fill,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
check_batched_forward_grad=False,
supports_out=False)
)
def sample_inputs_new_zeros_with_same_feature_meta(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
matrix = [
# tangent, base, num_tangent_bdims
([5], [2, 3], 0),
([2, 3], [2, 3], 0),
([5], [2], 0),
([1, 0, 2], [1, 2], 0),
([], [1, 2], 0),
([8, 7, 5], [2, 3, 11], 1),
([6, 7, 5], [2, 3, 4], 2),
([6, 4], [3], 2),
]
results = []
for tangent_shape, base_shape, num_tangent_bdims in matrix:
tangent = make_arg(tangent_shape)
base = make_arg(base_shape)
results.append(SampleInput(
tangent,
args=(base,),
kwargs=dict(self_num_batch_dims=num_tangent_bdims)))
return results
additional_op_db.append(
OpInfo(
"ops.aten._new_zeros_with_same_feature_meta",
variant_test_name='functorchonly',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_out=False,
supports_autograd=False,
supports_forward_ad=False,
sample_inputs_func=sample_inputs_new_zeros_with_same_feature_meta,
))
def sample_inputs_conversion(op_info, device, dtype, requires_grad, **kwargs):
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
shapes = ((),
(2, 3))
memory_format_options = [None, torch.contiguous_format]
for shape, memory_format in itertools.product(shapes, memory_format_options):
yield SampleInput(make_arg(shape),
kwargs={'memory_format': memory_format} if memory_format else {})
additional_op_db.extend([
OpInfo('bfloat16',
op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='functorch_no_channels_last',
sample_inputs_func=sample_inputs_conversion,
skips=(
# autograd tests don't handle operators that change dtype
DecorateInfo(unittest.expectedFailure, 'TestGradients'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'),
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo', 'test_nvfuser_correctness'),
)),
OpInfo('bool',
op=lambda x, *args, **kwargs: x.bool(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='functorch_no_channels_last',
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('byte',
op=lambda x, *args, **kwargs: x.byte(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='functorch_no_channels_last',
sample_inputs_func=sample_inputs_conversion,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('char',
op=lambda x, *args, **kwargs: x.char(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='functorch_no_channels_last',
sample_inputs_func=sample_inputs_conversion,
# The autograd test runner cannot handle functions that change dtype
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('double',
op=lambda x, *args, **kwargs: x.double(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='functorch_no_channels_last',
sample_inputs_func=sample_inputs_conversion,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('float',
op=lambda x, *args, **kwargs: x.float(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='functorch_no_channels_last',
sample_inputs_func=sample_inputs_conversion,
skips=(
# autograd tests don't handle operators that change dtype
DecorateInfo(unittest.expectedFailure, 'TestGradients'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('half',
op=lambda x, *args, **kwargs: x.half(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='functorch_no_channels_last',
sample_inputs_func=sample_inputs_conversion,
skips=(
# autograd tests don't handle operators that change dtype
DecorateInfo(unittest.expectedFailure, 'TestGradients'),
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('int',
op=lambda x, *args, **kwargs: x.int(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='functorch_no_channels_last',
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('long',
op=lambda x, *args, **kwargs: x.long(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='functorch_no_channels_last',
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('short',
op=lambda x, *args, **kwargs: x.short(*args, **kwargs),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_out=False,
variant_test_name='functorch_no_channels_last',
sample_inputs_func=sample_inputs_conversion,
supports_autograd=False,
skips=(
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
# RuntimeError: attribute lookup is not defined on builtin
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
)),
])
| pytorch-master | functorch/test/functorch_additional_op_db.py |
# Owner(s): ["module: functorch"]
import torch
import torch.nn as nn
import torch.fx as fx
from functorch import make_fx
from torch.nn import functional as F
from functorch.compile import memory_efficient_fusion
from functorch._src.compile_utils import fx_graph_cse
from torch.testing._internal.common_utils import TestCase, run_tests
import inspect
import random
from typing import Callable
import unittest
HAS_CUDA = torch.cuda.is_available()
def _num_args(fn: Callable):
return len(inspect.signature(fn).parameters)
def gelu_bias(bias, y):
x = bias + y
return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
def swish(x):
return x * torch.sigmoid(x)
def mish(x):
return x.mul(torch.tanh(F.softplus(x)))
def hard_sigmoid(x):
return (x + 3.0).clamp(min=0.0, max=6.0).div(6.0)
def hard_swish(x):
return x * (x + 3.0).clamp(min=0.0, max=6.0).div(6.0)
def hard_mish(x):
return 0.5 * x * (x + 2.0).clamp(min=0.0, max=2.0)
# todo: convert these into tests
# def group_std(x, groups: int = 32, eps: float = 1e-5, flatten: bool = False):
# B, C, H, W = x.shape
# x_dtype = x.dtype
# if flatten:
# x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues
# std = x.float().var(dim=2, unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype)
# else:
# x = x.reshape(B, groups, C // groups, H, W)
# std = x.float().var(dim=(2, 3, 4), unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype)
# return std.expand(x.shape).reshape(B, C, H, W)
# class EvoNorm2dS0(nn.Module):
# def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-5, **_):
# super().__init__()
# self.apply_act = apply_act # apply activation (non-linearity)
# if group_size:
# assert num_features % group_size == 0
# self.groups = num_features // group_size
# else:
# self.groups = groups
# self.eps = eps
# self.weight = nn.Parameter(torch.ones(num_features))
# self.bias = nn.Parameter(torch.zeros(num_features))
# self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None
# self.reset_parameters()
# def reset_parameters(self):
# nn.init.ones_(self.weight)
# nn.init.zeros_(self.bias)
# if self.v is not None:
# nn.init.ones_(self.v)
# def forward(self, x):
# x_dtype = x.dtype
# v_shape = (1, -1, 1, 1)
# if self.v is not None:
# v = self.v.view(v_shape).to(dtype=x_dtype)
# x = x * (x * v).sigmoid() / group_std(x, self.groups, self.eps)
# return x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype)
# device = "cuda"
# dtype = torch.float
# evo_norm = EvoNorm2dS0(2048)
# evo_norm_inp = [(128, 2048, 8, 8)]
def run_and_compare_activation(self, fn, inps):
with torch.jit.fuser("fuser1"):
device = "cuda"
dtype = torch.float
if isinstance(fn, nn.Module):
fn = fn.to(device=device, dtype=dtype)
ref_args = [torch.randn(shape, device=device, dtype=dtype, requires_grad=True) for shape in inps]
res_args = [i.clone().detach().requires_grad_(True) for i in ref_args]
ref = fn(*ref_args)
ref.sum().backward()
mem_optimized_fn = memory_efficient_fusion(fn)
for _ in range(5):
for i in res_args:
i.grad = None
res = mem_optimized_fn(*res_args)
res.sum().backward()
self.assertEqual(ref, res)
for ref_arg, res_arg in zip(ref_args, res_args):
self.assertEqual(ref_arg.grad, res_arg.grad)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is unavailable")
class TestMemoryEfficientOpAuthoring(TestCase):
def test_gelu_bias(self):
run_and_compare_activation(self, gelu_bias, [(1024,), (1024,)])
def test_mish(self):
run_and_compare_activation(self, mish, [(1024,)])
def test_swish(self):
run_and_compare_activation(self, swish, [(1024,)])
def test_hard_sigmoid(self):
run_and_compare_activation(self, hard_sigmoid, [(1024,)])
def test_hard_swish(self):
run_and_compare_activation(self, hard_swish, [(1024,)])
def test_layer_norm(self):
def layer_norm(x, weight, bias):
dim = -1
eps = 1e-5
mean = torch.mean(x, dim, keepdim=True)
centered = x - mean
var = torch.sum(centered * centered, dim, keepdim=True) / x.size(-1)
rvar = 1. / torch.sqrt(var + eps)
normed = (x - mean) * rvar
return normed * weight + bias
bs = 10
ln_size = 16
layer_norm_inps = [(bs, ln_size), (ln_size,), (ln_size,)]
run_and_compare_activation(self, layer_norm, layer_norm_inps)
def test_rmsnorm(self):
class T5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
                Construct a layernorm module in the T5 style: no bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
# layer norm should always be calculated in float32
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
bs = 256
seq = 256
hidden = 1024
t5_norm = T5LayerNorm(hidden)
t5_norm_inputs = [(bs, seq, hidden)]
run_and_compare_activation(self, t5_norm, t5_norm_inputs)
# TODO - Assertion failure
# def test_hard_mish(self):
# for compiler in compilers:
# run_and_compare_activation(hard_mish, 1024)
# Check that the CSE-modified graph of f has `delta` fewer nodes, and that a
# second pass does not reduce the node count any further.
# delta is an integer >= -1. If delta == -1, only check that the new graph
# has at most as many nodes as the original graph.
def check(f, t, delta, check_val=True, graph_input=False):
if graph_input:
fx_g = f
else:
fx_g = make_fx(f)(t)
new_graph = fx_graph_cse(fx_g.graph)
new_g = fx.GraphModule(fx_g, new_graph)
    # the number of nodes should decrease or stay the same
old_num_nodes = len(fx_g.graph.nodes)
new_num_nodes = len(new_graph.nodes)
if delta == -1:
assert old_num_nodes >= new_num_nodes, (
f"number of nodes increased {old_num_nodes}, {new_num_nodes}")
else:
assert old_num_nodes == new_num_nodes + delta, (
f"number of nodes not the same {old_num_nodes - delta}, {new_num_nodes}\n {fx_g.graph} \n {new_graph}")
    # a second pass should not remove any additional nodes
pass_2_graph = fx_graph_cse(new_graph)
pass_2_num_nodes = len(pass_2_graph.nodes)
    assert pass_2_num_nodes == new_num_nodes, (
        f"second pass graph has fewer nodes {pass_2_num_nodes}, {new_num_nodes}\n {new_graph} \n {pass_2_graph}")
# check correctness
if check_val:
true_result = fx_g(t)
our_result = new_g(t)
if true_result is None: # both return None
assert our_result is None, f"true result is None, CSE result is {our_result}"
else: # results returned are the same
assert torch.all(true_result == our_result), (
f"results are different {true_result}, {our_result}") # check results are the same
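# Illustrative usage sketch (hypothetical helper, not part of the original
# test file and never invoked): the two identical x.cos() calls below should
# be merged by fx_graph_cse, removing one node, so `delta` is expected to be 1
# (assuming make_fx records the duplicate cos call as a separate node).
def _example_check_cse_usage():
    def f(x):
        return x.cos() + x.cos()
    t = torch.randn(2, 2)
    check(f, t, 1)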
class NoChangeTestCase(TestCase):
def test_nochange(self):
def f(x):
a = x + 1
b = x + a
a = x
d = x + a
return b + d
t = torch.randn(2, 2)
check(f, t, 0)
def test_empty(self):
def f(x):
pass
t = torch.randn(2, 2)
check(f, t, 0)
def test_rand_like(self):
def f(x):
a = torch.rand_like(x)
b = torch.rand_like(x)
return a + b
t = torch.randn(2, 2)
check(f, t, 0, check_val=False)
def test_rand_n(self):
def f(x):
a = torch.randn(4)
b = torch.randn(4)
return a + b
t = torch.randn(2, 2)
check(f, t, 0, check_val=False)
class ReduceTestCase(TestCase):
def test_immutable_list_type(self):
def f(x):
a = x.sum(dim=1)
b = x.sum(dim=1)
c = x.sum()
d = x.sum()
return a + b + c + d
t = torch.randn(2, 2)
check(f, t, 2)
def test_immutable_list_multiple_entries(self):
def f(x):
a = x.sum(dim=[0, 1])
b = x.sum(dim=[0, 1])
c = x.sum(dim=1)
d = x.sum(dim=1)
return a + b + c + d
t = torch.randn(2, 2)
check(f, t, 2)
def test_simple(self):
def f(x):
a = x.cos()
b = x.cos()
c = a + a
d = b + b
return c + d
t = torch.randn(2, 2)
check(f, t, 2)
def test_simple_2(self):
def f(x):
a = x.cos().sin()
b = x.cos().sin()
c = a + a
d = b + b
return c + d
t = torch.randn(1)
check(f, t, 3)
def test_two_args_default(self):
def f(x):
a = x.sum(dim=1)
b = x.sum(dim=1, keepdim=False)
c = x.sum(dim=1, keepdim=False)
d = x.sum(dim=1)
return a + b + c + d
t = torch.randn(2, 2)
check(f, t, 3)
def test_two_args(self):
def f(x):
a = x.sum(dim=1)
b = x.sum(dim=1, keepdim=True)
c = x.sum(dim=1, keepdim=True)
d = x.sum(dim=1)
return a + b + c + d
t = torch.randn(2, 2)
check(f, t, 2)
def test_simple_multiple_same_ops(self):
def f(x):
a = x.sum()
b = x.sum()
c = x.sum()
d = x.sum()
return a + b + c + d
t = torch.randn(2, 2)
check(f, t, 3)
def test_nested_immutable_list_type(self):
def f(x):
a = torch.cat((x, x))
b = torch.cat((x, x))
return a + b
t = torch.randn(2, 2)
check(f, t, 1)
def test_kwarg(self):
def f(x):
a = torch.ones_like(x)
b = torch.ones_like(x)
return a + b
t = torch.randn(2, 2)
check(f, t, 1)
class RandomOpTestCase(TestCase):
def test_random(self):
def f(x):
vals = [x]
ops = [torch.clone, torch.cos, torch.tanh, torch.nn.functional.gelu]
for _ in range(100):
new_val = random.choice(ops)(random.choice(vals))
vals.append(new_val)
return vals[-1]
fx_g = fx.symbolic_trace(f)
fx_g.graph.eliminate_dead_code()
fx_g.recompile()
t = torch.randn(2, 2)
for _ in range(30):
check(fx_g, t, -1, graph_input=True)
if __name__ == "__main__":
run_tests()
| pytorch-master | functorch/test/test_memory_efficient_fusion.py |
# Owner(s): ["module: functorch"]
import torch
import functorch
from torch.testing._internal.common_utils import run_tests, TestCase, IS_WINDOWS
import unittest
from functorch.compile import aot_function, nop
class TestCompileCache(TestCase):
def check(self, a, b, aot_fn, fn):
a_clone = a.clone().detach().requires_grad_(True)
b_clone = b.clone().detach().requires_grad_(True)
ref = fn(a, b)
ref.sum().backward()
res = aot_fn(a_clone, b_clone)
res.sum().backward()
assert torch.allclose(res, ref)
assert torch.allclose(a.grad, a_clone.grad)
assert torch.allclose(b.grad, b_clone.grad)
def test_recompilation_on_broadcast(self):
def fn(x, bias):
return x + bias
for hasher_type in ["DynamicShapeHasher", "StaticShapeHasher"]:
functorch.compile.clear_compile_cache()
start_num_recomps = functorch.compile.num_of_recompilations()
aot_autograd_fn = aot_function(fn, nop, nop, hasher_type=hasher_type)
a = torch.randn(10, 20, requires_grad=True)
b = torch.randn(20, requires_grad=True)
self.check(a, b, aot_autograd_fn, fn)
a = torch.randn(10, 20, requires_grad=True)
b = torch.randn(10, 20, requires_grad=True)
self.check(a, b, aot_autograd_fn, fn)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 2
def test_compilation_for_dynamic_shape(self):
def fn(x, bias):
return x + bias
for hasher_type in ["DynamicShapeHasher", "StaticShapeHasher"]:
functorch.compile.clear_compile_cache()
start_num_recomps = functorch.compile.num_of_recompilations()
aot_autograd_fn = aot_function(fn, nop, nop, hasher_type=hasher_type)
for s in range(10, 20):
a = torch.randn(s, requires_grad=True)
b = torch.randn(s, requires_grad=True)
self.check(a, b, aot_autograd_fn, fn)
for s in range(10, 20):
a = torch.randn(s, requires_grad=True)
b = torch.randn(s, requires_grad=True)
self.check(a, b, aot_autograd_fn, fn)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
if hasher_type == "DynamicShapeHasher":
assert total_recomps == 1
elif hasher_type == "StaticShapeHasher":
assert total_recomps == 10
for s in range(10, 20):
a = torch.randn(s, s, requires_grad=True)
b = torch.randn(s, s, requires_grad=True)
self.check(a, b, aot_autograd_fn, fn)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
if hasher_type == "DynamicShapeHasher":
assert total_recomps == 2
elif hasher_type == "StaticShapeHasher":
assert total_recomps == 20
def test_global_cache_no_recompilations(self):
def f(x, bias):
return x + bias
def g(x, bias):
return aot_function(f, nop, nop, hasher_type="DynamicShapeHasher")(x, bias)
start_num_recomps = functorch.compile.num_of_recompilations()
for _ in range(10):
a = torch.randn(10, 20, requires_grad=True)
b = torch.randn(10, 20, requires_grad=True)
self.check(a, b, g, f)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 1
def test_multiple_functions(self):
def f(x, bias):
return x + bias
def g(x, y):
return x * y
for hasher_type in ["DynamicShapeHasher", "StaticShapeHasher"]:
functorch.compile.clear_compile_cache()
aot_autograd_f = aot_function(f, nop, nop, hasher_type=hasher_type)
aot_autograd_g = aot_function(g, nop, nop, hasher_type=hasher_type)
start_num_recomps = functorch.compile.num_of_recompilations()
a = torch.randn(10, requires_grad=True)
b = torch.randn(10, requires_grad=True)
self.check(a, b, aot_autograd_f, f)
a = torch.randn(10, requires_grad=True)
b = torch.randn(10, requires_grad=True)
self.check(a, b, aot_autograd_g, g)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 2
# Force recompilation for function f and check num of recompilations again
a = torch.randn(10, 20, requires_grad=True)
b = torch.randn(10, 20, requires_grad=True)
self.check(a, b, aot_autograd_f, f)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 3
def test_high_number_of_args(self):
def f(*args):
res = args[0]
for arg in args:
res = res * arg
return res
def check(args, aot_autograd_fn, fn):
args_clone = [arg.clone().detach().requires_grad_(True) for arg in args]
ref = fn(*args)
ref.sum().backward()
res = aot_autograd_fn(*args_clone)
res.sum().backward()
assert torch.allclose(res, ref)
for (arg, arg_clone) in zip(args, args_clone):
assert torch.allclose(arg.grad, arg_clone.grad)
for hasher_type in ["DynamicShapeHasher", "StaticShapeHasher"]:
functorch.compile.clear_compile_cache()
aot_autograd_f = aot_function(f, nop, nop, hasher_type=hasher_type)
args = [torch.randn(10, requires_grad=True) for _ in range(100)]
check(args, aot_autograd_f, f)
def test_multiple_compiler(self):
def fn(x, bias):
return x + bias
def nop_duplicate(fx_g, _):
return fx_g
for hasher_type in ["DynamicShapeHasher", "StaticShapeHasher"]:
functorch.compile.clear_compile_cache()
start_num_recomps = functorch.compile.num_of_recompilations()
nop_fn = aot_function(fn, nop, nop, hasher_type=hasher_type)
nop_duplicate_fn = aot_function(
fn, nop_duplicate, nop_duplicate, hasher_type=hasher_type
)
a = torch.randn(10, 20, requires_grad=True)
b = torch.randn(20, requires_grad=True)
nop_fn(a, b)
nop_duplicate_fn(a, b)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 2
@unittest.skipIf(IS_WINDOWS, 'test broken on windows')
class TestCompileCacheStaticArgs(TestCase):
def check(self, a, b, aot_autograd_fn, fn):
a_clone = a.clone().detach().requires_grad_(True)
ref = fn(a, b)
ref.sum().backward()
res = aot_autograd_fn(a_clone, b)
res.sum().backward()
assert torch.allclose(res, ref)
assert torch.allclose(a.grad, a_clone.grad)
def test_failure(self):
        # Test that not setting up static_argnums should raise an exception
def fn(x, p):
return x * p
aot_autograd_f = aot_function(fn, nop, nop)
a = torch.randn(2, 2, requires_grad=True)
b = 2
try:
            # Since b is not marked as static, it should raise an exception
aot_autograd_f(a, b)
raise AssertionError()
except RuntimeError:
pass
def test_simple(self):
def fn(x, static_arg):
return x * static_arg
functorch.compile.clear_compile_cache()
start_num_recomps = functorch.compile.num_of_recompilations()
aot_autograd_f = aot_function(fn, nop, nop, static_argnums=1)
a = torch.randn(2, 2, requires_grad=True)
b = 2
self.check(a, b, aot_autograd_f, fn)
# Same type of args, so no recompilation
a = torch.randn(2, 2, requires_grad=True)
b = 2
self.check(a, b, aot_autograd_f, fn)
# Trigger recompilation
a = torch.randn(2, 2, requires_grad=True)
b = 3
self.check(a, b, aot_autograd_f, fn)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 2
def test_static_arg_before_tensor_arg(self):
def fn(static_arg, x):
return static_arg - x
def check(a, b, aot_autograd_fn, fn):
b_clone = b.clone().detach().requires_grad_(True)
ref = fn(a, b)
ref.sum().backward()
res = aot_autograd_fn(a, b_clone)
res.sum().backward()
assert torch.allclose(res, ref)
assert torch.allclose(b.grad, b_clone.grad)
functorch.compile.clear_compile_cache()
start_num_recomps = functorch.compile.num_of_recompilations()
aot_autograd_f = aot_function(fn, nop, nop, static_argnums=0)
a = 2
b = torch.randn(2, 2, requires_grad=True)
check(a, b, aot_autograd_f, fn)
a = 3
b = torch.randn(2, 2, requires_grad=True)
check(a, b, aot_autograd_f, fn)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 2
def test_interleaved_static_args(self):
def fn(static_arg1, x, static_arg2):
return static_arg1 - x - static_arg2
def check(a, b, c, aot_autograd_fn, fn):
b_clone = b.clone().detach().requires_grad_(True)
ref = fn(a, b, c)
ref.sum().backward()
res = aot_autograd_fn(a, b_clone, c)
res.sum().backward()
assert torch.allclose(res, ref)
assert torch.allclose(b.grad, b_clone.grad)
functorch.compile.clear_compile_cache()
start_num_recomps = functorch.compile.num_of_recompilations()
aot_autograd_f = aot_function(fn, nop, nop, static_argnums=(0, 2))
a = 2
b = torch.randn(2, 2, requires_grad=True)
c = 0.1
check(a, b, c, aot_autograd_f, fn)
a = 3
b = torch.randn(2, 2, requires_grad=True)
c = 0.1
check(a, b, c, aot_autograd_f, fn)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 2
def test_dropout(self):
def fn(x, prob):
return torch.nn.functional.dropout(x, p=prob)
functorch.compile.clear_compile_cache()
start_num_recomps = functorch.compile.num_of_recompilations()
aot_autograd_f = aot_function(fn, nop, nop, static_argnums=[1])
a = torch.randn(2, 2, requires_grad=True)
b = 0.3
aot_autograd_f(a, b)
# Setting the prob to 0. This should cause recompilation.
a = torch.randn(2, 2, requires_grad=True)
b = 0
self.check(a, b, aot_autograd_f, fn)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 2
def test_if_condition(self):
def fn(x, state: bool):
if state:
return torch.sin(x)
else:
return torch.cos(x)
functorch.compile.clear_compile_cache()
start_num_recomps = functorch.compile.num_of_recompilations()
aot_autograd_f = aot_function(fn, nop, nop, static_argnums=[1])
a = torch.randn(2, 2, requires_grad=True)
b = True
self.check(a, b, aot_autograd_f, fn)
a = torch.randn(2, 2, requires_grad=True)
b = True
self.check(a, b, aot_autograd_f, fn)
a = torch.randn(2, 2, requires_grad=True)
b = False
self.check(a, b, aot_autograd_f, fn)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 2
def test_custom(self):
class Record:
def __init__(self, name, multiplier):
self.name = name
self.multiplier = multiplier
def __eq__(self, other):
return self.name == other.name and self.multiplier == other.multiplier
def __hash__(self):
return hash((self.name, self.multiplier))
def fn(x, record):
return x * record.multiplier
functorch.compile.clear_compile_cache()
start_num_recomps = functorch.compile.num_of_recompilations()
aot_autograd_f = aot_function(fn, nop, nop, static_argnums=[1])
a = torch.randn(2, 2, requires_grad=True)
b = Record("Foo", 0.5)
self.check(a, b, aot_autograd_f, fn)
a = torch.randn(2, 2, requires_grad=True)
b = Record("Bar", 10.2)
self.check(a, b, aot_autograd_f, fn)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 2
def test_tuple(self):
def fn(a_tuple, static_arg):
return torch.sin(a_tuple[0]) - a_tuple[1] - static_arg
def check(a_tuple, b, aot_autograd_fn, fn):
a0 = a_tuple[0]
a1 = a_tuple[1]
a0_clone = a0.clone().detach().requires_grad_(True)
a1_clone = a1.clone().detach().requires_grad_(True)
            ref = fn(a_tuple, b)
ref.sum().backward()
res = aot_autograd_fn((a0_clone, a1_clone), b)
res.sum().backward()
assert torch.allclose(res, ref)
assert torch.allclose(a0.grad, a0_clone.grad)
assert torch.allclose(a1.grad, a1_clone.grad)
functorch.compile.clear_compile_cache()
start_num_recomps = functorch.compile.num_of_recompilations()
aot_autograd_f = aot_function(fn, nop, nop, static_argnums=(1,))
a = (
torch.randn(2, 2, requires_grad=True),
torch.randn(2, 2, requires_grad=True),
)
b = 0.1
check(a, b, aot_autograd_f, fn)
a = (
torch.randn(2, 2, requires_grad=True),
torch.randn(2, 2, requires_grad=True),
)
b = 1
check(a, b, aot_autograd_f, fn)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 2
def test_tuple_with_first_arg_as_static(self):
def fn(static_arg, a_tuple):
return torch.sin(a_tuple[0]) - a_tuple[1] - static_arg
def check(a, b_tuple, aot_autograd_fn, fn):
b0 = b_tuple[0]
b1 = b_tuple[1]
b0_clone = b0.clone().detach().requires_grad_(True)
b1_clone = b1.clone().detach().requires_grad_(True)
ref = fn(a, b_tuple)
ref.sum().backward()
res = aot_autograd_fn(a, (b0_clone, b1_clone))
res.sum().backward()
assert torch.allclose(res, ref)
assert torch.allclose(b0.grad, b0_clone.grad)
assert torch.allclose(b1.grad, b1_clone.grad)
functorch.compile.clear_compile_cache()
start_num_recomps = functorch.compile.num_of_recompilations()
aot_autograd_f = aot_function(fn, nop, nop, static_argnums=(0,))
a = 0.1
b = (
torch.randn(2, 2, requires_grad=True),
torch.randn(2, 2, requires_grad=True),
)
check(a, b, aot_autograd_f, fn)
a = 1
b = (
torch.randn(2, 2, requires_grad=True),
torch.randn(2, 2, requires_grad=True),
)
check(a, b, aot_autograd_f, fn)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 2
def test_dict(self):
def fn(a_dict, static_arg):
return torch.sin(a_dict["foo"]) - a_dict["bar"] - static_arg
def check(a_dict, b, aot_autograd_fn, fn):
a0 = a_dict["foo"]
a1 = a_dict["bar"]
a0_clone = a0.clone().detach().requires_grad_(True)
a1_clone = a1.clone().detach().requires_grad_(True)
ref = fn(a_dict, b)
ref.sum().backward()
a_clone = {}
a_clone["foo"] = a0_clone
a_clone["bar"] = a1_clone
res = aot_autograd_fn(a_clone, b)
res.sum().backward()
assert torch.allclose(res, ref)
assert torch.allclose(a0.grad, a0_clone.grad)
assert torch.allclose(a1.grad, a1_clone.grad)
functorch.compile.clear_compile_cache()
start_num_recomps = functorch.compile.num_of_recompilations()
aot_autograd_f = aot_function(fn, nop, nop, static_argnums=(1,))
a = {}
a["foo"] = torch.zeros(2, 2, requires_grad=True)
a["bar"] = torch.ones(2, 2, requires_grad=True)
b = 0
check(a, b, aot_autograd_f, fn)
a = {}
a["foo"] = torch.randn(2, 2, requires_grad=True)
a["bar"] = torch.randn(2, 2, requires_grad=True)
b = 0.2
check(a, b, aot_autograd_f, fn)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 2
def test_dict_with_static_arg_before_dict(self):
def fn(static_arg, a_dict):
return torch.sin(a_dict["foo"]) - a_dict["bar"] - static_arg
def check(a, b_dict, aot_autograd_fn, fn):
ref = fn(a, b_dict)
res = aot_autograd_fn(a, b_dict)
assert torch.allclose(res, ref)
b0 = b_dict["foo"]
b1 = b_dict["bar"]
b0_clone = b0.clone().detach().requires_grad_(True)
b1_clone = b1.clone().detach().requires_grad_(True)
ref.sum().backward()
b_clone = {}
b_clone["foo"] = b0_clone
b_clone["bar"] = b1_clone
res = aot_autograd_fn(a, b_clone)
res.sum().backward()
assert torch.allclose(res, ref)
assert torch.allclose(b0.grad, b0_clone.grad)
assert torch.allclose(b1.grad, b1_clone.grad)
functorch.compile.clear_compile_cache()
start_num_recomps = functorch.compile.num_of_recompilations()
aot_autograd_f = aot_function(fn, nop, nop, static_argnums=(0,))
a = 0.1
b = {}
b["foo"] = torch.randn(2, 2, requires_grad=True)
b["bar"] = torch.randn(2, 2, requires_grad=True)
check(a, b, aot_autograd_f, fn)
a = 0.2
b = {}
b["foo"] = torch.randn(2, 2, requires_grad=True)
b["bar"] = torch.randn(2, 2, requires_grad=True)
check(a, b, aot_autograd_f, fn)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 2
def test_tuple_static_args(self):
def fn(x, tuple_static_arg):
return x * tuple_static_arg[0] * tuple_static_arg[1]
functorch.compile.clear_compile_cache()
start_num_recomps = functorch.compile.num_of_recompilations()
aot_autograd_f = aot_function(fn, nop, nop, static_argnums=1)
a = torch.randn(2, 2, requires_grad=True)
b = (2, 3)
self.check(a, b, aot_autograd_f, fn)
# Same type of args, so no recompilation
a = torch.randn(2, 2, requires_grad=True)
b = (2, 3)
self.check(a, b, aot_autograd_f, fn)
# Trigger recompilation
a = torch.randn(2, 2, requires_grad=True)
b = (3, 4)
self.check(a, b, aot_autograd_f, fn)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 2
def test_arg_none(self):
def check(a, b, c, aot_autograd_fn, fn):
def cloner(x):
if x is not None:
return x.clone().detach().requires_grad_(True)
return None
def check_grad(x, x_clone):
if x is not None:
return torch.allclose(x.grad, x_clone.grad)
return True
ref = fn(a, b, c)
res = aot_autograd_fn(a, b, c)
assert torch.allclose(res, ref)
a_clone = cloner(a)
b_clone = cloner(b)
c_clone = cloner(c)
ref.sum().backward()
res = aot_autograd_fn(a_clone, b_clone, c_clone)
res.sum().backward()
            assert check_grad(a, a_clone)
            assert check_grad(b, b_clone)
            assert check_grad(c, c_clone)
def fn(a, b, c):
if a is None and b is None:
return c
elif a is None and c is None:
return b
elif b is None and c is None:
return a
elif a is None:
return b + c
elif b is None:
return a + c
elif c is None:
return a + b
return a + b + c
functorch.compile.clear_compile_cache()
start_num_recomps = functorch.compile.num_of_recompilations()
aot_autograd_f = aot_function(fn, nop, nop)
t1 = torch.randn(2, 2, requires_grad=True)
check(t1, None, None, aot_autograd_f, fn)
check(None, t1, None, aot_autograd_f, fn)
check(None, None, t1, aot_autograd_f, fn)
t2 = torch.randn(2, 2, requires_grad=True)
check(t1, t2, None, aot_autograd_f, fn)
check(t1, None, t2, aot_autograd_f, fn)
check(None, t1, t2, aot_autograd_f, fn)
t3 = torch.randn(2, 2, requires_grad=True)
check(t1, t2, t3, aot_autograd_f, fn)
# Same type of args, so no recompilation
check(t1, t2, None, aot_autograd_f, fn)
end_num_recomps = functorch.compile.num_of_recompilations()
total_recomps = end_num_recomps - start_num_recomps
assert total_recomps == 7
if __name__ == "__main__":
run_tests()
| pytorch-master | functorch/test/test_compile_cache.py |
# Owner(s): ["module: functorch"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from torch.testing._internal.common_utils import TestCase, run_tests, is_iterable_of_tensors
import torch
from torch import Tensor
import functools
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
from functorch_additional_op_db import additional_op_db
from torch.testing._internal.common_methods_invocations import op_db
from common_utils import (
get_fallback_and_vmap_exhaustive,
get_exhaustive_batched_inputs,
get_exhaustive_batched_inputs_batch_norm_is_training,
xfail,
skip,
skipOps,
tol1,
# tol2,
opsToleranceOverride,
check_vmap_fallback,
is_batch_norm_training,
is_valid_inplace_sample_input,
)
from torch.utils._pytree import tree_flatten, tree_unflatten, tree_map
from functorch import grad, vjp, vmap, jacrev, jacfwd
import torch.autograd.forward_ad as fwAD
from functorch._src.eager_transforms import _as_tuple, jvp
aten = torch.ops.aten
# Version of autograd.grad with some differences:
# - pytree inputs is allowed (but leaves of the pytree have to all
# be tensors)
# - if an input is not used as part of derivatives, we will return a
# zero-filled tensor for the result
def _autograd_grad(
outputs, inputs, grad_outputs=None, retain_graph=False, create_graph=True
):
inputs, inputs_spec = tree_flatten(inputs)
diff_inputs = tuple(inp for inp in inputs if inp.requires_grad)
if grad_outputs is None:
diff_outputs = tuple(out for out in outputs if out.requires_grad)
else:
diff_grad_outputs = [
(out, go) for out, go in zip(outputs, grad_outputs) if out.requires_grad
]
if len(diff_grad_outputs) == 0:
diff_outputs, grad_outputs = (), ()
else:
diff_outputs, grad_outputs = zip(*diff_grad_outputs)
grad_inputs = torch.autograd.grad(
diff_outputs,
diff_inputs,
grad_outputs,
retain_graph=retain_graph,
create_graph=create_graph,
allow_unused=True,
)
result = []
grad_inputs_iter = iter(grad_inputs)
for inp in inputs:
if inp.requires_grad:
grad_input = next(grad_inputs_iter)
if grad_input is None:
result.append(torch.zeros_like(inp))
else:
result.append(grad_input)
else:
result.append(torch.zeros_like(inp))
return tree_unflatten(result, inputs_spec)
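# Illustrative sketch (hypothetical helper, never invoked by the tests):
# demonstrates the zero-filling behavior documented above, where an input that
# does not contribute to the outputs still gets a zero gradient entry.
def _example_autograd_grad_zero_fill():
    x = torch.randn(3, requires_grad=True)
    y = torch.randn(3, requires_grad=True)
    out = (x * 2).sum()
    gx, gy = _autograd_grad((out,), (x, y))
    assert torch.allclose(gx, torch.full_like(x, 2.0))
    assert torch.equal(gy, torch.zeros_like(y))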
def diff_arg(arg, requires_grad=True):
def is_differentiable_arg(arg):
if requires_grad:
return arg.requires_grad
else:
return arg.is_floating_point() or arg.is_complex()
if is_iterable_of_tensors(arg):
if all([is_differentiable_arg(a) for a in arg]):
return True
if all([not is_differentiable_arg(a) for a in arg]):
return False
raise RuntimeError("NYI: The test runner can't handle this")
return isinstance(arg, Tensor) and is_differentiable_arg(arg)
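# Illustrative sketch (hypothetical helper, never invoked): diff_arg treats a
# tensor that requires grad as differentiable; plain tensors and Python
# numbers are not.
def _example_diff_arg():
    assert diff_arg(torch.randn(2, requires_grad=True))
    assert not diff_arg(torch.randn(2))
    assert not diff_arg(3)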
# Given f, returns an f' such that:
# - f' takes only positional arguments
# - All arguments to f' are floating-point Tensors
# - All outputs of f' are floating-point Tensors
def normalize_op_input_output2(f, args, kwargs, output_process_fn_grad=None, requires_grad=True):
flat_args, args_spec = tree_flatten(args)
diff_argnums = tuple(i for i, arg in enumerate(flat_args) if diff_arg(arg, requires_grad=requires_grad))
assert len(diff_argnums) > 0
primals = tuple(flat_args[i] for i in diff_argnums)
@functools.wraps(f)
def wrapped(*primals):
_args = list(flat_args)
for num, arg in zip(diff_argnums, primals):
_args[num] = arg
_args = tree_unflatten(_args, args_spec)
result = f(*_args, **kwargs)
if output_process_fn_grad is not None:
result = output_process_fn_grad(result)
if isinstance(result, tuple):
result = tuple(r for r in result if torch.is_floating_point(r))
assert len(result) > 0
return result
return wrapped, primals
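# Illustrative sketch (hypothetical helper, never invoked): only the
# differentiable tensor argument of torch.mul becomes a primal of the wrapped
# function; the Python scalar stays captured inside the wrapper.
def _example_normalize_op_input_output2():
    x = torch.randn(3, requires_grad=True)
    wrapped, primals = normalize_op_input_output2(torch.mul, (x, 2.0), {})
    assert len(primals) == 1 and primals[0] is x
    out = wrapped(*primals)
    assert torch.allclose(out, x * 2.0)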
# TODO: consolidate with normalize_op_input_output2
def normalize_op_input_output3(f, args, kwargs, sample_args, output_process_fn_grad=None):
flat_args, args_spec = tree_flatten(args)
flat_sample_args, _ = tree_flatten(sample_args)
diff_argnums = tuple(i for i, (arg, sample) in enumerate(zip(flat_args, flat_sample_args))
if diff_arg(sample, requires_grad=True))
assert len(diff_argnums) > 0
primals = tuple(flat_args[i] for i in diff_argnums)
@functools.wraps(f)
def wrapped(*primals):
_args = list(flat_args)
for num, arg in zip(diff_argnums, primals):
_args[num] = arg
_args = tree_unflatten(_args, args_spec)
result = f(*_args, **kwargs)
if output_process_fn_grad is not None:
result = output_process_fn_grad(result)
if isinstance(result, tuple):
result = tuple(r for r in result if torch.is_floating_point(r))
assert len(result) > 0
return result
return wrapped, primals
def normalize_op_input_output(f, sample, requires_grad=True):
args = tuple([sample.input] + list(sample.args))
return normalize_op_input_output2(
f, args, sample.kwargs, sample.output_process_fn_grad, requires_grad=requires_grad
)
def ref_vjp(f, *primals):
result = f(*primals)
def wrapped(cotangents):
return _autograd_grad(_as_tuple(result), primals, _as_tuple(cotangents))
return result, wrapped
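# Illustrative sketch (hypothetical helper, never invoked): ref_vjp mirrors
# functorch's vjp by returning the primal output plus a pullback implemented
# with plain autograd.
def _example_ref_vjp():
    x = torch.randn(3, requires_grad=True)
    out, pullback = ref_vjp(torch.sin, x)
    (grad_x,) = pullback(torch.ones_like(out))
    assert torch.allclose(grad_x, torch.cos(x))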
def simulate_jvp(f, primals, tangents):
primals_out, tangents_out = torch.autograd.functional.jvp(f, primals, tangents)
return primals_out, tangents_out
def ref_jvp(f, primals, tangents):
with fwAD.dual_level():
duals = tuple(fwAD.make_dual(p, t) for p, t in zip(primals, tangents))
result_duals = f(*duals)
result_duals, spec = tree_flatten(result_duals)
primals_out, tangents_out = zip(*(fwAD.unpack_dual(d) for d in result_duals))
return tree_unflatten(primals_out, spec), tree_unflatten(tangents_out, spec)
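# Illustrative sketch (hypothetical helper, never invoked): ref_jvp pushes a
# tangent through torch.sin with forward-mode dual numbers, so the JVP is
# cos(x) * t.
def _example_ref_jvp():
    x = torch.randn(3)
    t = torch.randn(3)
    primal_out, tangent_out = ref_jvp(torch.sin, (x,), (t,))
    assert torch.allclose(primal_out, torch.sin(x))
    assert torch.allclose(tangent_out, torch.cos(x) * t)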
def get_sample_cotangents(f, sample):
fn, primals = normalize_op_input_output(f, sample)
output = fn(*primals)
return tree_map(torch.randn_like, output)
# returns a new function g(*args, *cotangents) that computes vjps,
# together with the flattened (*args, *cotangents) to call it with
def get_vjp_fn_and_args_with_cotangents(f, sample, cotangents):
args = tuple([sample.input] + list(sample.args))
kwargs = sample.kwargs
flat_args, args_spec = tree_flatten(args)
flat_cotangents, cotangents_spec = tree_flatten(cotangents)
@functools.wraps(f)
def wrapped(*args):
assert len(args) == len(flat_args) + len(flat_cotangents)
actual_args = args[:len(flat_args)]
cotangents = args[len(flat_args):]
actual_args = tree_unflatten(actual_args, args_spec)
cotangents = tree_unflatten(cotangents, cotangents_spec)
fn, primals = normalize_op_input_output3(f, actual_args, kwargs,
flat_args,
sample.output_process_fn_grad)
_, vjp_fn = vjp(fn, *primals)
return vjp_fn(cotangents)
return wrapped, tuple(flat_args + flat_cotangents)
# Returns a new function g(*args, *cotangents) that computes vjps and
# sample (*args, *cotangents)
def get_vjpfull_variant(f, sample):
fn, primals = normalize_op_input_output(f, sample)
result = fn(*primals)
cotangents = _as_tuple(
tree_map(lambda x: torch.randn_like(x, requires_grad=True), result))
num_primals = len(primals)
args = (*primals, *cotangents)
@functools.wraps(f)
def wrapped(*args):
primals = args[:num_primals]
cotangents = args[num_primals:]
result, vjp_fn = vjp(fn, *primals)
if isinstance(result, torch.Tensor):
assert len(cotangents) == 1
cotangents = cotangents[0]
return vjp_fn(cotangents)
return wrapped, args
def get_jvp_variant(f, sample):
# We want this higher-order variant of jvp, so that it can
# be used to wrap vmap
fn, primals = normalize_op_input_output(f, sample, requires_grad=False)
tangents = _as_tuple(
tree_map(lambda x: torch.randn_like(x), primals))
@functools.wraps(f)
def wrapped(*args):
tangents = args
primals_out, tangents_out = jvp(fn, primals, tangents)
if isinstance(primals_out, torch.Tensor):
return (primals_out, tangents_out)
else:
flat_primals_out, _ = tree_flatten(primals_out)
flat_tangents_out, _ = tree_flatten(tangents_out)
return tuple(flat_primals_out + flat_tangents_out)
return wrapped, tangents
def get_jvp_variant_primals_tangents(f, sample):
# We want this higher-order variant of jvp, so that it can
# be used to wrap vmap
fn, primals = normalize_op_input_output(f, sample, requires_grad=False)
tangents = _as_tuple(
tree_map(lambda x: torch.randn_like(x), primals))
@functools.wraps(f)
def wrapped(*args):
primals_in = args[:len(primals)]
tangents_in = args[len(primals):]
primals_out, tangents_out = jvp(fn, primals_in, tangents_in)
if isinstance(primals_out, torch.Tensor):
return (primals_out, tangents_out)
else:
flat_primals_out, _ = tree_flatten(primals_out)
flat_tangents_out, _ = tree_flatten(tangents_out)
return tuple(flat_primals_out + flat_tangents_out)
return wrapped, primals + tangents
def is_inplace(op, variant):
if hasattr(variant, "__wrapped__"):
return variant.__wrapped__ is op.get_inplace()
return variant is op.get_inplace()
vjp_fail = {
xfail('tensor_split'),
xfail('to_sparse'),
xfail('nn.functional.ctc_loss'),
skip('pca_lowrank', ''), # fails on cuda, runs okay on cpu
skip('svd_lowrank', ''), # fails on cuda, runs okay on cpu
}
class TestOperators(TestCase):
@ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestOperators', 'test_grad', vjp_fail.union({
xfail('linalg.eig'), # diagonal_scatter does not support complex
xfail('chalf', '', device_type='cpu'),
skip('as_strided_scatter', ''), # seems flaky
xfail('sparse.sampled_addmm', ''),
}))
@opsToleranceOverride('TestOperators', 'test_grad', (
tol1('nn.functional.binary_cross_entropy_with_logits',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}),
))
def test_grad(self, device, dtype, op):
if op.name in vjp_fail:
self.skipTest("Skipped; Expected failures")
return
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
if is_inplace(op, op.get_op()):
self.skipTest("Skipped for redundancy. test_vjp handles in-place testing.")
return
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
diff_argnums = tuple(i for i, arg in enumerate(args) if diff_arg(arg))
assert len(diff_argnums) > 0
diff_args = tuple(args[i] for i in diff_argnums)
def wrapped_fn(*args, **kwargs):
result = op(*args, **kwargs)
if sample.output_process_fn_grad is not None:
result = sample.output_process_fn_grad(result)
# Reduce into single value for grad
if isinstance(result, torch.Tensor):
return result.sum()
result = sum([res.sum() for res in result])
return result
result = grad(wrapped_fn, diff_argnums)(*args, **kwargs)
expected = _autograd_grad(_as_tuple(wrapped_fn(*args, **kwargs)), diff_args)
self.assertEqual(result, expected)
@ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestOperators', 'test_jvp', set({
skip('nn.functional.max_pool1d'), # fails on cpu, runs okay on cuda
skip('pca_lowrank', ''), # fails on cuda, runs okay on cpu
skip('svd_lowrank', ''), # fails on cuda, runs okay on cpu
# =============================================
# NB: The above failures also fail using PyTorch core's
# forward-mode AD and vmap.
# The failures below are functorch-specific issues
# =============================================
# Composite ops that do bad things. Need to be fixed in PyTorch core.
# RuntimeError: Cannot access data pointer of Tensor that doesn't have storage
xfail('tensor_split'),
# BUG: runs and produces numerical differences
skip('nn.functional.max_unpool1d'), # fails everywhere except on mac
skip('nn.functional.max_unpool2d'), # fails everywhere except on windows
skip('nn.functional.max_unpool3d'), # fails everywhere except on mac
xfail('nn.functional.rrelu') # in-place test fails
}))
@opsToleranceOverride('TestOperators', 'test_jvp', (
tol1('nn.functional.conv_transpose3d',
{torch.float32: tol(atol=1e-04, rtol=1.3e-06)}, device_type='cuda'),
tol1('nn.functional.binary_cross_entropy_with_logits',
{torch.float32: tol(atol=4e-04, rtol=4e-04)}),
))
def test_jvp(self, device, dtype, op):
# TODO: get rid of vjp_decomp when we add decomposition support to
# PyTorch's forward-mode ad. Currently the decomposition support only
# works for functorch.jvp
VJP_DECOMP = {
'nn.functional.logsigmoid',
}
if op.name in VJP_DECOMP:
fixme_ref_jvp_local = simulate_jvp
else:
fixme_ref_jvp_local = ref_jvp
if not op.supports_forward_ad and op.name not in VJP_DECOMP:
self.skipTest("Skipped! Forward AD not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
outplace_variant = op if not is_inplace(op, op.get_op()) else None
inplace_variant = op.inplace_variant if op.supports_inplace_autograd else None
for sample in samples:
args = (sample.input,) + sample.args
kwargs = sample.kwargs
if outplace_variant:
self.jvp_opinfo_test(outplace_variant, args, kwargs,
sample.output_process_fn_grad,
clone_inputs=False,
fixme_ref_jvp_local=fixme_ref_jvp_local)
if is_valid_inplace_sample_input(sample, op, inplace_variant):
self.jvp_opinfo_test(inplace_variant, args, kwargs,
sample.output_process_fn_grad,
clone_inputs=True,
fixme_ref_jvp_local=fixme_ref_jvp_local)
def jvp_opinfo_test(self, fn, args, kwargs, output_process_fn,
clone_inputs, fixme_ref_jvp_local):
# NB: we used requires_grad=True to determine where the primals are,
# but don't need that information otherwise
fn, primals = normalize_op_input_output2(
fn, args, kwargs, output_process_fn, requires_grad=True)
orig_primals = tree_map(lambda x: x.detach(), primals)
orig_tangents = tree_map(lambda x: torch.randn_like(x), primals)
def maybe_clone_inputs():
if clone_inputs:
primals = tree_map(torch.clone, orig_primals)
tangents = tree_map(torch.clone, orig_tangents)
return primals, tangents
return orig_primals, orig_tangents
primals, tangents = maybe_clone_inputs()
expected_primal_outs, expected_tangent_outs = \
fixme_ref_jvp_local(fn, primals, tangents)
primals, tangents = maybe_clone_inputs()
primal_outs, tangent_outs = jvp(fn, primals, tangents)
self.assertEqual(primal_outs, expected_primal_outs)
self.assertEqual(tangent_outs, expected_tangent_outs)
@ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestOperators', 'test_vjp', vjp_fail.union({
xfail('pca_lowrank', ''),
xfail('svd_lowrank', ''),
xfail('as_strided_scatter', ''),
xfail('sparse.sampled_addmm', ''),
}))
@opsToleranceOverride('TestOperators', 'test_vjp', (
tol1('nn.functional.conv_transpose3d',
{torch.float32: tol(atol=5e-05, rtol=9e-05)}, device_type='cuda'),
tol1('nn.functional.binary_cross_entropy_with_logits',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}),
))
def test_vjp(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
def _test(_op, inplace=False):
for sample in samples:
if inplace and not is_valid_inplace_sample_input(sample, op, op.inplace_variant):
continue
fn, primals = normalize_op_input_output(_op, sample)
result = fn(*primals)
cotangents = tree_map(lambda x: torch.randn_like(x), result)
out, vjp_fn = vjp(fn, *primals)
self.assertEqual(out, result)
result_vjps = vjp_fn(cotangents)
_, vjp_fn = ref_vjp(fn, *primals)
expected_vjps = vjp_fn(cotangents)
self.assertEqual(result_vjps, expected_vjps)
_test(op)
for a_op in op.aliases:
_test(a_op)
if op.inplace_variant:
def f(inp, *args, **kwargs):
return op.inplace_variant(inp.clone(), *args, **kwargs)
_test(f, inplace=True)
@ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestOperators', 'test_vjpvjp', vjp_fail.union({
skip('nn.functional.max_unpool1d'), # Flaky
skip('nn.functional.max_unpool2d'), # Flaky
xfail('native_layer_norm', ''),
xfail('sparse.sampled_addmm', ''),
}))
@opsToleranceOverride('TestOperators', 'test_vjpvjp', (
tol1('nn.functional.conv_transpose3d',
{torch.float32: tol(atol=5e-05, rtol=9e-05)}, device_type='cuda'),
))
def test_vjpvjp(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
if not op.supports_gradgrad:
self.skipTest("Skipped! Operation does not support gradgrad")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
def test(_op, inplace=False):
for sample in samples:
if inplace and not is_valid_inplace_sample_input(sample, op, op.inplace_variant):
continue
fn, args = get_vjpfull_variant(_op, sample)
result = fn(*args)
cotangents = tree_map(lambda x: torch.randn_like(x), result)
# Compute vjp of vjp
_, vjp_fn = vjp(fn, *args)
result_vjps = vjp_fn(cotangents)
# Compute ref_vjp of vjp. We could have done ref_vjp of ref_vjp,
# but since we're confident that vjp works by itself, this is
# an equivalent way to test that.
_, vjp_fn = ref_vjp(fn, *args)
expected_vjps = vjp_fn(cotangents)
self.assertEqual(result_vjps, expected_vjps)
test(op)
if op.inplace_variant:
def fn(inp, *args, **kwargs):
return op.inplace_variant(inp.clone(), *args, **kwargs)
test(fn, inplace=True)
@ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
def test_vmapvjpvjp(self, device, dtype, op):
self.skipTest("Skipped; these tests take too long")
op_skip = set({
})
op_skip = op_skip.union(vjp_fail)
if op.name in op_skip:
self.skipTest("Skipped; Expected failures")
return
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
if not op.supports_gradgrad:
self.skipTest("Skipped! Operation does not support gradgrad")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
# TODO: test in-place
if is_inplace(op, op.get_op()):
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
for sample in samples:
fn, args = get_vjpfull_variant(op, sample)
result = fn(*args)
cotangents = tree_map(lambda x: torch.randn_like(x), result)
cotangents, _ = tree_flatten(cotangents)
num_args = len(args)
args_and_cotangents = tuple(args) + tuple(cotangents)
def vjp_of_vjp(*args_and_cotangents):
args = args_and_cotangents[:num_args]
cotangents = args_and_cotangents[num_args:]
result, vjp_fn = vjp(fn, *args)
result_vjps = vjp_fn(cotangents)
result, _ = tree_flatten(result)
result_vjps, _ = tree_flatten(result_vjps)
return (*result, *result_vjps)
is_batch_norm_and_training = is_batch_norm_training(op.name, sample.kwargs)
generator = get_fallback_and_vmap_exhaustive(
vjp_of_vjp, args_and_cotangents, {}, is_batch_norm_and_training=is_batch_norm_and_training)
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out)
vmapvjp_fail = vjp_fail.union({
# The following are not bugs and are expected behavior
xfail('masked_select'), # Not possible due to dynamic shapes
skip('bernoulli'), # randomness
skip('normal', ''), # randomness
skip('normal', 'number_mean'), # randomness
skip('nn.functional.rrelu'), # randomness
skip('nn.functional.feature_alpha_dropout', 'with_train'), # randomness
skip('nn.functional.feature_alpha_dropout', 'without_train'), # randomness
skip('nn.functional.dropout'), # randomness
skip('nn.functional.dropout2d'), # randomness
xfail('as_strided'), # as_strided is too wild for us to support, wontfix
xfail('index_put', ''), # not possible due to dynamic shapes; we support a subset
xfail('masked_scatter'), # dynamic
xfail('nn.functional.fractional_max_pool2d'), # random
xfail('nn.functional.fractional_max_pool3d'), # random
xfail('take'), # dynamic
# All of the following are bugs and need to be fixed
        skip('linalg.svdvals'),  # passes the correctness check but fails the has_batch_rule check
xfail('__getitem__', ''), # dynamic error
xfail('_masked.prod'), # calls aten::item
xfail('eig'), # calls aten::item
xfail('linalg.eig'), # Uses aten::allclose
xfail('linalg.householder_product'), # needs select_scatter
xfail('nanquantile', device_type='cpu'), # checks q via a .item() call
xfail('nn.functional.gaussian_nll_loss'), # checks var for if any value < 0
xfail('prod'), # calls nonzero
xfail('quantile', device_type='cpu'), # checks q via a .item() call
xfail('stft'),
xfail('view_as_complex'),
        # requires a rank-4 tensor to use the channels_last format
xfail('bfloat16'),
xfail('double'),
xfail('float'),
xfail('half'),
xfail('scatter_reduce', 'prod'), # item call
# NYI: querying is_contiguous inside of vmap for memory_format other than torch.contiguous_format
xfail('nn.functional.max_unpool2d'),
xfail('nn.functional.max_unpool2d', 'grad'),
xfail('chalf', ''),
xfail('sparse.sampled_addmm', ''),
xfail('as_strided_scatter', ''),
xfail('index_reduce', ''),
xfail('nn.functional.dropout3d', ''),
})
@ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@opsToleranceOverride('TestOperators', 'test_vmapvjp', (
tol1('linalg.svd',
{torch.float32: tol(atol=1.5e-04, rtol=1e-04)}, device_type="cuda"),
tol1('svd',
{torch.float32: tol(atol=1.5e-04, rtol=1e-04)}, device_type="cuda"),
))
@skipOps('TestOperators', 'test_vmapvjp', vmapvjp_fail)
def test_vmapvjp(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
# TODO: test in-place
if is_inplace(op, op.get_op()):
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
for sample in samples:
cotangents = get_sample_cotangents(op, sample)
fn, args = get_vjp_fn_and_args_with_cotangents(op, sample, cotangents)
is_batch_norm_and_training = is_batch_norm_training(op.name, sample.kwargs)
generator = get_fallback_and_vmap_exhaustive(
fn, args, {}, is_batch_norm_and_training=is_batch_norm_and_training)
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out)
vmapjvpall_fail = {
# The following are expected (not a bug)
skip('bernoulli', ''), # randomness
skip('nn.functional.dropout'), # randomness
skip('nn.functional.rrelu'), # randomness
skip('nn.functional.dropout2d', ''),
skip('nn.functional.dropout3d', ''),
skip('nn.functional.feature_alpha_dropout', 'without_train'),
skip('nn.functional.feature_alpha_dropout', 'with_train'),
xfail('nn.functional.fractional_max_pool2d'), # Cannot access data pointer of Tensor that doesn't have storage
xfail('nn.functional.fractional_max_pool3d'), # Cannot access data pointer of Tensor that doesn't have storage
# The following are bugs that we should fix
skip('nn.functional.max_pool1d'), # fails on cpu, runs on cuda
xfail('_masked.mean'),
xfail('_masked.prod'),
# Not actually a problem: embedding with max_norm mutates the weight
# and causes different runs to produce different results.
# skip because this is flaky depending on what the max_norm is!
skip('nn.functional.embedding', ''),
xfail('nn.functional.soft_margin_loss', ''),
xfail('linalg.householder_product'),
xfail('tensor_split'),
xfail('quantile'),
xfail('as_strided'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('scatter'),
xfail('nanquantile'),
xfail('view_as_complex'),
xfail('prod'),
skip('pca_lowrank', ''),
skip('svd_lowrank', ''),
xfail('stft'), # transpose_ fallback
        xfail('double'),  # requires a rank-4 tensor to use the channels_last format
        skip('nn.functional.max_unpool1d'),  # Flaky, seems to sometimes hit max_unpool2d
skip('nn.functional.max_unpool2d'), # fails everywhere except on mac
skip('nn.functional.max_unpool3d'), # fails everywhere except on mac
xfail('nn.functional.prelu'), # Call Tensor.as_strided
# erroring because running_mean and running_var aren't differentiable
xfail('nn.functional.batch_norm'),
xfail('nn.functional.batch_norm', 'without_cudnn'),
}
@ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestOperators', 'test_vmapjvpall', (
tol1('nn.functional.conv_transpose3d',
{torch.float32: tol(atol=2e-04, rtol=9e-3)}, device_type='cuda'),
))
@skipOps('TestOperators', 'test_vmapjvpall', vmapjvpall_fail)
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
# This is technically a superset of test_vmapjvp. We should either delete test_vmapjvp
# or figure out if we can split vmapjvpall. It's useful to keep test_vmapjvp intact
    # because that corresponds to "batched forward-mode AD" testing in PyTorch core
def test_vmapjvpall(self, device, dtype, op):
if is_inplace(op, op.get_op()):
# TODO: test in-place
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=False)
if not op.supports_forward_ad:
self.skipTest("Skipped! Forward AD not supported.")
return
for sample in samples:
arg_values = [sample.input] + list(sample.args)
kwarg_values = sample.kwargs
args = tuple(arg_values) + tuple(kwarg_values)
fn, args = get_jvp_variant_primals_tangents(op, sample)
is_batch_norm_and_training = is_batch_norm_training(op.name, kwarg_values)
generator = get_fallback_and_vmap_exhaustive(
fn, args, {}, is_batch_norm_and_training=is_batch_norm_and_training)
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out)
@ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestOperators', 'test_vmapjvpall_has_batch_rule', vmapjvpall_fail.union({
xfail('nn.functional.huber_loss'),
xfail('lu'),
skip('linalg.det', 'singular'), # https://github.com/pytorch/functorch/issues/961
xfail('cumprod'),
xfail('lu_solve'),
xfail('linalg.det'),
xfail('linalg.lstsq', 'grad_oriented'),
xfail('cross'),
xfail('linalg.pinv'),
xfail('masked_fill'),
xfail('copysign'),
xfail('linalg.solve'),
xfail('linalg.eig'),
xfail('complex'),
xfail('linalg.pinv', 'hermitian'),
xfail('pinverse'),
skip('_masked.mean'), # ???
xfail('masked_scatter'),
xfail('index_fill'),
xfail('put'),
xfail('take'),
xfail('linalg.eigvals'),
xfail('linalg.tensorsolve'),
xfail('nn.functional.max_pool3d'),
xfail('vdot'),
xfail('linalg.cross'),
xfail('nanmean'),
xfail('nansum'),
xfail('nn.functional.feature_alpha_dropout', 'without_train'),
xfail('linalg.lu_factor', ''),
xfail('nn.functional.dropout2d', ''),
xfail('pca_lowrank', ''),
xfail('svd_lowrank', ''),
xfail('linalg.lu_factor_ex', ''),
xfail('nn.functional.feature_alpha_dropout', 'with_train'),
xfail('special.log_ndtr', ''),
xfail('fft.ihfft2'), # conj_physical fallback
xfail('fft.ihfftn'), # conj_physical fallback
xfail('istft'), # col2im fallback
xfail('polar'), # complex fallback
xfail('nn.functional.max_unpool3d', 'grad'),
xfail('nn.functional.smooth_l1_loss', ''),
xfail('nn.functional.max_unpool2d', 'grad'),
xfail('nn.functional.soft_margin_loss', ''),
xfail('nn.functional.max_unpool1d', 'grad'),
xfail('nn.functional.embedding', ''),
xfail('lu_unpack'),
xfail('nn.functional.glu'),
xfail('nn.functional.bilinear'), # trilinear doesn't have batching rule
xfail('logdet'), # _linalg_slogdet doesn't have batching rule
xfail('linalg.slogdet'), # _linalg_slogdet doesn't have batching rule
xfail('linalg.lu', ''),
xfail('linalg.lu_solve', ''),
xfail('linalg.solve_ex', ''),
xfail('nn.functional.dropout3d', ''),
xfail('as_strided_scatter', ''),
xfail('_masked.cumprod', ''),
xfail('linalg.vecdot', ''),
}))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
def test_vmapjvpall_has_batch_rule(self, device, dtype, op):
if is_inplace(op, op.get_op()):
# TODO: test in-place
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=False)
if not op.supports_forward_ad:
self.skipTest("Skipped! Forward AD not supported.")
return
def test():
for sample in samples:
arg_values = [sample.input] + list(sample.args)
kwarg_values = sample.kwargs
args = tuple(arg_values) + tuple(kwarg_values)
fn, args = get_jvp_variant_primals_tangents(op, sample)
is_batch_norm_and_training = is_batch_norm_training(op.name, kwarg_values)
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
fn, args, {}, is_batch_norm_and_training=is_batch_norm_and_training, compute_loop_out=False):
pass
check_vmap_fallback(self, test, op, dry_run=False)
@ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestOperators', 'test_vmapvjp_has_batch_rule', vmapvjp_fail.union({
xfail('view_as_complex'),
xfail('complex'),
xfail('copysign'),
xfail('cummax'),
xfail('cummin'),
xfail('cumprod'),
xfail('eig'),
xfail('nansum'),
xfail('nanmean'),
xfail('special.log_ndtr'),
xfail('index_copy'),
xfail('index_fill'),
xfail('linalg.det'),
xfail('linalg.eig'),
xfail('linalg.eigvals'),
xfail('linalg.householder_product'),
xfail('linalg.lstsq', ''),
xfail('linalg.lstsq', 'grad_oriented'),
xfail('linalg.pinv'),
xfail('linalg.pinv', 'hermitian'),
xfail('linalg.slogdet'),
xfail('linalg.solve'),
xfail('logdet'),
xfail('lu'),
xfail('lu_solve'),
xfail('lu_unpack'),
xfail('masked_fill'),
xfail('masked_scatter'),
xfail('masked_select'),
xfail('nanquantile'),
xfail('pinverse'),
xfail('prod'),
xfail('put'),
skip('linalg.det'), # https://github.com/pytorch/functorch/issues/961
xfail('quantile'),
xfail('renorm'),
xfail('take'),
xfail('tensor_split'),
xfail('to_sparse'),
xfail('unfold'),
xfail('vdot'),
xfail('nn.functional.dropout'),
xfail('_masked.prod'),
xfail('fft.ihfft2'),
xfail('fft.ihfftn'),
xfail('cross'),
xfail('linalg.cross'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('nn.functional.huber_loss'),
xfail('nn.functional.bilinear'),
xfail('nn.functional.fractional_max_pool3d'),
xfail('as_strided'),
xfail('stft'),
xfail('nn.functional.rrelu'),
xfail('nn.functional.embedding_bag'),
xfail('nn.functional.max_pool3d'),
xfail('istft'),
xfail('nn.functional.fractional_max_pool2d'),
xfail('linalg.tensorsolve'),
xfail('linalg.lu_factor', ''),
xfail('nn.functional.feature_alpha_dropout', 'with_train'),
xfail('pca_lowrank', ''),
xfail('nn.functional.dropout2d', ''),
xfail('nn.functional.feature_alpha_dropout', 'without_train'),
xfail('svd_lowrank', ''),
xfail('linalg.lu_factor_ex', ''),
xfail('nn.functional.max_unpool2d', ''),
xfail('nn.functional.multi_margin_loss', ''),
xfail('nn.functional.multilabel_margin_loss', ''),
xfail('nn.functional.pdist', ''),
xfail('nn.functional.smooth_l1_loss', ''),
xfail('scatter_reduce', 'prod'),
xfail('scatter_reduce', 'amax'),
xfail('nn.functional.max_unpool1d', ''),
xfail('nn.functional.max_unpool3d', ''),
xfail('scatter_reduce', 'sum'),
xfail('scatter_reduce', 'mean'),
xfail('nn.functional.max_unpool3d', 'grad'),
xfail('nn.functional.soft_margin_loss', ''),
xfail('scatter_reduce', 'amin'),
xfail('nn.functional.max_unpool1d', 'grad'),
xfail('nn.functional.max_unpool2d', 'grad'),
xfail('linalg.lu', ''),
xfail('linalg.lu_solve', ''),
xfail('chalf', ''),
xfail('index_reduce', ''),
xfail('linalg.vander', ''),
xfail('linalg.solve_ex', ''),
xfail('nn.functional.dropout3d', ''),
xfail('as_strided_scatter', ''),
xfail('segment_reduce', 'offsets'),
xfail('_masked.cumprod', ''),
xfail('linalg.vecdot', ''),
xfail('segment_reduce', 'lengths'),
xfail('sparse.sampled_addmm', ''),
}))
def test_vmapvjp_has_batch_rule(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
# TODO: test in-place
if is_inplace(op, op.get_op()):
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
def test():
for sample in samples:
cotangents = get_sample_cotangents(op, sample)
fn, args = get_vjp_fn_and_args_with_cotangents(op, sample, cotangents)
is_batch_norm_and_training = is_batch_norm_training(op.name, sample.kwargs)
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
fn, args, {}, is_batch_norm_and_training=is_batch_norm_and_training, compute_loop_out=False):
pass
for a_op in op.aliases:
fn, args = get_vjp_fn_and_args_with_cotangents(a_op, sample, cotangents)
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
fn, args, {}, is_batch_norm_and_training=is_batch_norm_and_training, compute_loop_out=False):
pass
check_vmap_fallback(self, test, op, dry_run=False)
@ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestOperators', 'test_vjpvmap', vjp_fail.union({
skip('bernoulli', ''), # vjpvmap testing can't handle randomness
skip('normal', ''), # vjpvmap testing can't handle randomness
skip('normal', 'number_mean'), # vjpvmap testing can't handle randomness
skip('nn.functional.rrelu'), # randomness
skip('nn.functional.feature_alpha_dropout', 'with_train'), # randomness
skip('nn.functional.feature_alpha_dropout', 'without_train'), # randomness
# fallback path doesn't work
# All of the following are bugs and need to be fixed
xfail('__getitem__', ''),
xfail('index_put', ''),
xfail('view_as_complex'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('masked_select'),
skip('nn.functional.fractional_max_pool3d'), # generator works on cpu, fails on cuda
xfail('__rpow__'), # https://github.com/pytorch/functorch/issues/617
xfail('as_strided'),
skip('nn.functional.fractional_max_pool2d'), # generator works on cpu, fails on cuda
xfail('column_stack', ''),
xfail('nn.functional.dropout2d', ''),
xfail('svd_lowrank', ''),
xfail('pca_lowrank', ''),
xfail('clamp'),
# something weird happening with channels_last
xfail('bfloat16'),
xfail('double'),
xfail('float'),
xfail('half'),
xfail('nn.functional.dropout3d', ''),
xfail('as_strided_scatter', ''),
xfail('sparse.sampled_addmm', ''),
}))
def test_vjpvmap(self, device, dtype, op):
# NB: there is no vjpvmap_has_batch_rule test because that is almost
# certainly redundant with the vmap_has_batch_rule test in test_vmap.py
# one-off skip
if op.name == 'nn.functional.dropout':
self.skipTest("Skipped!")
if not op.supports_autograd:
# If the op doesn't support autograd, vmap(op) won't either
self.skipTest("Skipped! Autograd not supported.")
return
# TODO: test in-place
if is_inplace(op, op.get_op()):
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
batch_norm_fns = ("nn.functional.batch_norm", "nn.functional.instance_norm") # instance norm calls batch norm
is_batch_norm = op.name in batch_norm_fns
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
if is_batch_norm and is_batch_norm_training(op.name, kwargs):
generator = get_exhaustive_batched_inputs_batch_norm_is_training(args, kwargs)
else:
generator = get_exhaustive_batched_inputs(args, kwargs)
for batched_args, in_dims, kwargs in generator:
vmapped_op = vmap(op, in_dims)
fn, primals = normalize_op_input_output2(vmapped_op, batched_args, kwargs,
sample.output_process_fn_grad)
result = fn(*primals)
cotangents = tree_map(lambda x: torch.randn_like(x), result)
_, vjp_fn = vjp(fn, *primals)
result_vjps = vjp_fn(cotangents)
_, vjp_fn = ref_vjp(fn, *primals)
expected_vjps = vjp_fn(cotangents)
self.assertEqual(result_vjps, expected_vjps)
def _compare_jacobians_of_vjp(self, fn, cotangents_and_primals, argnums=None, atol_rtol=None):
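        # Sanity check that forward-over-reverse agrees with reverse-over-reverse:
        # jacfwd of the vjp function and jacrev of the vjp function should yield the same Jacobian.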
if argnums is None:
argnums = tuple(range(len(cotangents_and_primals)))
def get_vjp(cotangents, *primals):
_, vjp_fn = vjp(fn, *primals)
return vjp_fn(cotangents)
jacobian_jvp = jacfwd(get_vjp, argnums)(*cotangents_and_primals)
jacobian_vjp = jacrev(get_vjp, argnums)(*cotangents_and_primals)
# For dtype changing operations, the jacobians have different dtype.
jacobian_jvp = tree_map(lambda x: x.to(torch.float), jacobian_jvp)
jacobian_vjp = tree_map(lambda x: x.to(torch.float), jacobian_vjp)
if atol_rtol is not None:
(atol, rtol) = atol_rtol
self.assertEqual(jacobian_jvp, jacobian_vjp, atol=atol, rtol=rtol)
else:
self.assertEqual(jacobian_jvp, jacobian_vjp)
@ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestOperators', 'test_jvpvjp', vjp_fail.union({
# RuntimeError: Trying to set a forward gradient that has a different size than that of the original Tensor,
# this is not supported. Tensor is of size [5, 2, 3] while the given forward gradient is of size [1, 2, 3].
xfail('normal', ''),
xfail('_masked.log_softmax', ''),
xfail('_masked.softmax', ''),
xfail('_masked.softmin', ''),
xfail('cdist', ''),
xfail('cholesky', ''),
xfail('eig', ''),
xfail('logcumsumexp', ''),
xfail('nn.functional.embedding_bag', ''),
xfail('nn.functional.grid_sample', ''),
xfail('nn.functional.hardsigmoid', ''),
xfail('nn.functional.huber_loss', ''),
xfail('nn.functional.instance_norm', ''),
xfail('nn.functional.logsigmoid', ''),
xfail('nn.functional.softmin', ''),
xfail('nn.functional.softmin', 'with_dtype'),
xfail('renorm', ''),
xfail('symeig', ''),
xfail('pca_lowrank', ''),
xfail('svd_lowrank', ''),
xfail('nn.functional.multilabel_margin_loss', ''),
xfail('nn.functional.multilabel_soft_margin_loss', ''),
xfail('scatter_reduce', 'amax'),
xfail('scatter_reduce', 'amin'),
xfail('nn.functional.soft_margin_loss', ''),
xfail('nn.functional.pdist', ''),
xfail('scatter_reduce', 'sum'),
xfail('nn.functional.multi_margin_loss', ''),
xfail('scatter_reduce', 'mean'),
xfail('scatter_reduce', 'prod'),
skip('linalg.householder_product', '', device_type='cuda'), # flaky, I'm not sure why
xfail('native_layer_norm', ''),
xfail('sparse.sampled_addmm', ''),
skip('as_strided_scatter', ''), # seems flaky
xfail('segment_reduce', 'offsets'),
xfail('index_reduce', ''),
xfail('segment_reduce', 'lengths'),
}))
def test_jvpvjp(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
# TODO: test in-place
if is_inplace(op, op.get_op()):
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
for sample in samples:
fn, primals = normalize_op_input_output(op, sample)
result = fn(*primals)
cotangents = tree_map(lambda x: torch.randn_like(x), result)
primals_tangents = tree_map(lambda x: torch.randn_like(x), primals)
cotangents_tangents = tree_map(lambda x: torch.randn_like(x), cotangents)
if isinstance(primals[0], torch.Tensor) and primals[0].numel() == 0:
                # typically the first primal arg is the input. If the input has no elements,
                # we run into an "Expected Tensor but got None" issue
continue
def push_vjp(primals, cotangents):
_, vjp_fn = vjp(fn, *primals)
return vjp_fn(cotangents)
result = jvp(push_vjp, (primals, cotangents), (primals_tangents, cotangents_tangents))
self.assertEqual(len(result), 2)
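            # tree_map over two pytrees in lockstep; both must share the same structure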
def tree_map2(fn, first, second):
flat_first, spec_first = tree_flatten(first)
flat_second, spec_second = tree_flatten(second)
assert spec_first == spec_second
flat_result = [fn(f, s) for f, s in zip(flat_first, flat_second)]
return tree_unflatten(flat_result, spec_first)
def reference(primals, cotangents, primals_tangents, cotangents_tangents):
with fwAD.dual_level():
primal_duals = tree_map2(fwAD.make_dual, primals, primals_tangents)
_, vjp_fn = ref_vjp(fn, *primal_duals)
cotangent_duals = tree_map2(fwAD.make_dual, cotangents, cotangents_tangents)
result = vjp_fn(cotangent_duals)
flat_result, spec = tree_flatten(result)
primals_out, tangents_out = zip(*[fwAD.unpack_dual(r) for r in flat_result])
tangents_out = [t if t is not None else torch.zeros_like(p)
for p, t in zip(primals_out, tangents_out)]
expected = (tree_unflatten(primals_out, spec), tree_unflatten(tangents_out, spec))
return expected
# HACK: obviously pytorch should also have the same coverage
# For things that do have the same coverage, we test that jvp x vjp
# are the same between PyTorch and functorch. For things that don't,
# we check that jacfwd(vjp) and jacrev(vjp) are the same. This results
# in slower tests.
FUNCTORCH_HAS_FORMULA_BUT_NOT_PYTORCH = {
'nn.functional.nll_loss',
'softmax',
'log_softmax',
'nn.functional.cross_entropy',
'nn.functional.layer_norm',
'nn.functional.batch_norm',
}
if op.name in FUNCTORCH_HAS_FORMULA_BUT_NOT_PYTORCH:
self.assertFalse(op.supports_fwgrad_bwgrad,
f"{op.name} now supports forward over reverse without a decomposition. " +
"Please remove the decomposition version")
def is_differentiable(t):
return isinstance(t, torch.Tensor) and t.dtype == torch.float32
args = (cotangents, *primals)
if op.name == 'nn.functional.binary_cross_entropy':
argnums = (0, 1) # targets is float32 but isn't differentiable
atol_rtol = 1.5e-4, 1.3e-06
else:
argnums = tuple(i for i in range(len(args)) if is_differentiable(args[i]))
atol_rtol = None
self._compare_jacobians_of_vjp(fn, args, argnums, atol_rtol)
else:
expected = reference(primals, cotangents, primals_tangents, cotangents_tangents)
self.assertEqual(result, expected)
def _make_extremal_inputs(self, shape, device):
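        # Returns inputs filled with extreme values (-1000, 0, 1000) to stress-test numerics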
if shape is None:
return (None,)
return (
torch.full(shape, -1000., device=device),
torch.zeros(shape, device=device),
torch.full(shape, 1000., device=device),
)
def _arg_and_kwarg_options(self, args_options, kwargs_options):
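        # Cartesian product over each positional option list plus the kwargs options,
        # e.g. ((a_opts, b_opts), k_opts) yields (a, b, kwargs) tuples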
return itertools.product(*args_options, kwargs_options)
def test_extremal_numerics_nll_loss(self, device):
N, C = 3, 4
d1, d2, d3 = 5, 6, 7
shapes = (
((N, C), (N,), (C,)),
((N, C), (N,), None),
((N, C, d1, d2, d3), (N, d1, d2, d3), (C,)),
((N, C, d1, d2, d3), (N, d1, d2, d3), None),
)
kwargs_options = ({'ignore_index': 0, 'reduction': 'mean'}, {'reduction': 'sum'}, {'reduction': 'none'}, {})
for input_shape, target_shape, weight_shape in shapes:
input_options = self._make_extremal_inputs(input_shape, device)
for input, kwargs in self._arg_and_kwarg_options((input_options,), kwargs_options):
if weight_shape is None:
weight = None
else:
weight = torch.randn(weight_shape, device=device)
target = torch.randint(0, C, target_shape, device=device)
target[0] = 1 # since we're ignoring index 0, at least one element must be non-zero
fn = functools.partial(torch.nn.functional.nll_loss, target=target, weight=weight, **kwargs)
result = fn(input)
cotangents = torch.randn_like(result, device=device)
self._compare_jacobians_of_vjp(fn, (cotangents, input))
def test_extremal_numerics_l1_loss(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
kwargs_options = ({'reduction': 'sum'}, {'reduction': 'none'}, {})
for shape in shapes:
input_options = self._make_extremal_inputs(shape, device)
target_options = self._make_extremal_inputs(shape, device)
for input, target, kwargs in self._arg_and_kwarg_options((input_options, target_options), kwargs_options):
                # apply the reduction kwargs so each kwargs option is actually exercised
                fn = functools.partial(torch.nn.functional.l1_loss, **kwargs)
                result = fn(input, target)
                cotangents = torch.randn_like(result, device=device)
                self._compare_jacobians_of_vjp(fn, (cotangents, input, target))
def test_extremal_numerics_mse_loss(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
kwargs_options = ({'reduction': 'sum'}, {'reduction': 'none'}, {})
for shape in shapes:
input_options = self._make_extremal_inputs(shape, device)
target_options = self._make_extremal_inputs(shape, device)
for input, target, kwargs in self._arg_and_kwarg_options((input_options, target_options), kwargs_options):
                # apply the reduction kwargs so each kwargs option is actually exercised
                fn = functools.partial(torch.nn.functional.mse_loss, **kwargs)
                result = fn(input, target)
                cotangents = torch.randn_like(result, device=device)
                self._compare_jacobians_of_vjp(fn, (cotangents, input, target))
def test_extremal_numerics_softmax(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
kwargs_options = ({'dim': 1}, {})
for shape in shapes:
input_options = self._make_extremal_inputs(shape, device)
for input, kwargs in self._arg_and_kwarg_options((input_options,), kwargs_options):
                # forward the dim kwarg (when given) so both kwargs options are exercised
                fn = functools.partial(torch.nn.functional.softmax, **kwargs)
                result = fn(input)
                cotangents = torch.randn_like(result, device=device)
                self._compare_jacobians_of_vjp(fn, (cotangents, input))
def test_extremal_numerics_log_softmax(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
kwargs_options = ({'dim': 1}, {})
for shape in shapes:
input_options = self._make_extremal_inputs(shape, device)
for input, kwargs in self._arg_and_kwarg_options((input_options,), kwargs_options):
                # forward the dim kwarg (when given) so both kwargs options are exercised
                fn = functools.partial(torch.nn.functional.log_softmax, **kwargs)
                result = fn(input)
                cotangents = torch.randn_like(result, device=device)
                self._compare_jacobians_of_vjp(fn, (cotangents, input))
def test_extremal_numerics_cross_entropy(self, device):
N, C = 3, 4
d1, d2, d3 = 5, 6, 7
shapes = (
((N, C), (N,), (C,)),
((N, C), (N,), None),
((N, C), (N, C), (C,)),
((N, C), (N, C), None),
((C,), (), (C,)),
((C,), (), None),
((C,), (C,), (C,)),
((C,), (C,), None),
((N, C, d1, d2, d3), (N, d1, d2, d3), (C,)),
((N, C, d1, d2, d3), (N, d1, d2, d3), None),
((N, C, d1, d2, d3), (N, C, d1, d2, d3), (C,)),
((N, C, d1, d2, d3), (N, C, d1, d2, d3), None),
)
for input_shape, target_shape, weight_shape in shapes:
input_options = self._make_extremal_inputs(input_shape, device)
kwargs_options = [{'reduction': 'sum'}, {'reduction': 'none'}, {}]
if input_shape != target_shape:
kwargs_options.append({'ignore_index': 0, 'reduction': 'mean'})
for input, kwargs in self._arg_and_kwarg_options((input_options,), kwargs_options):
if weight_shape is None:
weight = None
else:
weight = torch.randn(weight_shape, device=device)
if input_shape == target_shape:
target = torch.rand(target_shape, device=device)
elif len(target_shape) == 0:
target = torch.tensor(1, device=device) # must be non-zero since ignore_index may be 0
else:
target = torch.randint(0, C, target_shape, device=device)
fn = functools.partial(torch.nn.functional.cross_entropy, target=target, weight=weight, **kwargs)
result = fn(input)
cotangents = torch.randn_like(result, device=device)
self._compare_jacobians_of_vjp(fn, (cotangents, input), atol_rtol=(1e-4, 1e-5))
def test_extremal_numerics_binary_cross_entropy(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
for shape in shapes:
weight_options = self._make_extremal_inputs(shape, device)
kwargs_options = [{'reduction': 'sum'}, {'reduction': 'none'}, {}]
for weight, kwargs in self._arg_and_kwarg_options((weight_options,), kwargs_options):
input = torch.rand(shape, device=device)
target = torch.rand(shape, device=device)
fn = functools.partial(torch.nn.functional.binary_cross_entropy, target=target, weight=weight, **kwargs)
result = fn(input)
cotangents = torch.randn_like(result, device=device)
self._compare_jacobians_of_vjp(fn, (cotangents, input), atol_rtol=(1e-4, 2e-5))
def test_extremal_numerics_layer_norm(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
for shape in shapes:
input_options = self._make_extremal_inputs(shape, device)
normalized_shape = shape[1:]
weight_options = self._make_extremal_inputs(normalized_shape, device)
bias_options = self._make_extremal_inputs(normalized_shape, device)
            # note: ({},) rather than () so the itertools.product is non-empty and the loop body runs
            for input, bias, weight, _ in self._arg_and_kwarg_options((input_options, bias_options, weight_options), ({},)):
def fn(input, weight, bias):
return torch.nn.functional.layer_norm(input, normalized_shape, weight=weight, bias=bias)
result = fn(input, weight, bias)
cotangents = torch.randn_like(result, device=device)
self._compare_jacobians_of_vjp(fn, (cotangents, input, weight, bias))
@ops(op_db + additional_op_db, allowed_dtypes=(torch.float32, torch.double))
@skipOps('TestOperators', 'test_vmap_autograd_grad', {
# call inplace functions
xfail('linalg.householder_product'), # inplace
xfail('linalg.eig'), # all close?
# The size of tensor a (4) must match the size of tensor b (10) at non-singleton dimension 0
xfail('masked_select'),
xfail('nn.functional.max_unpool2d', 'grad'), # contiguous call
xfail('nn.functional.max_unpool2d'), # contiguous call
xfail('to_sparse'), # dispatch key issue
# numerical inconsistencies, look like bugs
skip('matrix_exp', dtypes=(torch.float32,), device_type='cuda'), # fails on linux, passes on windows
skip('ldexp', dtypes=(torch.float32,), device_type='cpu'), # fails on all but mac
skip('__rmatmul__'), # flaky needs investigation
skip('matmul'), # flaky needs investigation
skip('nn.functional.conv_transpose3d'), # flaky needs investigation
skip('nn.functional.conv_transpose2d'), # flaky needs investigation
skip('nn.functional.conv_transpose1d'), # flaky needs investigation
skip('nn.functional.layer_norm', dtypes=(torch.float32,), device_type='cpu'), # fails on windows
skip('linalg.lu_factor', dtypes=(torch.float32,), device_type='cuda'), # fails on all but windows
skip('linalg.lu_factor_ex', dtypes=(torch.float32,), device_type='cuda'), # fails on all but windows
skip('linalg.multi_dot', '', device_type='cpu'),
skip('sparse.sampled_addmm', ''),
skip('native_layer_norm', '', device_type='cpu'),
xfail('as_strided_scatter', ''),
})
def test_vmap_autograd_grad(self, device, dtype, op):
def is_differentiable(inp):
return isinstance(inp, Tensor) and (inp.grad_fn is not None or inp.requires_grad)
def get_flat_differentiable(pytree):
flattened = tree_flatten(pytree)[0]
return tuple(i for i in flattened if is_differentiable(i))
def get_differentiable_linked(list1, list2):
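            # keep only the (output, cotangent) pairs whose output participates in autograd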
paired_list = zip(list1, list2)
paired_list = tuple((first, second) for (first, second) in paired_list if is_differentiable(first))
return zip(*paired_list)
def filter_none(out):
flattened = tree_flatten(out)[0]
return tuple(o for o in flattened if o is not None)
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
sample_inputs = op.sample_inputs(device, dtype, requires_grad=True)
for sample_input in sample_inputs:
fn, primals = normalize_op_input_output(op, sample_input)
out = fn(*primals)
cotangents = tree_map(torch.randn_like, out)
def compute_grad(cotangents):
out_flattened = out
cotangents_flattened = cotangents
if not isinstance(out_flattened, torch.Tensor):
out_flattened = tree_flatten(out)[0]
cotangents_flattened = tree_flatten(cotangents)[0]
out_flattened, cotangents_flattened = get_differentiable_linked(out_flattened, cotangents_flattened)
return filter_none(
torch.autograd.grad(out_flattened, get_flat_differentiable(primals), cotangents_flattened,
retain_graph=True, allow_unused=True))
            is_batch_norm_and_training = is_batch_norm_training(op.name, sample_input.kwargs)
generator = get_fallback_and_vmap_exhaustive(
compute_grad, (cotangents,), {}, is_batch_norm_and_training=is_batch_norm_and_training)
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out)
only_for = ("cpu", "cuda")
instantiate_device_type_tests(TestOperators, globals(), only_for=only_for)
if __name__ == '__main__':
run_tests()
| pytorch-master | functorch/test/test_ops.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import torch
import functorch
from functorch import vmap
import torch.utils._pytree as pytree
from functorch_additional_op_db import additional_op_db
from torch.testing._internal.common_methods_invocations import DecorateInfo
from torch.testing._internal.common_methods_invocations import op_db
import os
import unittest
from torch.testing._internal.common_device_type import toleranceOverride
IS_FBCODE = os.getenv('FUNCTORCH_TEST_FBCODE') == '1'
def loop(op, in_dims, out_dim, batch_size, *batched_args, **kwarg_values):
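    # Reference semantics for vmap: apply `op` to each per-example slice along the batched dims
    # and stack the per-example results back into a batched output.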
outs = []
for idx in range(batch_size):
flat_args, args_spec = pytree.tree_flatten(batched_args)
flat_dims, dims_spec = pytree.tree_flatten(in_dims)
        assert args_spec == dims_spec
new_args = [a.select(in_dim, idx) if in_dim is not None else a for a, in_dim in zip(flat_args, flat_dims)]
out = op(*pytree.tree_unflatten(new_args, args_spec), **kwarg_values)
outs.append(out)
loop_out = []
if isinstance(outs[0], torch.Tensor):
loop_out = torch.stack(outs)
else:
for idx in range(len(outs[0])):
loop_out.append(torch.stack([i[idx] for i in outs], out_dim))
return loop_out
def is_valid_inplace_sample_input(sample_input, op, inplace_variant):
if inplace_variant is None:
return False
if sample_input.broadcasts_input:
return False
# Check if input's dtype matches the output's dtype
args = (sample_input.input,) + sample_input.args
kwargs = sample_input.kwargs
output_dtype = op(*args, **kwargs).dtype
return sample_input.input.dtype == output_dtype
# This is kind of dangerous; please think carefully before using it.
# Known risks:
# - the returned value had better not be mutated, so it's best to return immutable types
# (e.g. prefer tuples to lists)
# - Don't hash tensors in a global context; that'll keep them around forever
def memoize(fn):
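    # NB: the positional args are used directly as the memo key, so they must be hashable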
memo = {}
def wrapped(*args):
if args not in memo:
memo[args] = fn(*args)
return memo[args]
return wrapped
# NB: This is O(2 ** num_tensors).
# num_tensors ranges from 1 to 10, with 2-4 being most common.
# Try not to exacerbate it if you're modifying it.
@memoize
def get_bdim_choices(num_tensors):
choices = []
# full of zeros
choices.append((0,) * num_tensors)
# All permutations of (-1, None)
options = (-1, None)
for choice in itertools.product(options, repeat=num_tensors):
choices.append(choice)
assert choices[-1] == (None,) * num_tensors
return tuple(choices[:-1])
# NB: This is O(2 ** num_tensors).
# num_tensors ranges from 1 to 10, with 2-4 being most common.
# Try not to exacerbate it if you're modifying it.
def get_bdim_choices_batch_norm(num_tensors, _, running_mean=None, running_var=None, *args):
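    # choice[0] is the input's bdim and choice[1]/choice[2] correspond to running_mean/running_var;
    # a batched input (0 or -1, both truthy) is only allowed when both running stats are batched too.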
choices = []
options = (-1, None)
# instance norm turns these into unbatched 0 tensors, so we cannot batch the input if either is not specified
if running_mean is None or running_var is None:
choices.append((None,) + (0,) * (num_tensors - 1))
for choice in itertools.product(options, repeat=num_tensors - 1):
choices.append((None,) + choice)
else:
# running_mean and running_var are specified as tensors. Batch norm doesn't work if the input is batched but
# running_mean/var are unbatched, so this tests all other cases
choices.append((0,) * num_tensors)
for choice in itertools.product(options, repeat=num_tensors):
input_bdim = choice[0]
running_mean_bdim = choice[1]
running_var_bdim = choice[2]
if input_bdim and (not running_mean_bdim or not running_var_bdim):
continue
choices.append(choice)
assert choices[-1] == (None,) * num_tensors
return tuple(choices[:-1])
def add_batch_dim(arg, bdim, batch_size=3):
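    # Materializes a real batch dimension of size `batch_size`: bdim == 0 repeats along a new
    # leading dim, bdim == -1 expands a new trailing dim (made contiguous).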
assert bdim == 0 or bdim == -1
assert isinstance(arg, torch.Tensor)
if bdim == 0:
shape = [1] * len(arg.shape)
shape.insert(bdim, batch_size)
return (arg.repeat(shape), bdim)
if bdim == -1:
arg = arg.unsqueeze(-1).expand(*arg.shape, batch_size).contiguous()
return (arg, bdim)
def construct_in_dims(bdim_choice_for_tensors, is_tensors):
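    # Expands the per-tensor bdim choices back onto the full flat argument list,
    # using None for the non-tensor arguments.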
result = []
bdim = iter(bdim_choice_for_tensors)
for is_tensor in is_tensors:
if not is_tensor:
result.append(None)
continue
result.append(next(bdim))
return tuple(result)
def get_exhaustive_batched_inputs(arg_values, kwarg_values, batch_size=2):
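    # Yields (batched_args, in_dims, kwargs) for every combination of batched/unbatched
    # tensor arguments produced by get_bdim_choices.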
flat_args, arg_spec = pytree.tree_flatten(tuple(arg_values))
is_tensors = [isinstance(a, torch.Tensor) for a in flat_args]
bdim_choices = get_bdim_choices(sum(is_tensors))
@memoize
def get_batched_arg(arg, bdim):
assert isinstance(arg, torch.Tensor)
assert bdim is not None
result, _ = add_batch_dim(arg, bdim, batch_size)
return result
for bdim_choice in bdim_choices:
flat_in_dims = construct_in_dims(bdim_choice, is_tensors)
flat_batched_args = tuple(arg if in_dim is None else get_batched_arg(arg, in_dim)
for arg, in_dim in zip(flat_args, flat_in_dims))
batched_args = pytree.tree_unflatten(flat_batched_args, arg_spec)
in_dims = pytree.tree_unflatten(flat_in_dims, arg_spec)
yield batched_args, in_dims, kwarg_values
def is_batch_norm_training(op_name, kwarg_values):
batch_norm_fns = ("nn.functional.batch_norm", "nn.functional.instance_norm") # instance norm calls batch norm
if op_name not in batch_norm_fns:
return False
# batch norm and instance norm require the value to be a plain bool
default_training = op_name == "nn.functional.instance_norm" # instance norm defaults to training, batch norm doesn't
is_training = tuple(arg for arg in tuple(kwarg_values.values()) if isinstance(arg, bool))
if len(is_training) == 0:
return default_training
else:
assert len(is_training) == 1
return is_training[0]
def get_exhaustive_batched_inputs_batch_norm_is_training(arg_values, kwarg_values, batch_size=2):
flat_args, arg_spec = pytree.tree_flatten(tuple(arg_values))
is_tensors = [isinstance(a, torch.Tensor) for a in flat_args]
num_tensors = sum(is_tensors)
if num_tensors == 1: # if there's only an input, can't batch it since running_mean/var will be seen as unbatched tensors
return
bdim_choices = get_bdim_choices_batch_norm(num_tensors, *arg_values)
@memoize
def get_batched_arg(arg, bdim):
assert isinstance(arg, torch.Tensor)
assert bdim is not None
result, _ = add_batch_dim(arg, bdim, batch_size)
return result
for bdim_choice in bdim_choices:
flat_in_dims = construct_in_dims(bdim_choice, is_tensors)
flat_batched_args = tuple(arg if in_dim is None else get_batched_arg(arg, in_dim)
for arg, in_dim in zip(flat_args, flat_in_dims))
batched_args = pytree.tree_unflatten(flat_batched_args, arg_spec)
in_dims = pytree.tree_unflatten(flat_in_dims, arg_spec)
yield batched_args, in_dims, kwarg_values
def generate_vmap_inputs(args, kwargs, is_batch_norm_and_training=False, batch_size=2):
if is_batch_norm_and_training:
return get_exhaustive_batched_inputs_batch_norm_is_training(
args, kwargs, batch_size)
return get_exhaustive_batched_inputs(args, kwargs, batch_size)
def clone_if_tensor(x):
if isinstance(x, torch.Tensor):
return x.clone()
return x
def compute_quantities_for_vmap_test(
op, orig_batched_args, orig_kwarg_values, in_dims,
out_dim=0, batch_size=2, compute_loop_out=True,
clone_inputs=False):
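    # Yields (expected, actual) pairs to compare: first the loop-over-batch reference vs. the
    # vmap output, then a nested-vmap case (see below) where the inner vmap sees no batched dims.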
def maybe_clone_inputs():
if clone_inputs:
batched_args = pytree.tree_map(clone_if_tensor, orig_batched_args)
kwarg_values = pytree.tree_map(clone_if_tensor, orig_kwarg_values)
return batched_args, kwarg_values
return orig_batched_args, orig_kwarg_values
batched_args, kwarg_values = maybe_clone_inputs()
if compute_loop_out:
loop_out = loop(op, in_dims, out_dim, batch_size, *batched_args, **kwarg_values)
else:
loop_out = None
# Used for debugging the resulting operations
# from functorch import make_fx
# def f(a):
# return op(a)
# t = make_fx(vmap(f, in_dims=in_dims, out_dims=out_dim))(*batched_args, **kwarg_values)
# print(in_dims, [arg.shape for arg in batched_args], kwarg_values)
batched_args, kwarg_values = maybe_clone_inputs()
batched_out = vmap(op, in_dims=in_dims, out_dims=out_dim)(*batched_args, **kwarg_values)
yield (loop_out, batched_out)
# Tests case where we dispatch to a batching rule with no bdims
# This should be handled by autogenerated plumbing. For vmap support
# added via a manual plumbing you may need to handle this specially.
def add_bdim_if_tensor(x):
if isinstance(x, torch.Tensor):
return x.unsqueeze(1)
return x
def f(dummy, *args, **kwargs):
return op(*args, **kwargs)
dummy = torch.ones(batch_size, 1)
expected = pytree.tree_map(add_bdim_if_tensor, batched_out)
inner_in_dims = (0,) + pytree.tree_map(lambda x: None, in_dims)
outer_in_dims = (0,) + in_dims
batched_args, kwarg_values = maybe_clone_inputs()
output = vmap(vmap(f, inner_in_dims), outer_in_dims)(dummy, *batched_args, **kwarg_values)
yield (expected, output)
def get_fallback_and_vmap_exhaustive(op, arg_values, kwarg_values, is_batch_norm_and_training=False, compute_loop_out=True):
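    # Convenience wrapper used by the op tests; roughly, in a test:
    #   for loop_out, batched_out in get_fallback_and_vmap_exhaustive(fn, args, {}):
    #       self.assertEqual(loop_out, batched_out)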
out_dim = 0
batch_size = 2
generator = generate_vmap_inputs(arg_values, kwarg_values, is_batch_norm_and_training)
for batched_args, in_dims, kwarg_values in generator:
for quantities in compute_quantities_for_vmap_test(
op, batched_args, kwarg_values, in_dims, out_dim, batch_size, compute_loop_out):
yield quantities
def opinfo_in_dict(opinfo, d):
return (opinfo.name in d) or (f'{opinfo.name}.{opinfo.variant_test_name}' in d)
def xfail(op_name, variant_name='', *, device_type=None, dtypes=None):
return (op_name, variant_name, device_type, dtypes, True)
# TODO: this doesn't work in python < 3.8
def skip(op_name, variant_name='', *, device_type=None, dtypes=None):
return (op_name, variant_name, device_type, dtypes, False)
def skipOps(test_case_name, base_test_name, to_skip):
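    # Mutates the matching OpInfo entries in-place, attaching expectedFailure (xfail) or
    # unittest.skip decorators; the decorator returned to the test function is a no-op.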
all_opinfos = op_db + additional_op_db
for xfail in to_skip:
op_name, variant_name, device_type, dtypes, expected_failure = xfail
matching_opinfos = [o for o in all_opinfos
if o.name == op_name and o.variant_test_name == variant_name]
assert len(matching_opinfos) >= 1, f"Couldn't find OpInfo for {xfail}"
for opinfo in matching_opinfos:
decorators = list(opinfo.decorators)
if expected_failure:
decorator = DecorateInfo(unittest.expectedFailure,
test_case_name, base_test_name,
device_type=device_type, dtypes=dtypes)
decorators.append(decorator)
else:
decorator = DecorateInfo(unittest.skip("Skipped!"),
test_case_name, base_test_name,
device_type=device_type, dtypes=dtypes)
decorators.append(decorator)
opinfo.decorators = tuple(decorators)
# This decorator doesn't modify fn in any way
def wrapped(fn):
return fn
return wrapped
def tol2(op_name, variant_name, override_dct, *, device_type=None):
return (op_name, variant_name, override_dct, device_type)
def tol1(op_name, override_dct, *, device_type=None):
return tol2(op_name, '', override_dct, device_type=device_type)
def opsToleranceOverride(test_case_name, base_test_name, overrides):
all_opinfos = op_db + additional_op_db
for override in overrides:
op_name, variant_name, override, device_type = override
matching_opinfos = [o for o in all_opinfos
if o.name == op_name and o.variant_test_name == variant_name]
assert len(matching_opinfos) == 1, f"Couldn't find OpInfo for {override}"
opinfo = matching_opinfos[0]
decorators = list(opinfo.decorators)
decorators.append(DecorateInfo(
toleranceOverride(override),
test_case_name, base_test_name, device_type=device_type))
opinfo.decorators = tuple(decorators)
# This decorator doesn't modify fn in any way
def wrapped(fn):
return fn
return wrapped
class DisableVmapFallback:
def __enter__(self):
self.prev_state = functorch._C._is_vmap_fallback_enabled()
functorch._C._set_vmap_fallback_enabled(False)
def __exit__(self, *ignored):
functorch._C._set_vmap_fallback_enabled(self.prev_state)
def check_vmap_fallback(test_case, thunk, opinfo, dry_run=False):
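    # Runs `thunk` with the vmap fallback disabled. In dry_run mode, failures are swallowed and
    # an xfail(...) line is printed for pasting into a skip list; otherwise the exception propagates.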
try:
with DisableVmapFallback():
thunk()
except Exception:
if not dry_run:
raise
if opinfo.variant_test_name:
print(f"xfail('{opinfo.name}', '{opinfo.variant_test_name}'),")
else:
print(f"xfail('{opinfo.name}'),")
| pytorch-master | functorch/test/common_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functorch.dim import Tensor, Dim, dims, dimlists, stack, DimensionBindError, DimList
from attn_ft import BertSelfAttention as BertSelfAttentionA, Linear
from attn_positional import BertSelfAttention as BertSelfAttentionB
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_CUDA
from unittest import skip, skipIf
import torch
import gc
from functorch._C import dim as _C
try:
from torchvision.models import resnet18
except ImportError:
resnet18 = None
_test_c, _parse_test, _set_pointwise_optimize = _C._test_c, _C._parse_test, _C._set_pointwise_optimize
from contextlib import contextmanager
from time import perf_counter
measure_perf = False
if measure_perf:
from torchdim.magic_trace import magic_trace
else:
@contextmanager
def magic_trace(*args, **kwargs):
yield
@contextmanager
def measure(what):
b = perf_counter()
yield
e = perf_counter()
print(f"{what}: {e - b:.20f} seconds")
def triu(A):
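    # first-class-dims version of torch.triu: keep A[i, j] where i <= j, zero elsewhere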
i, j = dims()
a = A[i, j]
zero = torch.tensor(0, dtype=torch.float) # XXX - torch.where is janky...
return torch.where(i <= j, a, zero).order(i, j)
def gpu_time(lmb, name, r=100):
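    # Rough CUDA timing helper: one warm-up loop, then the average time per call (in ms,
    # via CUDA events) over `r` runs is printed and returned.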
b = torch.cuda.Event(enable_timing=True)
e = torch.cuda.Event(enable_timing=True)
# with magic_trace(name + ".fxt"):
for _ in range(r):
lmb()
b.record()
for _ in range(r):
lmb()
e.record()
e.synchronize()
elapsed = b.elapsed_time(e)
# with torch.profiler.profile(schedule=torch.profiler.schedule(
# wait=0,
# warmup=1,
# active=2), on_trace_ready=tensorboard_trace_handler(name), with_stack=True) as profiler:
# for _ in range(3):
# lmb()
# profiler.step()
print(name, elapsed / r)
return elapsed / r
class TestMin(TestCase):
def setUp(self):
gc.disable()
gc.collect()
self.interesting = set()
for o in gc.get_objects():
if isinstance(o, (torch.Tensor, Dim, Tensor, DimList)):
self.interesting.add(id(o))
if 'cuda' in self._testMethodName:
self.mem_allocated = torch.cuda.memory_allocated()
def tearDown(self):
interesting = []
for o in gc.get_objects():
if isinstance(o, (torch.Tensor, Dim, Tensor, DimList)) and id(o) not in self.interesting:
interesting.append(o)
extra_memory = 0
if 'cuda' in self._testMethodName:
extra_memory += torch.cuda.memory_allocated() - self.mem_allocated
# nolevels = _n_levels_in_use() == 0
if extra_memory != 0 or len(interesting) != 0:
import refcycle
refcycle.garbage().export_image('garbage.pdf')
gc.collect()
# assert nolevels, f"cleanup failed? {_n_levels_in_use()}"
assert extra_memory == 0, f'extra cuda memory left allocated: {extra_memory}'
assert len(interesting) == 0, \
f'extra torch.Tensor, Dim, or Tensor left allocated: {len(interesting)} objects of types:' \
f' { [type(t) for t in interesting] }'
def test_manual_stuff(self):
A_ = torch.rand(3, 4)
B_ = torch.rand(4, 5)
i, j, k = dims()
A = A_[i, k]
B = B_[k, j]
C = (A.expand(j) * B.expand(i)).sum(k)
self.assertTrue(torch.allclose(C.order(i, j), torch.mm(A_, B_)))
self.assertTrue(torch.allclose(torch.triu(A_, 0), triu(A_)))
D_ = torch.randint(0, 3, (6,))
d = dims()
D = D_[d]
A.index([i], [D]).order(k, d)
def attn(self, batch_size=1, sequence_length=4, hidden_size=6, num_attention_heads=3, linear=Linear, device=None, time=False):
def maybe_to(x):
return x if device is None else x.to(device)
attention_probs_dropout_prob = 0.
A = maybe_to(BertSelfAttentionA(hidden_size, num_attention_heads, attention_probs_dropout_prob, linear=linear))
B = maybe_to(BertSelfAttentionB(hidden_size, num_attention_heads, attention_probs_dropout_prob))
A.load_state_dict(B.state_dict())
hidden_state = maybe_to(torch.rand(batch_size, sequence_length, hidden_size))
b_out = B(hidden_state)
a_out = A(hidden_state)
self.assertTrue(torch.allclose(a_out, b_out)) # why does a simple matmul not do the right thing?
if time:
gpu_time(lambda: B(hidden_state), "positional", r=3)
gpu_time(lambda: A(hidden_state), "first_class", r=3)
for approach in ('relative_key', 'relative_key_query'):
A = maybe_to(BertSelfAttentionA(hidden_size, num_attention_heads,
attention_probs_dropout_prob, approach, sequence_length, linear=linear))
B = maybe_to(BertSelfAttentionB(hidden_size, num_attention_heads,
attention_probs_dropout_prob, approach, sequence_length))
A.load_state_dict(B.state_dict())
hidden_state = maybe_to(torch.rand(batch_size, sequence_length, hidden_size))
b_out = B(hidden_state)
a_out = A(hidden_state)
self.assertTrue(torch.allclose(a_out, b_out))
if time:
gpu_time(lambda: B(hidden_state), "positional", r=3)
gpu_time(lambda: A(hidden_state), "first_class", r=3)
A = maybe_to(BertSelfAttentionA(hidden_size, num_attention_heads,
attention_probs_dropout_prob, None, None, linear=linear))
B = maybe_to(BertSelfAttentionB(hidden_size, num_attention_heads,
attention_probs_dropout_prob, None, None))
A.load_state_dict(B.state_dict())
hidden_state = maybe_to(torch.rand(batch_size, sequence_length, hidden_size))
past_key_value = (maybe_to(torch.rand(batch_size, num_attention_heads,
sequence_length, hidden_size // num_attention_heads)),
maybe_to(torch.rand(batch_size, num_attention_heads,
sequence_length, hidden_size // num_attention_heads)))
b_out = B(hidden_state, past_key_value=past_key_value)
a_out = A(hidden_state, past_key_value=past_key_value)
self.assertTrue(torch.allclose(a_out, b_out))
if time:
gpu_time(lambda: B(hidden_state), "positional", r=3)
gpu_time(lambda: A(hidden_state), "first_class", r=3)
def test_attn(self):
self.attn()
def test_inplace(self):
# some embeddings table
embeddings = torch.zeros(10, 3)
# some sparse updates to the embeddings
indices = torch.arange(2) + 1
values = torch.rand(2, 3)
i, n, f = dims()
embeddings[indices[i], f] += values[i, f]
@skipIf(not TEST_CUDA, "no CUDA")
def test_attn_cuda(self):
# size from the BERT paper, 90% pretraining of sequence length 128
self.attn(batch_size=256, hidden_size=768, sequence_length=128,
num_attention_heads=12, device='cuda', time=measure_perf, linear=torch.nn.Linear)
def test_stack(self):
i, j, d = dims()
A = torch.rand(4, 5)
r = stack([A[i, j]], d, j)
# a, b = r.unbind(d)
# self.assertTrue(torch.allclose(a.order(i, j), i.expand(j).order(i, j)))
# self.assertTrue(torch.allclose(b.order(i, j), j.expand(i).order(i, j)))
def test_max(self):
ap = torch.rand(2, 3, 2)
i, j, k = dims()
a = ap[i, j, k]
r, i0 = a.max(dim=k)
self.assertTrue(torch.allclose(r.order(i, j), ap.max(2)[0]))
def test_mm(self):
i, j, k, q = dims()
a = torch.rand(3, 4)
b = torch.rand(4, 5)
a_ = a[i, k]
b_ = b[k, j]
q.size = 1
r = (a_.expand(j, q) * b_.expand(i, q)).sum(k).order(q, i, j)
# r = (a_*b_).sum(k).order(q, i, j)
# print(r)
# print(a @ b)
def test_with_dims_split(self):
a = torch.arange(3 * 12).view(3, 12)
i, j, k = dims()
k.size = 4
r = a[i, [j, k]]
x = r.order(i, [j, k])
self.assertTrue(torch.allclose(a, x))
def test_hello(self):
A = torch.rand(3, 4)
B = torch.rand(4, 5)
i, j, k = dims()
# r = A[i]*4
r = (A[i, k] * B[k, j]).sum(k).order(i, j)
assert torch.allclose(r, A @ B)
assert A.sum() == A[i].sum((0, i))
assert A.sum() == A[i].sum((-1, i))
assert torch.allclose(A.sum(), A[i].sum(0, keepdim=True).sum((0, i)))
assert torch.allclose(A[i].std(i, True), A.std(0, True))
assert torch.allclose(A[i, k].max(i)[0].order(k), A.max(0)[0])
assert torch.allclose(A.sort(1)[0], A[i, k].sort(k)[0].order(i, k))
# XXX - chunk changes the size of a dimension, has to take a new dimension...
# assert torch.allclose(A.chunk(2,1)[0], A[i, k].chunk(2, k)[0].order(i, k))
assert torch.allclose(A[i].renorm(1, i, 7).order(i), A.renorm(1, 0, 7))
kk = dims()
# assert torch.allclose( torch.stack([A, A], 1), stack([A[i,k], A[i, k]], kk, k).order(i, kk, k))
k2 = dims()
# r = cat((A[i, k], A[i,k]), k, k2)
# assert torch.allclose(torch.cat([A, A], 1), r.order(i, k2))
# assert k2.size == 2*k.size
assert torch.allclose(A.expand(5, -1, -1), A[i, k].expand(j).order(j, i, k))
z = dims()
C = torch.arange(2)
assert torch.allclose(A[:, 0:2], A[i, k].index(k, C[z]).order(i, z))
o, l = dims()
o.size = 2
r = A[i, k].index(k, (o, l))
assert torch.allclose(r.order(i, o, l), A.view(-1, 2, 2))
rr = r.index((o, l), k)
assert torch.allclose(A, rr.order(i, k))
r = i + k - 1
r2 = torch.arange(3)[:, None] + torch.arange(4)[None, :] - 1
assert torch.allclose(r.order(i, k), r2)
# test with ...
assert torch.allclose(A.T, A[..., k].order(k))
# test with dimlist
a_, b_ = dimlists()
assert torch.allclose(A[i, a_].order(*a_, i), A.T)
# test with one bound dimlist
assert torch.allclose(A[:, a_].order(*a_), A.T)
# test with a dimlist that will end up empty
assert torch.allclose(A[i, b_, k].order(i, k, *b_), A)
# test with too few things
(A[i] + i)
assert torch.allclose((A[i] + i).order(i), A + torch.arange(3)[:, None])
# test with too many elements
try:
A[1, ..., 1, 1]
raise NotImplementedError()
except IndexError:
pass
c, d = dims()
c.size = 2
assert torch.allclose(A[i, [c, d]].order(i, c, d), A.view(3, 2, 2))
assert torch.allclose(A[c + 1, c + 0].order(c), A[torch.arange(2) + 1, torch.arange(2)])
try:
A[..., 3, ...]
raise NotImplementedError()
except DimensionBindError:
pass
C = torch.rand(4, 7)
c_, x, y, z = dims()
a, b, c = C.split((3, 3, 1), dim=1)
s = dims()
ref = C.split((3, 3, 1), dim=1)
t = C[s, c_].split((x, y, z), dim=c_)
for a, b, d in zip(ref, t, (x, y, z)):
assert torch.allclose(a, b.order(s, d))
D = torch.rand(3, 4, 5)
assert torch.allclose(D.transpose(0, 1).flatten(1, 2), D[i, k, j].order((i, j)).order(k))
r = [id(x) for x in torch.rand_like(A[i, k]).dims]
assert id(i) in r and id(k) in r
r = [id(x) for x in torch.nn.functional.dropout(A[i, k]).dims]
assert id(i) in r and id(k) in r
def test_simple(self):
i, j, k = dims()
x = torch.rand(3, 4)
z = x[i, j]
(z + z + z + z)
(z.order(i, j))
def test_mm_fuse(self):
i, j, k = dims()
A = torch.rand(3, 4)
B = torch.rand(4, 5)
C = (A[i, k] * B[k, j]).sum(k).order(i, j)
assert torch.allclose(C, A @ B)
def test_time_mm_fuse(self):
i, j, k = dims()
A = torch.rand(3, 4)
B = torch.rand(4, 5)
for _ in range(10):
r0 = A @ B
for _ in range(10):
a = A[i, k]
b = B[k, j]
r1 = (a * b).sum(k)
with measure('pp'):
for _ in range(10000):
A @ B
# magic_trace_stop_indicator()
with measure('fc'):
for _ in range(10000):
(A[i, k] * B[k, j]).sum(k).order(i, j)
with magic_trace('f.fxt'):
for _ in range(10000):
(A[i, k] * B[k, j]).sum(k).order(i, j)
with magic_trace('p.fxt'):
for _ in range(10000):
A @ B
# magic_trace_stop_indicator()
assert torch.allclose(r1.order(i, j), r0)
def test_compare_dims(self):
i, j = dims()
i.size = 3
j.size = 4
(i < j)
def test_c(self):
_test_c()
def test_seg(self):
A = torch.rand(3, 4)
i, k = dims()
i.size = 4
k.size = 3
r = i + k - 1
def test_expand(self):
A = torch.rand(3, 4)
i = dims()
assert list(A[i].expand(2, 4).order(i).size()) == [3, 2, 4]
def test_parse(self):
self.assertEqual(("x", None, None, None), _parse_test(1, 0, "x"))
self.assertEqual(("x", None, "y", None), _parse_test(1, 0, "x", c="y"))
self.assertEqual(("x", None, "y", "z"), _parse_test(1, 0, "x", d="z", c="y"))
self.assertEqual(("x", "4", None, None), _parse_test(2, 0, "x", b="4"))
self.assertEqual(("x", "y", "z", "q"), _parse_test(2, 0, "x", "y", "z", "q"))
with self.assertRaises(TypeError):
_parse_test(2, 0, "x", "y", "z", "q", "5")
with self.assertRaises(TypeError):
_parse_test(2, 0, "x", "y", b="y")
with self.assertRaises(TypeError):
_parse_test(2, 0, "x", c="y")
with self.assertRaises(TypeError):
_parse_test(2, 0, "x")
def test_network(self):
if resnet18 is None:
self.skipTest('no torchvision')
rn = resnet18(norm_layer=lambda x: torch.nn.BatchNorm2d(x, track_running_stats=False))
rn.train()
img = torch.rand(1, 1, 2, 3, 224, 224)
imgf = img.view(2, 3, 224, 224)
i, j = dims()
r = rn(img[i, j])
r = r.order(i, j).view(2, 1000)
r2 = rn(imgf)
assert torch.allclose(r2, r, atol=1e-06)
def test_dim_args(self):
a = dimlists()
assert isinstance(a, DimList)
a = dims()
b = dimlists()
assert isinstance(a, Dim)
assert isinstance(b, DimList)
assert str(a) == 'a'
a, b = dims(sizes=[3, 4])
assert a.size == 3
assert b.size == 4
a = dims(sizes=[3])
b = dimlists(sizes=[4])
assert len(b) == 4
a = dims()
b = dimlists(sizes=[[4, 5]])
assert b[0].size == 4
assert b[1].size == 5
def test_diag(self):
i = dims()
A = torch.rand(4, 4)
(A[i, i])
def test_softmax_split(self):
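        # split-softmax: compute a numerically stable softmax over a length-16 vector that has
        # been split into 2 groups, combining the per-group max/sum with correction factors c.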
a = torch.rand(16)
g, i = dims(sizes=[2, None])
a2 = a[[i, g], ]
m_b, _ = a2.max(i)
f_b = torch.exp(a2 - m_b)
l_b = f_b.sum(i)
m, _ = m_b.max(g)
c = torch.exp(m_b - m)
f = (c * f_b).order((i, g))
l = (c * l_b).sum(g)
assert torch.allclose(f / l, torch.nn.functional.softmax(a, dim=0))
def test_index(self):
A = torch.rand(3, 4)
B = torch.rand(4, 5)
i, j, k = dims()
o, l = dims()
o.size = 2
r = A[i, k].index(k, [o, l])
assert torch.allclose(r.order(i, o, l), A.view(-1, 2, 2))
rr = r.index([o, l], k)
assert torch.allclose(A, rr.order(i, k))
z = dims()
C = torch.arange(2)
x = A[i, k].index(k, C[z]).order(i, z)
assert torch.allclose(A[:, 0:2], x)
C = torch.rand(3, 4, 5)
ik = dims()
assert torch.allclose(C.index((0, 2), ik).order(ik), C.permute(0, 2, 1).reshape(15, 4))
# failures that came up from monkey patching some operators...
def test_monkey(self):
A = torch.rand(3, 4)
A[0, 0] = 5
x = torch.randn(3, 4, 4, 4, 3)
x_clone1 = x.clone()
ia = torch.tensor([0, 2, 1])
ib = torch.tensor([0, 2, 1])
first_shape = x[:, ia, None, ib, 0].shape
x_clone1[:, ia, None, ib, 0] = torch.randn(first_shape).to(x_clone1)
x = torch.autograd.Variable(torch.tensor([]))
z = torch.autograd.Variable(torch.IntTensor([1, 2, 3]))
a = [z[2], z[0] + 3]
x.new(a)
# self.assertEqual(x.new([z[2], z[0] + 3]).tolist(), [3, 4])
def test_index_placement(self):
A = torch.rand(1, 2, 3, 4)
i, j = dims(sizes=[2, 4])
a = A[:, i + 0, :, j + 0]
r = a.order(i, j)
assert torch.allclose(A.permute(1, 3, 0, 2), r)
def test_order(self):
i, j = dims()
A = torch.rand(3, 4, 5)
assert torch.allclose(A[i].order(1, i), A.permute(2, 0, 1))
def test_mask(self):
a = torch.rand(5)
i, j = dims(sizes=[a.size(0), a.size(0)])
((i >= j) * a[i]).sum(j).order(i)
def test_eq(self):
i, j = dims(sizes=[3, 3])
assert (i == j).sum((i, j)) == 3
def test_dims_with_size(self):
x = dims(3)
assert len(x) == 3 and isinstance(x[0], Dim)
class Foo:
pass
y = Foo()
z, y.x, q = dims(3)
assert str(z) == "z"
assert str(y.x) == "d1"
assert str(q) == "d2"
def test_dir(self):
i, j = dims(sizes=[3, 3])
dir(i <= j)
def test_doc(self):
assert Tensor.clamp.__doc__ == torch.Tensor.clamp.__doc__
def test_embed(self):
embeddings = torch.rand(8, 32)
ids = torch.tensor([1, 0, 3, 4])
# slow but Pythonic
values_ = torch.empty(4, 32)
for batch in range(ids.size(0)):
for feature in range(embeddings.size(1)):
values_[batch, feature] = embeddings[ids[batch], feature]
# with torchdim, single indexing kernel
batch, feature = dims(2)
values = embeddings[ids[batch], feature].order(batch, feature)
assert torch.allclose(values, values_)
def test_functorch(self):
A = torch.rand(3, 4, 5)
B = torch.rand(3, 4, 5)
C = torch.rand(5, 2)
i, j = dims()
AA = torch.mm(A[i], C) # 3, 4, 2
BB = torch.mm(B[j], C) # 3, 4, 2
assert list(torch.mm(AA.T, BB).order(i, j).shape) == [3, 3, 2, 2]
skip_functorch_only = ['test_time_mm_fuse', 'test_attn_cuda']
class TestMinFunctorchOnly(TestMin):
def setUp(self):
super().setUp()
_set_pointwise_optimize(False)
def tearDown(self):
_set_pointwise_optimize(True)
super().tearDown()
for n in skip_functorch_only:
setattr(TestMinFunctorchOnly, n, skip("skip_functorch_only")(lambda self: None))
if __name__ == '__main__':
run_tests()
| pytorch-master | functorch/test/test_dims.py |
# Owner(s): ["module: functorch"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import OrderedDict
from unittest.case import skipIf
from torch.testing._internal.common_utils import TestCase, run_tests
import torch
import torch.nn.functional as F
from torch import Tensor
import functools
import itertools
import warnings
import unittest
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import instantiate_device_type_tests, \
skipCUDAIfNoMagma
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_utils import (
parametrize,
instantiate_parametrized_tests,
subtest
)
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
from functorch_additional_op_db import additional_op_db
from common_utils import (
get_fallback_and_vmap_exhaustive,
xfail,
skip,
skipOps,
check_vmap_fallback,
tol1,
opsToleranceOverride,
is_batch_norm_training,
generate_vmap_inputs,
compute_quantities_for_vmap_test,
is_valid_inplace_sample_input,
)
import types
from collections import namedtuple
import functorch
from functorch import vmap, grad, grad_and_value, jvp, vjp
from functorch.experimental import chunk_vmap
from functorch._C import reshape_dim_into, reshape_dim_outof
from functorch._src.make_functional import functional_init_with_buffers
FALLBACK_REGEX = 'There is a performance drop'
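# Context manager that temporarily enables the (normally silenced) vmap fallback warnings and
# restores the previous setting on exit.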
class EnableVmapFallbackWarnings:
def __enter__(self):
self.prev_state = torch._C._debug_only_are_vmap_fallback_warnings_enabled()
torch._C._debug_only_display_vmap_fallback_warnings(True)
def __exit__(self, *ignored):
torch._C._debug_only_display_vmap_fallback_warnings(self.prev_state)
class TestVmapAPI(TestCase):
def test_non_tensor_output_raises(self):
with self.assertRaisesRegex(ValueError, "got type <class 'float'> as a return"):
vmap(lambda x: 3.14)(torch.ones(3))
def multiple_outputs(x):
return x, 3
with self.assertRaisesRegex(ValueError, "got type <class 'int'> as a return"):
vmap(multiple_outputs)(torch.ones(3))
def test_different_map_dim_size_raises(self):
x = torch.randn(2)
y = torch.randn(3)
expected_msg = 'Expected all tensors to have the same size in the mapped dimension'
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(torch.mul)(x, y)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
def test_func_with_no_inputs(self):
expected_msg = 'got no inputs'
def foo():
return torch.randn(3)
def bar(x):
return torch.randn(3)
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(foo)()
with self.assertRaisesRegex(ValueError, expected_msg):
vmap(bar)()
def test_func_with_no_tensors(self):
def foo(x):
return torch.randn(3)
with self.assertRaisesRegex(ValueError, 'at least one Tensor'):
vmap(foo, (None,))(1)
def test_constant_function(self):
output = vmap(lambda x: torch.tensor(3.14))(torch.ones(3))
self.assertEqual(output, torch.tensor([3.14, 3.14, 3.14]))
def test_single_input(self):
x = torch.randn(2, 3)
def square(x):
return x * x
output = vmap(square)(x)
self.assertEqual(output, x * x)
def test_multiple_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul)(x, y)
self.assertEqual(output, x * y)
def test_multiple_outputs(self):
def foo(x):
return x * x, x * x * x
x = torch.randn(3)
outputs = vmap(foo)(x)
self.assertEqual(outputs[0], x * x)
self.assertEqual(outputs[1], x * x * x)
def test_multiple_outputs2(self):
# This is the same thing as
# def returns_tuple_of_tensors(x):
# return x, x
def returns_tuple_of_tensors(x):
return (x, x)
def returns_list_of_two_tensors(x):
return [x, x]
def returns_list_of_one_tensor(x):
return [x]
x = torch.randn(3)
# should not throw
vmap(returns_tuple_of_tensors)(x)
vmap(returns_list_of_two_tensors)(x)
vmap(returns_list_of_one_tensor)(x)
def test_nested_with_same_map_dim(self):
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
output = vmap(vmap(torch.mul))(x, y)
self.assertEqual(output, x * y)
output = vmap(vmap(vmap(torch.mul)))(x, y)
self.assertEqual(output, x * y)
def test_nested_with_diag_embed(self):
# diag_embed requires special testing because it is registered with conditional functionalization.
x = torch.randn(3, 3, 5)
output = vmap(vmap(torch.diag_embed))(x)
self.assertEqual(output, torch.diag_embed(x))
def test_nested_with_different_map_dim(self):
x = torch.randn(2, 3)
y = torch.randn(5, 3)
output = vmap(lambda x: vmap(lambda y: x * y)(y))(x)
self.assertEqual(output.shape, (2, 5, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
z = torch.randn(7, 3)
output = vmap(lambda x: vmap(lambda y: vmap(lambda z: x * y * z)(z))(y))(x)
self.assertEqual(output.shape, (2, 5, 7, 3))
self.assertEqual(output, x.view(2, 1, 1, 3) * y.view(5, 1, 3) * z)
def test_noop_in_inner_vmap(self):
x = torch.randn(3)
y = torch.randn(5)
output = vmap(lambda x: vmap(lambda y: x)(y))(x)
self.assertEqual(output, x.view(3, 1).expand(3, 5))
def test_unsupported_op_err_msg(self):
# Unsupported view op
tensor = torch.randn(2, 3)
msg = (
r"Batching rule not implemented for aten::.+; the "
r"fallback path doesn't work on out= or view ops"
)
# TODO: find a view op
# with self.assertRaisesRegex(RuntimeError, msg):
# vmap(torch.ravel)(tensor)
def out_op(x, y):
return torch.abs(x, out=y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(out_op)(tensor, tensor)
# Don't support non-tensor returns. This is a limitation of vmap;
# functions that don't return tensors must be special cased
with self.assertRaisesRegex(RuntimeError, 'Batching rule not implemented'):
vmap(torch.equal)(tensor, tensor)
def test_nonzero_out_dims(self):
# Basic test
tensor = torch.randn(2, 3)
result = vmap(lambda x: x, out_dims=1)(tensor)
self.assertEqual(result, tensor.permute(1, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
# Test that the batch dimension gets permuted to dim 2
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 0, 3))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
# negative out_dim
tensor = torch.randn(2, 3, 5, 7)
result = vmap(lambda x: x, out_dims=-1)(tensor)
self.assertEqual(result, tensor.permute(1, 2, 3, 0))
self.assertEqual(result.data_ptr(), tensor.data_ptr())
# check that out_dims works on ALL outputs
tensor = torch.randn(2, 3, 5, 7)
other = torch.randn(2, 3, 5, 7)
result = vmap(lambda x, y: (x, y), out_dims=2)(tensor, other)
self.assertEqual(result, (tensor.permute(1, 2, 0, 3), other.permute(1, 2, 0, 3)))
# use out_dims with the maximum vmap-able tensor dims (64 dims)
ndims = 64
shape = [2] + [1] * (ndims - 1)
expected_shape = [1, 1, 2] + [1] * (ndims - 3)
tensor = torch.randn(shape)
result = vmap(lambda x: x, out_dims=2)(tensor)
self.assertEqual(result.shape, expected_shape)
# test something that is not the identity function
def foo(x, y):
return x, x * y, x * y * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=1)(x, y)
self.assertEqual(
result,
(x.permute(1, 0, 2), (x * y).permute(1, 0, 2), (x * y * y).permute(1, 0, 2)))
def test_multiple_out_dims(self):
def foo(x):
return x, x
def bar(x, y):
return x, x, x, x * y
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
result = vmap(foo, out_dims=(0, 1))(x)
self.assertEqual(result, (x, x.permute(1, 0, 2)))
result = vmap(bar, out_dims=(-1, 0, 1, 2))(x, y)
expected = (
x.permute(1, 2, 0),
x,
x.permute(1, 0, 2),
(x * y).permute(1, 2, 0),
)
self.assertEqual(result, expected)
def test_nested_out_dims(self):
y = torch.randn(2, 3, 5, 7)
# Inner vmap has non-zero out_dim
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y))(y)
self.assertEqual(result.shape, (2, 5, 3, 7))
self.assertEqual(result, y.permute(0, 2, 1, 3))
# all vmaps have non-zero out_dim
result = vmap(lambda y: vmap(lambda x: x, out_dims=1)(y), out_dims=1)(y)
self.assertEqual(result.shape, (5, 2, 3, 7))
self.assertEqual(result, y.permute(2, 0, 1, 3))
# throwing in some negative out_dims
result = vmap(lambda y: vmap(lambda x: x, out_dims=-1)(y), out_dims=-1)(y)
self.assertEqual(result.shape, (5, 7, 3, 2))
self.assertEqual(result, y.permute(2, 3, 1, 0))
# testing fn that isn't the identity
x = torch.randn(2, 3)
y = torch.randn(5, 3)
result = vmap(lambda y: vmap(lambda x: x * y, out_dims=1)(x), out_dims=-1)(y)
self.assertEqual(result.shape, (3, 2, 5))
self.assertEqual(result, (y.view(5, 1, 3) * x).permute(2, 1, 0))
def test_out_dims_edge_case(self):
def foo(x):
return x
# Test that we accept out_dims=(1,) for a function with one output.
tensor = torch.randn(2, 3)
expected = vmap(foo, out_dims=1)(tensor)
result = vmap(foo, out_dims=(1,))(tensor)
self.assertEqual(result, expected)
def test_pytree_returns(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y), [y, (y, y)]
y0, (y1, y2), (y3, (y4, y5)) = vmap(f)(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y0, y1)
self.assertEqual(y2, y1)
self.assertEqual(y2, y3)
self.assertEqual(y4, y3)
self.assertEqual(y5, y4)
def test_pytree_odict_returns(self):
x = torch.randn(2, 3)
def f(t):
y = t.sin()
return OrderedDict([("sin", y), ("cos", t.cos())])
out = vmap(f)(x)
assert isinstance(out, OrderedDict)
expected = f(x)
self.assertEqual(out["sin"], expected["sin"])
self.assertEqual(out["cos"], expected["cos"])
def test_pytree_returns_outdims(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, (0, 1)))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, x.sin())
self.assertEqual(y2, x.sin().t())
def test_pytree_returns_broadcast_simple(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=1)(x)
self.assertEqual(y0, x.sin().t())
self.assertEqual(y1, y0)
self.assertEqual(y2, y0)
def test_pytree_returns_broadcast_nested(self):
x = torch.randn(2, 3)
def f(x):
y = x.sin()
return y, (y, y)
y0, (y1, y2) = vmap(f, out_dims=(0, 1))(x)
self.assertEqual(y0, x.sin())
self.assertEqual(y1, y0.t())
self.assertEqual(y2, y0.t())
def test_out_dims_must_be_int_or_collection_of_int_err_msg(self):
msg = 'must be an int or a python collection of ints'
tensor = torch.randn(2, 3)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims='lol')(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=('lol',))(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=None)(tensor)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(None,))(tensor)
def test_out_dims_and_num_outputs_mismatch_err_msg(self):
msg = 'not compatible'
x = torch.randn(2, 3, 5)
# Too many out_dims
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: x, out_dims=(0, 0))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0, 0, 0))(x)
# Too few out_dims
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x), out_dims=(0,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda x: (x, x, x), out_dims=(0, 0))(x)
def test_out_dim_out_of_bounds_err_msg(self):
        # TODO(rzou): This error message isn't that great. It comes straight
        # from maybe_wrap_dim. In the future, consider catching the error in C++
        # and re-raising it with more context added to the message.
msg = 'Dimension out of range'
x = torch.randn(2, 3, 5)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=3)(x)
with self.assertRaisesRegex(IndexError, msg):
vmap(lambda x: x, out_dims=-4)(x)
def test_non_zero_in_dims(self):
tensor = torch.randn(2, 3, 5)
# Implicit out_dims = 0; vmap will move the batch dim to the front.
output = vmap(lambda x: x, (1,))(tensor)
self.assertEqual(output, tensor.permute(1, 0, 2))
self.assertEqual(output.data_ptr(), tensor.data_ptr())
x = torch.randn(2, 3)
y = torch.randn(3, 2)
output = vmap(torch.mul, (0, 1))(x, y)
self.assertEqual(output, x * y.t())
output = vmap(torch.mul, (1, 0))(x, y)
self.assertEqual(output, x.t() * y)
def test_none_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
# None in_dim for a Tensor means we don't map over it
output = vmap(torch.mul, (0, None))(x, y)
self.assertEqual(output.shape, (2, 2, 3))
self.assertEqual(output, x.view(2, 1, 3) * y)
# None in_dim for non-tensor arguments
output = vmap(torch.mul, (0, None))(x, 2)
self.assertEqual(output, x * 2)
def test_nested_non_default_in_dims(self):
x = torch.rand(5, 2, 3)
y = torch.rand(3, 5, 2)
result = vmap(vmap(vmap(torch.mul), (1, 0)), (1, 2))(x, y)
self.assertEqual(result, x.permute(1, 2, 0) * y.permute(2, 0, 1))
def test_nested_negative_in_dims(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
output = vmap(torch.mul, (-1, -1))(x, y)
self.assertEqual(output.shape, (3, 2))
self.assertEqual(output, (x * y).permute(1, 0))
def test_non_default_in_dims_out_dims(self):
x = torch.randn(2, 3, 5)
# Same in_dim as out_dim, vmap over identity
result = vmap(lambda x: x, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x)
self.assertEqual(result.data_ptr(), x.data_ptr())
# Different in_dim from out_dim, vmap over identity
result = vmap(lambda x: x, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, x.transpose(1, 2))
self.assertEqual(result.data_ptr(), x.data_ptr())
def foo(x):
return x * 2
# Same in_dim as out_dim, vmap over operation
result = vmap(foo, in_dims=1, out_dims=1)(x)
self.assertEqual(result, x * 2)
# Different in_dim as out_dim, vmap over operation
result = vmap(foo, in_dims=2, out_dims=1)(x)
self.assertEqual(result.shape, (2, 5, 3))
self.assertEqual(result, (x * 2).transpose(1, 2))
# Basic nested test.
result = vmap(vmap(foo, 1, 1), 1, 1)(x)
self.assertEqual(result, x * 2)
def test_item_throws(self):
def f(x):
return x.item()
with self.assertRaisesRegex(RuntimeError, r'item\(\) on a Tensor'):
vmap(f)(torch.randn(3))
def test_data_dependent_control_flow_throws(self):
def f(x):
if x:
return x
return 0
with self.assertRaisesRegex(RuntimeError, r'data-dependent control flow'):
vmap(f)(torch.randn(3))
def test_accepts_nested_inputs(self):
x = torch.randn(2, 3)
y = torch.randn(2, 3)
# Single layer of nesting
out = vmap(lambda z: z[0] + z[1])((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))((x, y))
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1])([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=(0,))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, y])
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'])({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=(0,))({'x': x, 'y': y})
self.assertEqual(out, x + y)
out = vmap(lambda z: z['x'] + z['y'], in_dims=({'x': 0, 'y': 0},))({'x': x, 'y': y})
self.assertEqual(out, x + y)
# Multiple layers of nesting
out_fn = vmap(lambda z: z['x'][0] + z['x'][1][0] + z['y'][0] + z['y'][1])
out = out_fn({'x': [x, (x,)], 'y': [y, y]})
self.assertEqual(out, x + x + y + y)
def test_in_dims_wrong_type_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'expected `in_dims` to be int or a \(potentially nested\) tuple'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, [0, 0])(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, set({0, 0}))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, 'lol')(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=[0, 0])([x, y])
# The following should not throw
vmap(torch.mul, (0, 0))(x, y)
def test_not_enough_in_dims_err_msg(self):
x = torch.randn(3)
y = torch.randn(3)
msg = r'in_dims is not compatible with the structure of `inputs`'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0,))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.mul, (0, 0, 0))(x, y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0],))([x, y])
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=((0, 0),))([x, y])
# The following should not throw
vmap(torch.mul, (0, 0))(x, y)
def test_integer_in_dim_but_not_tensor_input_err_msg(self):
def foo(xy):
return xy[0] * xy[1]
def bar(x, yz):
return x * yz[0] * yz[1]
x = torch.randn(2, 3)
        # The following are errors in JAX (and will always be errors).
msg = 'Got in_dim=0 for an input but the input is of type'
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum)(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(torch.sum, (0, 0))(x, 0)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([0, 0],))([x, 1])
# The following should not throw
vmap(torch.sum, (0, None))(x, 0)
def test_in_dim_not_in_tensor_err_msg(self):
def foo(x):
return x * x
x = torch.randn(2, 3)
y = torch.randn(2, 3)
msg = r'Got in_dim=-?\w for some input, but that input is a Tensor of dimensionality \w'
with self.assertRaisesRegex(ValueError, msg):
vmap(foo)(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(0,))(torch.randn([]))
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(-3,))(x)
with self.assertRaisesRegex(ValueError, msg):
vmap(foo, in_dims=(2,))(y)
with self.assertRaisesRegex(ValueError, msg):
vmap(lambda z: z[0] + z[1], in_dims=([3, 0],))([x, y])
# the following should not throw
vmap(foo, in_dims=(0,))(torch.randn(2, 3))
vmap(foo, in_dims=(1,))(torch.randn(2, 3))
def test_fallback_does_not_warn_by_default(self):
        # NB: One day we will implement a batching rule for torch.copysign.
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
vmap(op)(x, y)
# The single warning here is the "vmap is experimental"
# warning, not a warning from the vmap fallback path.
self.assertEqual(len(wa), 1)
@unittest.expectedFailure
def test_fallback_warns_when_warnings_are_enabled(self):
        # NB: One day we will implement a batching rule for torch.copysign.
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
with warnings.catch_warnings(record=True) as wa:
with EnableVmapFallbackWarnings():
vmap(op)(x, y)
self.assertEqual(len(wa), 2)
self.assertRegex(str(wa[-1].message), FALLBACK_REGEX)
def _assert_uses_vmap_fallback(self, vmap_args, inputs):
return
# with warnings.catch_warnings(record=True) as wa:
# with EnableVmapFallbackWarnings():
# result = vmap(*vmap_args)(*inputs)
# self.assertEqual(len(wa), 2)
# self.assertRegex(str(wa[-1].message), FALLBACK_REGEX)
def test_fallback_zero_dim(self):
        # NB: One day we will implement a batching rule for torch.copysign.
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
op = torch.copysign
x = torch.randn(11)
y = torch.randn(11)
self._assert_uses_vmap_fallback((op,), (x, y))
B0, B1 = 0, 3
x = torch.randn(B0, 11)
y = torch.randn(11)
msg = 'The fallback path does not support vmap over dims of size 0'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
x = torch.randn(B0, B1, 11)
y = torch.randn(B1, 11)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (0, None))(x, y)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, (None, 0))(y, x)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(x, x)
def test_fallback_atan2(self):
        # NB: One day we will implement a batching rule for torch.copysign.
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
op = torch.copysign
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
        # fallback on torch.copysign
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(op, (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
        # fallback on torch.copysign, nested vmap
x = torch.randn(7, 11, 5)
y = torch.randn(5, 7, 11)
result = vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(result, op(x.permute(2, 0, 1), y))
# big batch size (total 10000)
x = torch.randn(100, 10, 10, 5)
y = torch.randn(100, 10, 10)
result = vmap(vmap(vmap(op)))(x, y)
self.assertEqual(result, op(x, y.view(100, 10, 10, 1)))
# TODO: No clue what is wrong here.
@unittest.skip
def test_fallback_masked_fill(self):
        # NB: One day we will implement a batching rule for torch.index_add
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
def run_test(batch_size):
B0 = batch_size
x = torch.randn(B0, 7, 11, 13)
dim = 0
index = torch.tensor([0, 4, 2])
values = torch.randn(B0, 3, 13)
self._assert_uses_vmap_fallback((torch.index_add, (0, None, None, 0)), (x, dim, index, values))
result = vmap(torch.index_add, (0, None, None, 0))(x, dim, index, values)
expected = torch.index_add(
x, dim + 1, index, values.view(B0, 3, 1, 13))
self.assertEqual(result, expected)
run_test(batch_size=5)
run_test(batch_size=1237)
def test_fallback_multiple_returns(self):
# NB: One day we will implement a batching rule for torch.var_mean
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
B0, B1, B2 = 2, 3, 1237
tensor = torch.randn(B0, 10)
self._assert_uses_vmap_fallback((torch.var_mean,), (tensor,))
# fallback correctness on torch.var_mean
result = vmap(torch.var_mean)(tensor)
expected = torch.var_mean(tensor, dim=1)
self.assertEqual(result, expected)
# nested vmap
tensor = torch.randn(B0, B1, 10)
result = vmap(vmap(torch.var_mean))(tensor)
expected = torch.var_mean(tensor, dim=2)
self.assertEqual(result, expected)
# big batch size, nested vmap
tensor = torch.randn(B0, B1, B2, 10)
result = vmap(vmap(vmap(torch.var_mean)))(tensor)
expected = torch.var_mean(tensor, dim=3)
self.assertEqual(result, expected)
def test_inplace_fallback_unary(self):
# Test the in-place fallback on an in-place method that takes no
# additional Tensor arguments. This is the simplest case of the fallback.
# NB: One day we will implement a batching rule for acos_.
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
op = Tensor.acos_
B0, B1, B2 = 2, 3, 10000
x = torch.randn(B0, 5)
self._assert_uses_vmap_fallback((op,), (x,))
# Single vmap
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op)(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
# Single vmap + different out_dim produces a view(!)
x_orig = torch.rand(B0, 5)
x = x_orig.clone()
result = vmap(op, out_dims=(1,))(x)
self.assertTrue(result._base is x)
self.assertEqual(result, x_orig.t().acos())
# Nested vmap
x_orig = torch.randn(B0, B1, 5)
x = x_orig.clone()
result = vmap(vmap(op))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
# Nested vmap, large batch size
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
result = vmap(vmap(vmap(op)))(x)
self.assertTrue(result is x)
self.assertEqual(result, x_orig.acos())
def test_inplace_fallback_nary_same_levels(self):
# NB: One day we will implement a batching rule for atan2_
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
op = Tensor.atan2_
outplace_op = torch.atan2
x = torch.randn(5, 7, 11)
y = torch.randn(5, 7, 11)
self._assert_uses_vmap_fallback((op,), (x, y))
# Single vmap
B0 = 5
x_orig = torch.randn(7, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, 7, 11)
vmap(op, (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim(0, 2)))
# Nested vmap
B0, B1 = 5, 7
x_orig = torch.randn(B1, 11, B0)
x = x_orig.clone()
y = torch.randn(B0, B1, 11)
vmap(vmap(op), (2, 0))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.movedim([0, 1], [2, 0])))
# big batch size (total 10000)
B0, B1, B2 = 100, 10, 10
x_orig = torch.randn(B0, B1, B2, 5)
x = x_orig.clone()
y = torch.randn(B0, B1, B2)
vmap(vmap(vmap(op)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, B1, B2, 1)))
# ("Fallback isInplaceVmapCompatible check is broken")
@unittest.expectedFailure
def test_inplace_fallback_nary_different_levels(self):
# NB: One day we will implement a batching rule for atan2_
# If/when we do, this test should be replaced to test the fallback
# path on another operator to avoid bitrot.
op = Tensor.atan2_
outplace_op = torch.atan2
B0, B1 = 2, 3
x = torch.rand(B0, 7)
y = torch.rand(7)
self._assert_uses_vmap_fallback((op, (0, None)), (x, y))
# op(left, right): All of the levels in right are found in left
x_orig = torch.rand(B0, 7)
x = x_orig.clone()
y = torch.rand(7)
vmap(op, in_dims=(0, None))(x, y)
self.assertEqual(x, outplace_op(x_orig, y))
x_orig = torch.rand(B0, B1, 7)
x = x_orig.clone()
y = torch.rand(B0, 7)
vmap(vmap(op, in_dims=(0, None)))(x, y)
self.assertEqual(x, outplace_op(x_orig, y.view(B0, 1, 7)))
# op(left, right): Some of the levels in right are not found in left
msg = r'vmap: aten::atan2_\(self, \*extra_args\) is not possible'
x = torch.rand(7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(B0, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 0))(x, y)
x = torch.rand(B1, 7)
y = torch.rand(7, B0)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(0, None)), in_dims=(None, 1))(x, y)
x = torch.rand(B0, 7)
y = torch.rand(B0, B1, 7)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=(None, 0)))(x, y)
def test_backward_unsupported_interaction(self):
x = torch.randn(3, requires_grad=True)
y = torch.randn(5)
grad = torch.randn_like(x)
err_msg = r'backward\(\) called inside a functorch transform'
def backward_on_vmapped_tensor(x):
x.sum().backward()
# FIXME
return self.skipTest("error: element 0 of tensors does not require grad and does not have a grad_fn")
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_on_vmapped_tensor)(x)
def backward_with_vmapped_grad(x, grad):
x.backward(grad)
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(backward_with_vmapped_grad)(x, grad)
def completely_unrelated_backward(y):
x.sum().backward()
return y
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(completely_unrelated_backward)(y)
@unittest.expectedFailure
def test_grad_unsupported_interaction(self):
input_tensor = torch.randn(3, requires_grad=True)
err_msg = 'autograd.grad.* called inside torch.vmap'
captured = torch.randn(3, requires_grad=True)
def output_to_grad_is_vmapped(input_tensor):
output = (captured * input_tensor).sum()
return torch.autograd.grad([output], [captured])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(output_to_grad_is_vmapped)(input_tensor)
output = (input_tensor ** 2).sum()
def input_to_grad_is_vmapped(input_tensor):
return torch.autograd.grad([output], [input_tensor])[0]
with self.assertRaisesRegex(RuntimeError, err_msg):
vmap(input_to_grad_is_vmapped)(input_tensor)
def test_batched_gradient_basic(self):
N = 3
x = torch.randn(N, requires_grad=True)
y = torch.randn(N)
def vjp_mul(v):
return torch.autograd.grad([x * y], [x], grad_outputs=[v])[0]
batched_v = torch.eye(N)
jacobian = vmap(vjp_mul)(batched_v)
self.assertEqual(jacobian, torch.diagflat(y))
def test_functools_partial(self):
x = torch.randn(3)
y = torch.randn(2, 3)
result = vmap(functools.partial(torch.mul, x))(y)
self.assertEqual(result, x * y)
def test_nn_module(self):
tensor = torch.randn(2, 3)
model = torch.nn.Linear(3, 3, bias=False)
result = vmap(model)(tensor)
self.assertEqual(result, model(tensor))
def test_fallback_with_undefined_grad(self):
B0 = 7
x = torch.randn(2, 3, 4, 5, requires_grad=True)
weight = torch.randn(3, 3, 1, 1)
v = torch.randn(B0, 2, 3, 4, 5)
def get_vjp(v):
result = torch.nn.functional.conv2d(x, weight)
grad_x, = torch.autograd.grad(result, x, v)
return grad_x
# Runs vmap(get_vjp)(v), which should not error out.
# The backward formula for convolution returns an undefined
# Tensor for grad_bias because the original bias does not exist.
#
# In the future we'll probably add a batching rule for convolution
# backward. When this happens, we should modify this test to use a
# different op (and/or create and use a dummy operator) to avoid bitrot.
self._assert_uses_vmap_fallback([get_vjp], [v])
def test_reshape_dim_into(self):
x = torch.randn(2, 3, 5, 7)
y = reshape_dim_into(0, 0, x)
self.assertEqual(y, x.reshape(6, 5, 7))
y = reshape_dim_into(0, 1, x)
self.assertEqual(y, x.movedim(0, 1).reshape(3, 2 * 5, 7))
y = reshape_dim_into(0, 2, x)
self.assertEqual(y, x.movedim(0, 2).reshape(3, 5, 2 * 7))
y = reshape_dim_into(1, 2, x)
self.assertEqual(y, x.movedim(1, 2).reshape(2, 5, 3 * 7))
y = reshape_dim_into(0, -2, x)
self.assertEqual(y, x.movedim(0, 1).reshape(3, 2 * 5, 7))
y = reshape_dim_into(0, -1, x)
self.assertEqual(y, x.movedim(0, 2).reshape(3, 5, 2 * 7))
y = reshape_dim_into(-4, -1, x)
self.assertEqual(y, x.movedim(0, 2).reshape(3, 5, 2 * 7))
def test_reshape_dim_outof(self):
x = torch.randn(12, 12, 12).permute(2, 1, 0)
y = reshape_dim_outof(0, 2, x)
self.assertEqual(y, x.reshape(2, 6, 12, 12))
y = reshape_dim_outof(1, 4, x)
self.assertEqual(y, x.reshape(12, 4, 3, 12))
y = reshape_dim_outof(2, 6, x)
self.assertEqual(y, x.reshape(12, 12, 6, 2))
y = reshape_dim_outof(-1, 6, x)
self.assertEqual(y, x.reshape(12, 12, 6, 2))
def test_batch_rule_does_not_need_to_handle_no_batched_input(self):
def f(x, y):
res = torch.dot(y, torch.ones(2))
return x + res
x = torch.randn(7, 5)
y = torch.randn(3, 2)
out = vmap(vmap(f, in_dims=(0, None)), in_dims=(None, 0))(x, y)
expected = torch.mv(y, torch.ones(2)).view(3, 1, 1) + x
self.assertEqual(out, expected)
def _test_vmap_autocast(self, device):
if torch.device(device).type == "cpu":
amp_dtype = torch.bfloat16
else:
amp_dtype = torch.float16
a_float32 = torch.rand(4, 2, 3, device=device)
b_float32 = torch.rand(4, 3, 2, device=device)
c_float32 = torch.rand(4, 2, 2, device=device)
d_float32 = torch.rand(4, 3, 2, device=device)
# Case 1, autocast inside vmapped function
def func1(x, y, z, w):
with torch.autocast(dtype=amp_dtype, device_type=device):
e_float16 = torch.matmul(x, y)
assert e_float16.dtype == amp_dtype, e_float16.dtype
f_float16 = torch.matmul(z, e_float16)
assert f_float16.dtype == amp_dtype, f_float16.dtype
return torch.matmul(w, f_float16.float())
expected = func1(a_float32, b_float32, c_float32, d_float32)
out = vmap(func1)(a_float32, b_float32, c_float32, d_float32)
assert expected.allclose(out)
# Case 2, autocast decorator inside vmapped function
@torch.autocast(dtype=amp_dtype, device_type=device)
def func2(x, y, z, w):
e_float16 = torch.matmul(x, y)
assert e_float16.dtype == amp_dtype, e_float16.dtype
f_float16 = torch.matmul(z, e_float16)
assert f_float16.dtype == amp_dtype, f_float16.dtype
return torch.matmul(w, f_float16)
expected = func2(a_float32, b_float32, c_float32, d_float32)
out = vmap(func2)(a_float32, b_float32, c_float32, d_float32)
assert expected.allclose(out)
# Case 3, autocast is outside vmapped function
def func3(x, y, z, w):
e_float16 = torch.matmul(x, y)
assert e_float16.dtype == amp_dtype, e_float16.dtype
f_float16 = torch.matmul(z, e_float16)
assert f_float16.dtype == amp_dtype, f_float16.dtype
return torch.matmul(w, f_float16)
with torch.autocast(dtype=amp_dtype, device_type=device):
expected = func3(a_float32, b_float32, c_float32, d_float32)
out = vmap(func3)(a_float32, b_float32, c_float32, d_float32)
assert expected.allclose(out)
@unittest.skip("Somehow, vmap and autocast do not work on CPU")
def test_vmap_autocast_cpu(self):
self._test_vmap_autocast("cpu")
@skipIf(not torch.cuda.is_available(), "CUDA is unavailable")
def test_vmap_autocast_cuda(self):
self._test_vmap_autocast("cuda")
def slice_inputs(inputs, bdims, i):
result = []
for inp, bdim in zip(inputs, bdims):
if bdim is None:
result.append(inp)
else:
result.append(inp.select(bdim, i))
return tuple(result)
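# reference_vmap: a slow-but-simple reference implementation of vmap semantics
# used by _vmap_test below. Conceptually, for a single batched input it computes
# something like the following sketch (generalized below to multiple inputs via
# slice_inputs and to multiple outputs):
#   torch.stack([op(inp.select(in_dim, i)) for i in range(inp.size(in_dim))],
#               dim=out_dim)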
def reference_vmap(op, inputs, in_dims=0, out_dims=0):
if isinstance(in_dims, int):
in_dims = (in_dims,) * len(inputs)
bdim_sizes = [inp.size(dim) for inp, dim in zip(inputs, in_dims) if dim is not None]
assert all(bdim_size == bdim_sizes[0] for bdim_size in bdim_sizes)
bdim_size = bdim_sizes[0]
results = tuple(op(*slice_inputs(inputs, in_dims, i)) for i in range(bdim_size))
assert len(results) > 0
op_has_single_return = not isinstance(results[0], tuple)
if op_has_single_return:
assert all(isinstance(result, torch.Tensor) for result in results)
if isinstance(out_dims, int):
out_dims = (out_dims,) * 1
return torch.stack(results, dim=out_dims[0])
assert all(isinstance(result, tuple) for result in results)
num_returns = len(results[0])
assert all(len(result) == num_returns for result in results)
if isinstance(out_dims, int):
out_dims = (out_dims,) * num_returns
return tuple(torch.stack(result_shards, out_dim)
for result_shards, out_dim in zip(zip(*results), out_dims))
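# Input factories for the tests below. randp1 produces values in [1, 2), i.e.
# strictly positive inputs for ops with restricted domains (log, rsqrt, div, ...).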
class TensorFactory:
@staticmethod
def rand(size, device='cpu', dtype=torch.float):
return torch.rand(size, device=device, dtype=dtype)
@staticmethod
def randn(size, device='cpu', dtype=torch.float):
return torch.randn(size, device=device, dtype=dtype)
@staticmethod
def randp1(size, device='cpu', dtype=torch.float):
return torch.rand(size, device=device, dtype=dtype) + 1
# Tests vmap(op, in_dims, out_dims)(*inputs) by comparing the output to a
# (slow) sequential map+stack fallback.
#
# check_view: Test if the first returned output is a view of the first input
# check_propagates_grad: Test if the operation propagates gradients.
def _vmap_test(self, op, inputs, in_dims=0, out_dims=0,
check_view=False, check_propagates_grad=True):
result = vmap(op, in_dims, out_dims)(*inputs)
reference_result = reference_vmap(op, inputs, in_dims, out_dims)
self.assertEqual(result, reference_result)
op_has_single_return = not isinstance(result, tuple)
if check_view:
result_as_tuple = (result,) if op_has_single_return else result
for output in result_as_tuple:
input0_base = inputs[0] if inputs[0]._base is None else inputs[0]._base
self.assertTrue(output._base is input0_base,
msg="result was not a view of the first input!")
if not check_propagates_grad:
return
# Assuming input[0] is a floating-point tensor. Check if the vmap
# operation propagates the requires_grad flag to the zeroth output.
# Some vmap operators are implemented in a way that assumes that
# they are composite with respect to autograd. If the operator ever is
# changed to not be composite with respect to autograd, then the
# following check should fail.
inputs_clone = list(inputs)
inputs_clone[0] = inputs[0].clone().requires_grad_()
result = vmap(op, in_dims, out_dims)(*inputs_clone)
result_as_tuple = (result,) if op_has_single_return else result
    self.assertTrue(result_as_tuple[0].requires_grad)
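# A minimal usage sketch of _vmap_test (the operator classes below forward to it
# with their TestCase instance as `self`); the test name and inputs here are
# illustrative only:
#
#   def test_add(self):
#       B0 = 3
#       x, y = torch.randn(B0, 5), torch.randn(B0, 5)
#       # compares vmap(torch.add)(x, y) against reference_vmap(torch.add, (x, y))
#       _vmap_test(self, torch.add, (x, y), in_dims=(0, 0), out_dims=0)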
def should_allow_vmap_fallback_usage(fn):
return getattr(fn, '_allow_vmap_fallback_usage', False)
def allowVmapFallbackUsage(fn):
fn._allow_vmap_fallback_usage = True
return fn
# All tests of TestVmapBase check that the slow vmap fallback is never invoked.
# This is so that we can incrementally add batching rules for operators to
# replace the slow vmap fallback path for said operators. To skip this check,
# please use the allowVmapFallbackUsage decorator.
#
# NB: Don't add tests to TestVmapBase directly, unless you want them to run
# on every subclass of TestVmapBase. Add them to e.g. TestVmapOperators.
#
# NB: TestVmapBase is a nested class. This prevents test runners from picking
# it up and running it.
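# For example, a test that intentionally exercises the fallback would look
# roughly like this (a sketch; `op_without_batching_rule` is a placeholder):
#
#   @allowVmapFallbackUsage
#   def test_intentionally_uses_fallback(self):
#       vmap(op_without_batching_rule)(torch.rand(3))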
class Namespace:
class TestVmapBase(TestCase):
def __init__(self, method_name='runTest'):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is None:
return
if not should_allow_vmap_fallback_usage(test_method):
setattr(self, method_name,
self._wrap_method_with_vmap_fallback_check(test_method))
def _wrap_method_with_vmap_fallback_check(self, method):
# msg = (
# 'Expected the test to not invoke the vmap fallback path, i.e., '
# 'all of the operators being tested in this test should have batching '
# 'rules implemented. If you are intentionally testing something to '
# 'do with the fallback path, use allowVmapFallbackUsage. Otherwise, '
# 'please make sure that batching rules are implemented for the '
# 'operator(s) being tested.'
# )
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
with EnableVmapFallbackWarnings():
method(*args, **kwargs)
# for captured_warning in wa:
# self.assertNotRegex(str(captured_warning.message), FALLBACK_REGEX, msg)
return types.MethodType(wrapper, self)
@allowVmapFallbackUsage
def test_vmap_fallback_check_ok(self):
# One day we'll implement a batching rule for torch.var_mean.
# When that happens, please change the example to use an
# operator that doesn't have a batching rule implemented.
op_using_fallback = torch.var_mean
vmap(op_using_fallback)(torch.rand(3))
@unittest.expectedFailure
def test_vmap_fallback_check(self):
@self._wrap_method_with_vmap_fallback_check
def no_fallback(self):
pass
# One day we'll implement a batching rule for torch.var_mean.
# When that happens, please change the example to use an
# operator that doesn't have a batching rule implemented.
op_using_fallback = torch.var_mean
@self._wrap_method_with_vmap_fallback_check
def uses_fallback(self):
vmap(op_using_fallback)(torch.rand(3))
no_fallback(self)
with self.assertRaises(AssertionError):
uses_fallback(self)
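# Pairs an op with the TensorFactory method that produces inputs in a valid
# domain for that op (e.g. randp1 for div/pow in the parametrized cases below).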
def _make_case(op, input_getter=TensorFactory.randn):
return (op, input_getter)
class TestVmapOperators(Namespace.TestVmapBase):
def _vmap_test(self, *args, **kwargs):
return _vmap_test(self, *args, **kwargs)
def _vmap_view_test(self, *args, **kwargs):
self._vmap_test(*args, **kwargs, check_view=True)
def _test_unary(self, op, getter, device, *args, **kwargs):
test = functools.partial(self._vmap_test, *args, **kwargs)
B0, B1 = 7, 11
# Single vmap, various in_dims / out_dims
test(op, [getter([B0, 3], device)])
test(op, [getter([2, 5, B0, 3], device)], in_dims=2)
test(op, [getter([2, 5, B0, 3], device)], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [getter([B0, B1], device)])
test(vmap(op), [getter([B1, 2, 5, B0, 3], device)], in_dims=2)
test(vmap(op, in_dims=2), [getter([2, 5, B0, B1, 3], device)],
in_dims=2, out_dims=2)
@parametrize("case", [
(torch.abs, TensorFactory.randn),
(torch.acos, TensorFactory.rand),
(torch.asin, TensorFactory.rand),
(torch.atan, TensorFactory.rand),
(torch.ceil, TensorFactory.randn),
(torch.cos, TensorFactory.rand),
(torch.cosh, TensorFactory.rand),
(torch.digamma, TensorFactory.rand),
(torch.exp, TensorFactory.randn),
(torch.expm1, TensorFactory.randn),
(torch.floor, TensorFactory.randn),
(torch.frac, TensorFactory.randn),
(torch.lgamma, TensorFactory.rand),
(torch.log, TensorFactory.randp1),
(torch.log10, TensorFactory.randp1),
(torch.log1p, TensorFactory.randp1),
(torch.log2, TensorFactory.randp1),
(torch.neg, TensorFactory.randn),
(torch.reciprocal, TensorFactory.randp1),
(torch.relu, TensorFactory.randn),
(torch.round, TensorFactory.randn),
(torch.rsqrt, TensorFactory.randp1),
(torch.sigmoid, TensorFactory.randn),
(torch.sign, TensorFactory.randn),
(torch.sin, TensorFactory.rand),
(torch.sinh, TensorFactory.rand),
(torch.sqrt, TensorFactory.rand),
(torch.tan, TensorFactory.rand),
(torch.tanh, TensorFactory.rand),
(torch.trunc, TensorFactory.randn),
], name_fn=lambda x: x[0].__name__)
def test_unary_pointwise(self, case):
op, getter = case
self._test_unary(op, getter, 'cpu')
# test in-place
        method = getattr(Tensor, f'{op.__name__}_')
self._test_unary(method, getter, 'cpu', check_propagates_grad=False)
def test_clone(self):
# Some basic tests
self._test_unary(lambda x: x.clone(), TensorFactory.randn, 'cpu')
self._test_unary(lambda x: x.clone(memory_format=torch.preserve_format),
TensorFactory.randn, 'cpu')
self._test_unary(lambda x: x.clone(memory_format=torch.contiguous_format),
TensorFactory.randn, 'cpu')
# Test that the per-examples are contiguous when using torch.contiguous_format
def clone_contiguous(x):
return x.clone(memory_format=torch.contiguous_format)
B0, B1 = 3, 5
x = torch.randn(2, B0, 7)
y = vmap(clone_contiguous, in_dims=1, out_dims=1)(x)
self.assertTrue(y.movedim(1, 0).is_contiguous())
self.assertTrue(y[:, 0, :].is_contiguous())
x = torch.randn(2, B0, 7, B1)
y = vmap(vmap(clone_contiguous, in_dims=2), in_dims=1)(x)
self.assertTrue(y.is_contiguous())
self.assertTrue(y[0][0].is_contiguous())
msg = r'only supported with memory_format torch.preserve_format or torch.contiguous_format'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(lambda x: x.clone(memory_format=torch.channels_last))(torch.randn(B0))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(lambda x: x.clone(memory_format=torch.channels_last_3d))(torch.randn(B0))
def test_weird_matmul_case(self):
# Check that this doesn't crash.
# https://github.com/pytorch/functorch/issues/417
x = torch.randn(5, 2, 2, 2)
y = torch.randn(5, 7, 2)
vmap(vmap(torch.matmul, in_dims=(None, 0)))(x, y)
@parametrize("case",
(
(torch.clamp_min_, TensorFactory.randn),
(torch.clamp_max_, TensorFactory.randn),
), name_fn=lambda x: x[0].__name__)
def test_clamp_inplace_variant(self, case):
test = self._vmap_test
def get_number(getter):
return getter([]).item()
op, getter = case
device = 'cpu'
B0, B1 = 7, 11
# Single vmap: op(Tensor, Tensor)
test(op, (getter([B0, 3], device), getter([B0, 3], device)), check_propagates_grad=False)
test(op, (getter([B0], device), getter([B0], device)), check_propagates_grad=False)
test(op, (getter([2, B0, 3], device), getter([2, B0, 3], device)), in_dims=(1, 1), check_propagates_grad=False)
test(op, (getter([B0, 2, 3], device), getter([2, B0, 3], device)),
in_dims=(0, 1), out_dims=1, check_propagates_grad=False)
test(op, (getter([B0, 2, 3], device), getter([1, 1], device)), in_dims=(0, None), check_propagates_grad=False)
test(op, (getter([B0, 3], device), getter([B0, 3], device)), in_dims=(0, 0), check_propagates_grad=False)
# Nested vmap: op(Tensor, Tensor)
test(vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 1, 3], device)), check_propagates_grad=False)
# Python number overload: op(Tensor, Number)
number = get_number(getter)
self._test_unary(lambda t: op(t, number), getter, device, check_propagates_grad=False)
@parametrize('case', [
subtest(_make_case(torch.clamp_min), name='clamp_min'),
subtest(_make_case(torch.clamp_max), name='clamp_max'),
])
def test_clamp_variant(self, case):
test = self._vmap_test
def get_number(getter):
return getter([]).item()
op, getter = case
device = 'cpu'
B0, B1 = 7, 11
# Single vmap: op(Tensor, Tensor)
test(op, (getter([B0, 3], device), getter([B0, 3], device)))
test(op, (getter([B0], device), getter([B0, 2, 3], device)))
test(op, (getter([B0], device), getter([2, B0, 3], device)), in_dims=(0, 1))
test(op, (getter([B0], device), getter([2, B0, 3], device)),
in_dims=(0, 1), out_dims=1)
test(op, (getter([B0], device), getter([2, 3], device)), in_dims=(0, None))
test(op, (getter([2, 3], device), getter([B0, 3], device)), in_dims=(None, 0))
# Nested vmap: op(Tensor, Tensor)
test(vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 3], device)))
test(vmap(op, in_dims=(None, 0)),
(getter([B0, 2, 3], device), getter([B1, 3], device)), in_dims=(0, None))
# Python number overload: op(Tensor, Number)
number = get_number(getter)
self._test_unary(lambda t: op(t, number), getter, device)
def test_copy_(self):
x = torch.randn(3)
y = torch.randn(3)
vmap(Tensor.copy_)(x, y)
self.assertEqual(x, y)
x = torch.randn(3)
y = torch.randn(3, 2)
vmap(Tensor.copy_, in_dims=(1, None))(y, x)
self.assertEqual(y, x.expand(2, 3).t())
x = torch.randn(3)
y = torch.randn(2, 3)
with self.assertRaisesRegex(RuntimeError, 'inplace'):
vmap(Tensor.copy_, in_dims=(None, 0))(x, y)
def test_silu_backward(self):
test = self._vmap_test
device = 'cpu'
getter = TensorFactory.randp1
B0 = 7
op = torch.ops.aten.silu_backward
# Single vmap: op(Tensor, Tensor)
test(op, (getter([B0, 3], device), getter([B0, 3], device)))
test(op, (getter([], device), getter([B0], device)), in_dims=(None, 0))
test(op, (getter([2, B0], device), getter([2], device)), in_dims=(1, None))
@parametrize('case', [
subtest(_make_case(torch.add), name='add'),
subtest(_make_case(lambda x, y: x + y), name='add_dunder'),
subtest(_make_case(torch.sub), name='sub'),
subtest(_make_case(lambda x, y: x - y), name='sub_dunder'),
subtest(_make_case(torch.mul), name='mul'),
subtest(_make_case(lambda x, y: x * y), name='mul_dunder'),
subtest(_make_case(torch.div, input_getter=TensorFactory.randp1), name='div'),
subtest(_make_case(lambda x, y: x / y, input_getter=TensorFactory.randp1), name='div_dunder'),
subtest(_make_case(torch.pow, input_getter=TensorFactory.randp1), name='pow'),
subtest(_make_case(lambda x, y: x ** y, input_getter=TensorFactory.randp1), name='pow_dunder'),
])
def test_arithmetic(self, case):
test = self._vmap_test
def get_number(getter):
return getter([]).item()
op, getter = case
device = 'cpu'
B0, B1 = 7, 11
# Single vmap: op(Tensor, Tensor)
test(op, (getter([B0, 3], device), getter([B0, 3], device)))
test(op, (getter([B0], device), getter([B0, 2, 3], device)))
test(op, (getter([B0], device), getter([2, B0, 3], device)), in_dims=(0, 1))
test(op, (getter([B0], device), getter([2, B0, 3], device)),
in_dims=(0, 1), out_dims=1)
test(op, (getter([B0], device), getter([2, 3], device)), in_dims=(0, None))
test(op, (getter([2, 3], device), getter([B0, 3], device)), in_dims=(0, None))
# Nested vmap: op(Tensor, Tensor)
test(vmap(op), (getter([B0, B1, 2, 3], device), getter([B0, B1, 3], device)))
test(vmap(op, in_dims=(None, 0)),
(getter([B0, 2, 3], device), getter([B1, 3], device)), in_dims=(0, None))
# Python number overload: op(Tensor, Number) (and vice-versa)
number = get_number(getter)
self._test_unary(lambda t: op(t, number), getter, device)
number = get_number(getter)
self._test_unary(lambda t: op(number, t), getter, device)
# Type promotion: op(Logical Scalar Tensor, Logical Scalar Tensor)
test(op, (getter([B0], device), getter([B0], device, dtype=torch.double)))
test(op, (getter([B0], device, dtype=torch.double), getter([B0], device)))
test(op, (getter([B0], device), getter([B0], device)))
# Type promotion: op(Tensor, Logical Scalar Tensor) (and vice-versa)
test(op, (getter([B0, 2], device), getter([B0], device, torch.double)))
test(op, (getter([B0], device, torch.double), getter([B0, 2], device)))
if not torch.cuda.is_available():
return
# TODO(rzou): fix the following
# # Test cross-device scalars
# number = get_number(getter)
# self._test_unary(lambda t: op(t, number), getter, device='cuda')
# self._test_unary(lambda t: op(number, t), getter, device='cuda')
# self._test_unary(lambda t: op(t, torch.tensor(number)), getter, device='cuda')
# TODO: as_strided BR
@unittest.expectedFailure
def test_as_strided(self):
def _test(sizes, strides, offset, tensor, lambd):
result = vmap(lambda t: t.as_strided(sizes, strides, offset))(tensor)
expected = vmap(lambd)(tensor)
self.assertTrue(result._base is expected._base)
self.assertEqual(result, expected)
# single vmap test
B0 = 5
tensors = [
# contiguous
torch.randn(B0, 2, 3),
# non-contiguous
torch.randn(B0, 3, 2).transpose(1, 2),
# non-zero storage offset
torch.randn(2, B0, 2, 3)[1],
# non-contiguous strides, zero storage offset
torch.randn(B0, 2, 4, 3, 7)[:, :, 0, :, 0],
# non-contiguous strides, non-zero storage offset
torch.randn(B0, 2, 4, 3, 7)[:, :, 2, :, 1],
]
for x in tensors:
S0, S1 = x.stride()[1:]
offset = x.storage_offset()
# Broadcast
_test([5, 5, 2, 3], [0, 0, S0, S1], offset, x, lambda x: x.expand(5, 5, 2, 3))
# transpose
_test([3, 2], [S1, S0], offset, x, lambda x: x.transpose(0, 1))
# select
_test([2], [S0], offset + S1, x, lambda x: x[:, 1])
# Nested vmap test
B1 = 7
x = torch.randn(B1, B0, 2, 3)
S0, S1 = x.stride()[2:]
result = vmap(vmap(lambda t: t.as_strided([5, 5, 2, 3], [0, 0, S0, S1])), in_dims=1)(x)
expected = vmap(vmap(lambda t: t.expand(5, 5, 2, 3)), in_dims=1)(x)
self.assertTrue(result._base is expected._base)
self.assertEqual(result, expected)
# Check that mal-formatted size/strides doesn't crash
with self.assertRaisesRegex(RuntimeError, 'size and stride must have the same length'):
x = torch.randn(B0, 2, 3).transpose(0, 1)
vmap(lambda x: x.as_strided([1, 1, 1], [1, 1]))(x)
# Sanity check #1: we require the batch dims to be at the front of the
# tensor (in memory layout).
msg = 'batch dims being vmapped over are at the front of the tensor'
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(2, B0, 3).transpose(0, 1)
vmap(lambda x: x.as_strided([2, 3], [B0 * 3, 1]))(x)
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 2, 3, B1).movedim(3, 1)
vmap(vmap(lambda x: x.as_strided([2, 3], [B1 * 3, B1])))(x)
# All the Sanity check #2{a,b,c} cases check that
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# doesn't index memory that is out of bounds of xs[i]. This condition
# is important to the correctness of the as_strided batching rule
# (see NOTE: [When will the as_strided_batching_rule fail?])
# Sanity check #2a: The maximum indexable location of
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# is less than or equal to the maximum indexable location of xs[i].
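        # Worked example for the first case below (a sketch): x has shape [B0, 3],
        # so each per-example slice xs[i] owns exactly 3 contiguous elements.
        # as_strided([3], [1], 1) shifts the per-example window forward by one
        # element, so its last element would land one past the end of xs[i]'s own
        # storage region, violating the condition above -- hence the error.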
msg = 'This is not supported inside of vmap'
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 3)
vmap(lambda x: x.as_strided([3], [1], 1))(x)
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 3, 5)
vmap(lambda x: x.as_strided([4, 4], [4, 1], 0))(x)
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, B1, 3, 5)
vmap(vmap(lambda x: x.as_strided([4, 4], [4, 1], 0)))(x)
# Sanity check #2b: The min indexable location of
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# is greater than or equal to the min indexable location of xs[i].
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(2, B0, 3)[1]
vmap(lambda x: x.as_strided([3], [1], B0 * 3 - 1))(x)
# Sanity check #2c:
# xs[i] is a zero-dim tensor, but
# xs[i].as_strided(sizes, strides, offset + xs[i].offset() - xs.offset())
# is not
with self.assertRaisesRegex(RuntimeError, msg):
x = torch.randn(B0, 0, 3)
vmap(lambda x: x.as_strided([3], [1]))(x)
def test_nll_loss(self):
test = self._vmap_test
op = F.nll_loss
B = 3
y = torch.randn(B, 2, 5)
t = torch.randint(0, 5, (B, 2))
test(op, (y, t))
test(functools.partial(op, reduction='sum'), (y, t))
test(functools.partial(op, reduction='none'), (y, t))
y = torch.randn(B, 2, 5)
t = torch.randint(0, 5, (2,))
test(op, (y, t), in_dims=(0, None))
test(functools.partial(op, reduction='sum'), (y, t), in_dims=(0, None))
test(functools.partial(op, reduction='none'), (y, t), in_dims=(0, None))
def test_adaptive_avg_pool2d(self):
test = self._vmap_test
op = functools.partial(F.adaptive_avg_pool2d, output_size=(3, 3))
x = torch.randn(3, 5, 7, 9, 11)
test(op, (x,))
test(op, (x,), in_dims=(1,))
test(op, (x,), in_dims=(4,))
def test_bmm(self):
op = torch.bmm
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = ""
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 3, 3, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2, 2, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 2, 3, 5), torch.rand(2, 5, 3)), in_dims=(0, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 2, 3, 5), torch.rand(2, 5, 3)),
in_dims=(1, None))
# right arg is vmapped
test(op, (torch.rand(2, 5, 3), torch.rand(B0, 2, 3, 5)), in_dims=(None, 0))
test(vmap(op, in_dims=(None, 0)), (torch.rand(2, 5, 3), torch.rand(B1, B0, 2, 3, 5)),
in_dims=(None, 1))
# both args are vmapped
test(op, (torch.rand(B0, 2, 3, 5), torch.rand(B0, 2, 5, 3)))
test(vmap(op), (torch.rand(B1, B0, 2, 3, 5), torch.rand(B0, B1, 2, 5, 3)), in_dims=(1, 0))
test(vmap(op, in_dims=(0, None)),
(torch.rand(B1, 2, 3, 5), torch.rand(B0, 2, 5, 3)), in_dims=(None, 0))
def test_cat(self):
test = self._vmap_test
B0, B1 = 5, 7
# Quick hack b/c vmap can't accept a list of tensors as an argument
def get_op(dim):
def op(*tensors):
return torch.cat(tensors, dim=dim)
return op
test(get_op(0), (torch.rand(B0, 2), torch.rand(B0, 3)))
test(get_op(0), (torch.rand(2), torch.rand(B0, 3)), in_dims=(None, 0))
test(get_op(0), (torch.rand(2, 17), torch.rand(3, 17, B0)), in_dims=(None, 2))
test(get_op(-1), (torch.rand(17, 2), torch.rand(17, 3, B0)), in_dims=(None, 2))
test(vmap(get_op(0), in_dims=(0, None)),
(torch.rand(B1, 2), torch.rand(B0, 3)), in_dims=(None, 0))
test(vmap(get_op(0), in_dims=(0, 0)),
(torch.rand(B1, 2), torch.rand(B0, B1, 3)), in_dims=(None, 0))
def test_unsafe_view(self):
# Unsafe view isn't exposed, so we get at it via
# vmap(grad(matmul))
test = functools.partial(self._vmap_test, check_propagates_grad=False)
B = 2
x = torch.randn(B, 2, 3, 3)
y = torch.randn(B, 3, 3)
def baz(x, y):
return (x @ y).sum()
test(functorch.grad(baz), (x, y))
def test_conj(self):
op = torch.conj
def run_test(dtype):
def get(shape):
return torch.randn(shape, dtype=dtype)
B0, B1 = 7, 11
test = self._vmap_test
# Single vmap, various in_dims / out_dims
test(op, [get([B0, 3])])
test(op, [get([2, 5, B0, 3])], in_dims=2)
test(op, [get([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [get([B0, B1])])
test(vmap(op), [get([B1, 2, 5, B0, 3])], in_dims=2)
test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3])],
in_dims=2, out_dims=2)
# correctness tests
run_test(torch.float)
run_test(torch.cfloat)
# check that torch.conj on a non-complex tensor returns the same tensor
real_tensor = torch.randn(3)
result = vmap(op)(real_tensor)
self.assertEqual(result.data_ptr(), real_tensor.data_ptr())
def test_contiguous(self):
op = Tensor.contiguous
self._test_unary(op, TensorFactory.randn, 'cpu')
# check that contiguous returns the original tensor if the per-examples
# are already contiguous
B0 = 3
x = torch.randn(B0, 2, 5, 7)
x = x.movedim(0, 2)
result = vmap(Tensor.contiguous, in_dims=2, out_dims=2)(x)
self.assertTrue(result is x)
msg = 'NYI: querying is_contiguous inside of vmap for memory_format'
tensor = torch.randn(B0, 3)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(op, memory_format=torch.channels_last))(tensor)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(op, memory_format=torch.channels_last_3d))(tensor)
def test_stride(self):
B0 = 3
x = torch.randn(B0, 2, 5, 7)
def foo(x):
assert x.stride() == (7 * 5, 7, 1)
return x
vmap(foo)(x)
x = torch.randn(2, B0, 5, 7).movedim(1, 0)
def bar(x):
assert x.stride() == (7 * 5 * B0, 7, 1)
return x
vmap(bar)(x)
def test_chunk(self):
test = self._vmap_view_test
op = torch.chunk
B0, B1, B2 = 7, 11, 13
        # tests for torch.chunk(self, chunks: int, dim)
test(op, (torch.rand(B0, 2, 1024), 15, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 9, 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), 4, 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
def test_clamp(self):
clamp_cases = (
(lambda t: t.clamp(min=-0.5), TensorFactory.randn),
(lambda t: t.clamp(max=0.5), TensorFactory.randn),
(lambda t: t.clamp(min=-0.5, max=0.5), TensorFactory.randn),
(lambda t: t.clamp_min(min=-0.5), TensorFactory.randn),
(lambda t: t.clamp_max(max=0.5), TensorFactory.randn),
)
for op, getter in clamp_cases:
self._test_unary(op, getter, 'cpu')
def test_comparison_ops(self):
test = functools.partial(self._vmap_test, check_propagates_grad=False)
getter = TensorFactory.randn
B0, B1 = 7, 11
ops = (
torch.eq, lambda x, y: x == y,
torch.gt, lambda x, y: x > y,
torch.ge, lambda x, y: x >= y,
torch.le, lambda x, y: x <= y,
torch.lt, lambda x, y: x < y,
torch.ne, lambda x, y: x != y,
)
for op in ops:
# Single vmap: op(Tensor, Tensor)
test(op, (getter([B0, 3]), getter([B0, 3])))
test(op, (getter([B0]), getter([B0, 2, 3])))
test(op, (getter([B0]), getter([2, B0, 3])), in_dims=(0, 1))
test(op, (getter([B0]), getter([2, B0, 3])), in_dims=(0, 1), out_dims=1)
test(op, (getter([B0]), getter([2, 3])), in_dims=(0, None))
test(op, (getter([2, 3]), getter([B0, 3])), in_dims=(0, None))
# Nested vmap: op(Tensor, Tensor)
test(vmap(op), (getter([B0, B1, 2, 3]), getter([B0, B1, 3])))
test(vmap(op, in_dims=(None, 0)),
(getter([B0, 2, 3]), getter([B1, 3])), in_dims=(0, None))
# test number as inputs
number = getter([]).item()
self._test_unary(lambda t: op(t, number), getter, 'cpu', check_propagates_grad=False)
def test_cross_batch_size_three(self):
        # Test the corner case where batch_size is 3 and cross' dim argument is not specified.
        # According to the cross API, dim is assigned to the first dim with size 3;
        # this test ensures that the dim it finds is not the batch dim.
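        # For example (a sketch): with inputs of physical shape (B0, 2, 3) and B0 == 3,
        # each per-example input has logical shape (2, 3), so dim resolution should pick
        # the logical dim of size 3 (the last one), not the physical batch dim of size 3.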
op = torch.cross
test = self._vmap_test
B0 = B1 = 3
test(op, (torch.rand(B0, 2, 3), torch.rand(B0, 2, 3)))
test(vmap(op, in_dims=(0, None)), (torch.rand(B0, B1, 2, 3), torch.rand(B0, B1, 2, 3)),
in_dims=(None, 1))
def test_diagonal(self):
tensor = torch.randn(3, 5, 7, 11, 13)
test = self._vmap_view_test
op = torch.diagonal
test(op, (tensor, 1, 0, 1), in_dims=(0, None, None, None))
test(op, (tensor, 0, 2, -1), in_dims=(0, None, None, None))
test(op, (tensor, 2, 1, 2), in_dims=(1, None, None, None))
test(op, (tensor, 0, -2, -1), in_dims=(1, None, None, None), out_dims=1)
test(vmap(lambda t: op(t, 0, 0, -1)), (tensor,), in_dims=1, out_dims=1)
test(vmap(vmap(lambda t: op(t, 0, 0, 1), in_dims=1), in_dims=3),
(tensor,), in_dims=1, out_dims=1)
def test_dot(self):
op = torch.dot
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = ""
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 5), torch.rand(5)), in_dims=(0, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 5), torch.rand(5)),
in_dims=(1, None))
# right arg is vmapped
test(op, (torch.rand(5), torch.rand(B0, 5)), in_dims=(None, 0))
test(vmap(op, in_dims=(None, 0)), (torch.rand(5), torch.rand(B1, B0, 5)),
in_dims=(None, 1))
# both args are vmapped
test(op, (torch.rand(B0, 5), torch.rand(B0, 5)))
test(vmap(op), (torch.rand(B1, B0, 5), torch.rand(B0, B1, 5)), in_dims=(1, 0))
test(vmap(op, in_dims=(0, None)),
(torch.rand(B1, 5), torch.rand(B0, 5)), in_dims=(None, 0))
def test_expand_as(self):
op = torch.Tensor.expand_as
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 1, 5), torch.rand(B0, 2, 3, 5)))
test(op, (torch.rand(B0, 1, 5), torch.rand(2, 3, 5)), in_dims=(0, None))
test(op, (torch.rand(1, 5), torch.rand(B0, 2, 3, 5)), in_dims=(None, 0))
test(vmap(op), (torch.rand(B0, B1, 1, 5), torch.rand(B0, B1, 2, 3, 5)))
test(vmap(op), (torch.rand(B0, B1, 1, 5), torch.rand(B1, B0, 2, 3, 5)), in_dims=(0, 1))
test(vmap(op), (torch.rand(B0, B1), torch.rand(B1, 2, 3, 5)), in_dims=(0, None))
test(vmap(vmap(op)), (torch.rand(B0, B1, B2), torch.rand(B0, B1, B2, 2, 3, 5)))
def test_fill_and_zero_inplace(self):
test = functools.partial(self._vmap_test, check_propagates_grad=False)
B0, B1 = 7, 11
ops = (
lambda t: t.fill_(0.1),
lambda t: t.fill_(torch.tensor(0.2)),
lambda t: t.zero_(),
)
for op in ops:
# Single vmap, various in_dims / out_dims
test(op, [TensorFactory.randn([B0, 3])])
test(op, [TensorFactory.randn([2, 5, B0, 3])], in_dims=2)
test(op, [TensorFactory.randn([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [TensorFactory.randn([B0, B1])])
test(vmap(op), [TensorFactory.randn([B1, 2, 5, B0, 3])], in_dims=2)
test(vmap(op, in_dims=2), [TensorFactory.randn([2, 5, B0, B1, 3])],
in_dims=2, out_dims=2)
# test when value is a batched tensor for fill_ operator
B0, B1 = 3, 5
test(Tensor.fill_, [TensorFactory.randn([B0, B1]), TensorFactory.randn(B0)])
with self.assertRaisesRegex(RuntimeError,
""):
            # A RuntimeError is thrown when the tensor being written to isn't being vmapped over
vmap(Tensor.fill_, (None, 0))(TensorFactory.randn([B0, B1]),
TensorFactory.randn([B0]))
def _test_complex_views(self, op, dtypes):
test = self._vmap_view_test
def run_test(op, dtype):
def get(shape):
return torch.randn(shape, dtype=dtype)
B0, B1 = 7, 11
# Single vmap, various in_dims / out_dims
test(op, [get([B0, 3])])
test(op, [get([3, B0])], in_dims=1)
test(op, [get([2, 5, B0, 3])], in_dims=2)
test(op, [get([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [get([B0, B1])])
test(vmap(op), [get([B1, 2, 5, 3, B0])], in_dims=4)
test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3])],
in_dims=2, out_dims=2)
for dtype in dtypes:
run_test(op, dtype)
def test_real(self):
self._test_complex_views(torch.real, dtypes=[torch.cfloat, torch.cdouble])
def test_imag(self):
self._test_complex_views(torch.imag, dtypes=[torch.cfloat, torch.cdouble])
def test_view_as_real(self):
self._test_complex_views(torch.view_as_real, dtypes=[torch.cfloat, torch.cdouble])
def test_view_as_complex(self):
def run_test(dtype):
def get(shape):
return torch.randn(shape, dtype=dtype)
op = torch.view_as_complex
test = self._vmap_view_test
B0, B1 = 7, 11
# Single vmap, various in_dims / out_dims
test(op, [get([B0, 3, 2])])
test(op, [get([2, 5, B0, 3, 2])], in_dims=2)
test(op, [get([2, 5, B0, 3, 2])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(op), [get([B0, B1, 2])])
test(vmap(op), [get([B1, 2, 5, B0, 3, 2])], in_dims=2)
test(vmap(op, in_dims=2), [get([2, 5, B0, B1, 3, 2])],
in_dims=2, out_dims=2)
# Interesting case #1: Batch dim directly before dim of size 2
test(op, [get([3, B0, 2])], in_dims=1)
test(vmap(op, in_dims=1), [get([3, B1, B0, 2])], in_dims=2)
# Interesting case #2: Batch dim at end of tensor, success cases
# view_as_complex requires that the dim with size 2 have stride 1
            # in order for the view to function properly
test(op, [get([B0, 2]).transpose(0, 1)], in_dims=1)
test(vmap(op, in_dims=1), [get([B0, B1, 2]).movedim(1, 2)])
test(vmap(op, in_dims=2), [get([B0, 3, B1, 2]).movedim(2, 3)])
# Interesting case #3: Batch dim at end of tensor, failure cases
msg = "Tensor must have a last dimension with stride 1"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=1)(get([2, B0]))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op, in_dims=1), in_dims=1)(get([2, B0, B1]))
# Invalid input: no dimension of size 2
msg = 'Input tensor must have one or more dimensions'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(get([B0]))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(vmap(op))(get([B0, B1]))
# Invalid input: Batch dim has size 2, but the logical last dim does
# not have size 2
msg = 'Tensor must have a last dimension of size 2'
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=1)(get([3, 2]))
for dtype in [torch.float, torch.double]:
run_test(dtype)
def test_is_complex(self):
ctensor = torch.randn(3, dtype=torch.cfloat)
tensor = torch.randn(3)
def foo(x):
if x.is_complex():
return torch.tensor(1)
else:
return torch.tensor(0)
self.assertEqual(vmap(foo)(ctensor), torch.tensor([1, 1, 1]))
self.assertEqual(vmap(foo)(tensor), torch.tensor([0, 0, 0]))
def test_is_floating_point(self):
float_tensor = torch.tensor([1., 2., 3.])
long_tensor = torch.tensor([1, 2, 3])
def foo(x):
if x.is_floating_point():
return torch.tensor(1)
else:
return torch.tensor(0)
self.assertEqual(vmap(foo)(float_tensor), torch.tensor([1, 1, 1]))
self.assertEqual(vmap(foo)(long_tensor), torch.tensor([0, 0, 0]))
def test_is_contiguous(self):
def foo(x):
if x.is_contiguous():
return torch.tensor(1.)
else:
return torch.tensor(0.)
B0, B1 = 3, 5
# Single batch dim
contig = torch.randn(B0, 2, 7)
self.assertEqual(vmap(foo)(contig), torch.ones(B0))
noncontig = torch.randn(2, B0, 7)
self.assertEqual(vmap(foo, in_dims=1)(noncontig), torch.zeros(B0))
noncontig = torch.randn(2, B0, 7).movedim(1, 0)
self.assertEqual(vmap(foo)(noncontig), torch.zeros(B0))
noncontig = torch.randn(2, 7, B0)
self.assertEqual(vmap(foo, in_dims=2)(noncontig), torch.zeros(B0))
# Multiple batch dims
contig = torch.randn(B0, B1, 3)
self.assertEqual(vmap(vmap(foo))(contig), torch.ones(B0, B1))
contig = torch.randn(B1, B0, 3)
self.assertEqual(vmap(vmap(foo), in_dims=1)(contig), torch.ones(B0, B1))
contig = torch.randn(B1, B0, 3).movedim(0, 1)
self.assertEqual(vmap(vmap(foo))(contig), torch.ones(B0, B1))
noncontig = torch.randn(B0, 3, B1)
self.assertEqual(vmap(vmap(foo, in_dims=1))(noncontig), torch.zeros(B0, B1))
# is_contiguous on empty tensor is True
def bar(x):
assert x.is_contiguous()
return x
vmap(bar)(torch.randn(B0, 0, 3))
vmap(bar, in_dims=1)(torch.randn(0, B0, 3))
vmap(bar)(torch.randn(B0, 0, 3).transpose(-1, -2))
# is_contiguous with other memory formats
def baz(x, memory_format):
x.is_contiguous(memory_format=memory_format)
return x
msg = 'NYI: querying is_contiguous inside of vmap for memory_format'
tensor = torch.randn(B0, 2, 7, 3)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(baz, memory_format=torch.channels_last))(tensor)
with self.assertRaisesRegex(RuntimeError, msg):
vmap(functools.partial(baz, memory_format=torch.channels_last_3d))(tensor)
def test_unsqueeze(self):
op = torch.unsqueeze
test = self._vmap_view_test
B0, B1 = 7, 11
# unsqueeze dim 0
test(op, (torch.rand(B0, 2, 5), 0), in_dims=(0, None))
test(op, (torch.rand(2, B0, 5), 0), in_dims=(1, None))
# unsqueeze last dim (positive)
test(op, (torch.rand(B0, 2, 5), 2), in_dims=(0, None))
test(op, (torch.rand(2, B0, 5), 2), in_dims=(1, None))
# unsqueeze last dim (negative)
test(op, (torch.rand(B0, 2, 5), -1), in_dims=(0, None))
test(op, (torch.rand(2, B0, 5), -1), in_dims=(1, None))
# nested vmaps
def unsqueeze_0(x):
return torch.unsqueeze(x, 0)
def unsqueeze_last(x):
return torch.unsqueeze(x, -1)
# bdims in canonical order
test(vmap(unsqueeze_0), (torch.rand(B0, B1, 2), ))
test(vmap(unsqueeze_last), (torch.rand(B0, B1, 2),))
# wild bdims
test(vmap(unsqueeze_0), (torch.rand(B1, 2, B0),), in_dims=2)
test(vmap(unsqueeze_0, in_dims=1), (torch.rand(2, B1, B0),), in_dims=2)
test(vmap(unsqueeze_last), (torch.rand(B1, 2, B0),), in_dims=2)
test(vmap(unsqueeze_last, in_dims=1), (torch.rand(2, B1, B0),), in_dims=2)
def test_movedim(self):
op = torch.movedim
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
# movedim(tensor, int, int) variant
test(op, (torch.rand(B0, 2, 5), 0, 1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 5), 0, 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 2, B0, 5), 0, 1), in_dims=(2, None, None))
test(vmap(vmap(op, in_dims=(2, None, None)), in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5, B2), 0, 1), in_dims=(2, None, None))
# movedim(tensor, intlist, intlist) variant
test(op, (torch.rand(B0, 2, 3, 5), [1, 0], [0, 2]), in_dims=(0, None, None))
test(op, (torch.rand(2, 3, B0, 5), [1, 0], [0, 2]), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5), [0, 1], [1, 0]), in_dims=(2, None, None))
test(vmap(vmap(op, in_dims=(2, None, None)), in_dims=(0, None, None)),
(torch.rand(B1, 2, B0, 5, B2), [0, 1], [1, 0]), in_dims=(2, None, None))
def test_mm(self):
op = torch.mm
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = "Shape mismatch"
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2, 2, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(5, 2)), in_dims=(0, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 2, 5), torch.rand(5, 2)),
in_dims=(1, None))
# right arg is vmapped
test(op, (torch.rand(2, 5), torch.rand(B0, 5, 2)), in_dims=(None, 0))
test(vmap(op, in_dims=(None, 0)), (torch.rand(2, 5), torch.rand(B1, B0, 5, 2)),
in_dims=(None, 1))
# both args are vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(B0, 5, 2)))
test(vmap(op), (torch.rand(B1, B0, 2, 5), torch.rand(B0, B1, 5, 2)), in_dims=(1, 0))
test(vmap(op, in_dims=(0, None)),
(torch.rand(B1, 2, 5), torch.rand(B0, 5, 2)), in_dims=(None, 0))
def test_mv(self):
op = torch.mv
test = self._vmap_test
B0, B1 = 7, 11
# shape mismatch
msg = ""
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op)(torch.randn(B0, 2, 2, 2), torch.randn(B0, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(0, None))(torch.randn(B0, 2, 2), torch.randn(2, 2))
with self.assertRaisesRegex(RuntimeError, msg):
vmap(op, in_dims=(None, 0))(torch.randn(2, 2), torch.randn(B0, 2, 2))
# left arg is vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(5)), in_dims=(0, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, B0, 2, 5), torch.rand(5)),
in_dims=(1, None))
# right arg is vmapped
test(op, (torch.rand(2, 5), torch.rand(B0, 5)), in_dims=(None, 0))
test(vmap(op, in_dims=(None, 0)), (torch.rand(2, 5), torch.rand(B1, B0, 5)),
in_dims=(None, 1))
# both args are vmapped
test(op, (torch.rand(B0, 2, 5), torch.rand(B0, 5)))
test(vmap(op), (torch.rand(B1, B0, 2, 5), torch.rand(B0, B1, 5)), in_dims=(1, 0))
test(vmap(op, in_dims=(0, None)),
(torch.rand(B1, 2, 5), torch.rand(B0, 5)), in_dims=(None, 0))
def test_narrow(self):
op = torch.narrow
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5), -1, 1, 3), in_dims=(0, None, None, None))
test(op, (torch.rand(2, B0, 5), 1, 1, 3), in_dims=(1, None, None, None))
test(vmap(op, in_dims=(0, None, None, None)),
(torch.rand(B1, 2, B0, 5), 1, 0, 0), in_dims=(2, None, None, None))
test(vmap(vmap(op, in_dims=(2, None, None, None)), in_dims=(0, None, None, None)),
(torch.rand(B1, 2, B0, 5, B2), -1, 2, 3), in_dims=(2, None, None, None))
def test_new_empty(self):
# Empty is non-deterministic so we just check that the shape of the
# output tensor is what we expect and that the vmap fallback isn't used.
op = Tensor.new_empty
B0, B1 = 7, 11
result = vmap(lambda x: op(x, [2, 3]))(torch.randn(B0))
self.assertEqual(result.shape, [B0, 2, 3])
result = vmap(lambda x: op(x, []))(torch.randn(B0))
self.assertEqual(result.shape, [B0])
result = vmap(vmap(lambda x: op(x, [2, 3])))(torch.randn(B0, B1))
self.assertEqual(result.shape, [B0, B1, 2, 3])
def test_new_empty_strided(self):
# Empty is non-deterministic so we just check that the size and shape
# of the output are what we expect and that the vmap fallback isn't used
B0, B1 = 7, 11
def _test_single_vmap(size, stride, B0):
x = torch.randn(B0)
result = vmap(lambda x: x.new_empty_strided(size, stride))(x)
S = torch.empty_strided(size, stride).storage().size()
self.assertEqual(result.shape, [B0] + size)
self.assertEqual(result.stride(), [S] + stride)
def _test_double_vmap(size, stride, B0, B1):
x = torch.randn(B0, B1)
result = vmap(vmap(lambda x: x.new_empty_strided(size, stride)))(x)
S = torch.empty_strided(size, stride).storage().size()
self.assertEqual(result.shape, [B0, B1] + size)
self.assertEqual(result.stride(), [B1 * S, S] + stride)
x = torch.randn(B1, B0)
result = vmap(vmap(lambda x: x.new_empty_strided(size, stride)), in_dims=1)(x)
S = x.new_empty_strided(size, stride).storage().size()
self.assertEqual(result.shape, [B0, B1] + size)
self.assertEqual(result.stride(), [B1 * S, S] + stride)
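        # Worked example of the expected strides (illustrative): for size=[2, 3, 5] and
        # stride=[15, 5, 1], the per-example storage size S is
        # 1 + (2 - 1) * 15 + (3 - 1) * 5 + (5 - 1) * 1 = 30, so the doubly-vmapped
        # result has shape [B0, B1, 2, 3, 5] and strides [B1 * 30, 30, 15, 5, 1].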
# contiguous case
_test_single_vmap([2, 3, 5], [3 * 5, 5, 1], B0)
_test_double_vmap([2, 3, 5], [3 * 5, 5, 1], B0, B1)
# expanded
_test_single_vmap([2, 3, 5], [0, 5, 1], B0)
_test_double_vmap([2, 3, 5], [0, 5, 1], B0, B1)
# some of these cases are pretty strange, just verifying that if
# empty_strided allows them then BatchedTensor.new_empty_strided
# can as well
for shape in [[2, 3, 4], [0, 2, 0]]:
for strides in [[12, 4, 1], [2, 4, 6], [0, 0, 0]]:
_test_single_vmap(shape, strides, B0)
_test_double_vmap(shape, strides, B0, B1)
def test_new_zeros(self):
op = Tensor.new_zeros
test = functools.partial(self._vmap_test, check_propagates_grad=False)
B0, B1 = 7, 11
test(lambda x: op(x, 2, 3), (torch.rand(B0),))
test(lambda x: op(x, []), (torch.rand(B0),))
test(vmap(lambda x: op(x, 3, 5)), (torch.rand(B0, B1),))
def test_select(self):
op = torch.select
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5), 0, 0), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 5), 1, 1), in_dims=(1, None, None))
test(vmap(lambda t: op(t, 1, 1)), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(lambda t: op(t, 1, 1), in_dims=1)), (torch.rand(B1, 2, B0, B2, 5),), in_dims=2)
def test_roll_no_dims(self):
op = torch.roll
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5), 2), in_dims=(0, None))
test(op, (torch.rand(2, B0, 5), 3), in_dims=(1, None))
test(vmap(lambda t: op(t, 3)), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(lambda t: op(t, 3), in_dims=1)), (torch.rand(B1, 2, B0, B2, 5),), in_dims=2)
def test_stack(self):
test = self._vmap_test
B0, B1 = 5, 7
# Quick hack b/c vmap can't accept a list of tensors as an argument
def get_op(dim):
def op(*tensors):
return torch.stack(tensors, dim=dim)
return op
test(get_op(0), (torch.rand(B0, 3), torch.rand(B0, 3)))
test(get_op(0), (torch.rand(3), torch.rand(B0, 3)), in_dims=(None, 0))
test(get_op(0), (torch.rand(2, 17), torch.rand(2, 17, B0)), in_dims=(None, 2))
test(get_op(-1), (torch.rand(2, 17), torch.rand(2, 17, B0)), in_dims=(None, 2))
test(vmap(get_op(0), in_dims=(0, None)),
(torch.rand(B1, 2), torch.rand(B0, 2)), in_dims=(None, 0))
test(vmap(get_op(0), in_dims=(0, 0)),
(torch.rand(B1, 2), torch.rand(B0, B1, 2)), in_dims=(None, 0))
def test_slice(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(lambda t: t[0:1], (torch.rand(B0, 3, 5),))
test(lambda t: t[:, 1:3], (torch.rand(3, 5, B0),), in_dims=2)
test(vmap(lambda t: t[:, 0:1], in_dims=2), (torch.rand(3, 5, B0, B1),), in_dims=2)
test(vmap(vmap(lambda t: t[0:1], in_dims=2), in_dims=2),
(torch.rand(3, 5, B0, B1, B2),), in_dims=2)
def test_squeeze(self):
def verify_behavior(op, min_ndim=1):
test = self._vmap_view_test
B0, B1 = 1, 11
# These tests cannot be used with an operator that requires more
# than 1 dimension after batching.
if min_ndim <= 1:
test(op, (torch.rand(B0),))
test(op, (torch.rand(B1),))
test(vmap(op), (torch.rand(B0, B1, 1),))
test(vmap(op), (torch.rand(B1, 1, B0),), in_dims=2)
test(op, (torch.rand(B0, 3, 5),))
test(op, (torch.rand(1, B0, 5),), in_dims=1)
test(op, (torch.rand(B0, 0, 1, 5, 1),))
test(op, (torch.rand(B0, 1, 1, 1, 1),))
test(vmap(op), (torch.rand(B0, B1, 1, 3, 4),))
test(vmap(op), (torch.rand(B1, 1, B0, 4, 5),), in_dims=2)
verify_behavior(torch.squeeze)
verify_behavior(lambda x: torch.squeeze(x, dim=0), min_ndim=1)
verify_behavior(lambda x: torch.squeeze(x, dim=1), min_ndim=2)
verify_behavior(lambda x: torch.squeeze(x, dim=-1), min_ndim=2)
verify_behavior(lambda x: torch.squeeze(x, dim=-2), min_ndim=3)
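        # Capture the error message that eager torch.squeeze raises for an out-of-range
        # dim; it is reused as the assertion message below if vmap fails to raise.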
msg = ""
try:
torch.squeeze(torch.rand(10), dim=1)
except IndexError as err:
msg = str(err)
with self.assertRaises(RuntimeError, msg=msg):
vmap(lambda x: torch.squeeze(x, dim=1))(torch.rand(10))
def _test_mean_sum_dim(self, op):
test = self._vmap_test
B0, B1 = 5, 7
# Single vmap, various in_dims / out_dims
test(lambda x: op(x, 0), [torch.randn([B0])])
test(lambda x: op(x, -1), [torch.randn([B0])])
test(lambda x: op(x, 0), [torch.randn([B0, 3])])
test(lambda x: op(x, -1), [torch.randn([2, 5, B0, 3])], in_dims=2)
test(lambda x: op(x, 2), [torch.randn([2, 5, B0, 3])], in_dims=2, out_dims=2)
# Doubly nested vmap
test(vmap(lambda x: op(x, 0)), [torch.randn([B0, B1])])
test(vmap(lambda x: op(x, -1)), [torch.randn([B0, B1])])
test(vmap(lambda x: op(x, -2)), [torch.randn([B1, 2, 5, B0, 3])], in_dims=2)
test(vmap(lambda x: op(x, 2), in_dims=2), [torch.randn([2, 5, B0, B1, 3])],
in_dims=2, out_dims=2)
def test_sum_dim(self):
self._test_mean_sum_dim(torch.sum)
def test_mean_dim(self):
self._test_mean_sum_dim(torch.mean)
def test_argmax_dim(self):
def test(f, args):
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(f, args, {}):
self.assertEqual(loop_out, batched_out)
B0 = 5
test(lambda x: torch.argmax(x), [torch.randn(B0)])
test(lambda x: torch.argmax(x), [torch.randn(B0, 2, 3)])
test(lambda x: torch.argmax(x, 0), [torch.randn(B0, 2, 3)])
test(lambda x: torch.argmax(x, -1), [torch.randn(B0, 2, 3)])
test(lambda x: torch.argmax(x, 2), [torch.randn(B0, 2, 3)])
def _test_sum_mean(self, op):
test = self._vmap_test
B0, B1 = 5, 7
# Single vmap, various in_dims / out_dims
test(op, [torch.randn([B0])])
test(op, [torch.randn([B0, 3])])
test(op, [torch.randn([2, 5, B0, 3])], in_dims=2)
test(op, [torch.randn([2, 5, B0, 3])], in_dims=2)
# Doubly nested vmap
test(vmap(op), [torch.randn([B0, B1])])
test(vmap(op), [torch.randn([B1, 2, 5, B0, 3])])
test(vmap(op), [torch.randn([2, 5, B0, B1, 3])], in_dims=2)
def test_sum(self):
self._test_sum_mean(torch.sum)
def test_mean(self):
self._test_sum_mean(torch.mean)
def test_repeat(self):
test = self._vmap_test
B0 = 7
op = Tensor.repeat
test(lambda x: op(x, (2, 3)), (torch.rand(B0, 1, 1),))
test(lambda x: op(x, (2, 3)), (torch.rand(1, B0, 1),), in_dims=1)
def test_slogdet(self):
test = functools.partial(self._vmap_test, check_propagates_grad=False)
B0 = 7
op = torch.linalg.slogdet
test(op, (torch.rand(B0, 1, 1),))
test(op, (torch.rand(B0, 2, 2),))
test(op, (torch.rand(B0, 3, 2, 2),))
test(op, (torch.rand(3, 2, 2, B0),), in_dims=3)
def test_reshape(self):
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
op = torch.reshape
test(op, (torch.rand(B0, 2 * 5), [2, 5]), in_dims=(0, None), check_view=True)
test(op, (torch.rand(2, B0, 5), [1, 1, 10]), in_dims=(1, None), check_view=False)
test(vmap(lambda t: t.reshape([-1])), (torch.rand(B0, B1, 2, 5),), check_view=True)
test(vmap(vmap(lambda t: t.reshape([-1]), in_dims=2), in_dims=1),
(torch.rand(3, B1, 2, B2, 5, B0),), in_dims=5, check_view=False)
def test_reshape_as(self):
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.reshape_as
test(op, (torch.rand(B0, 2 * 5), torch.rand(B0, 2, 5)), check_view=True)
test(op, (torch.rand(2 * 5), torch.rand(B0, 2, 5)), in_dims=(None, 0), check_view=True)
test(op, (torch.rand(B0, 2 * 5), torch.rand(2, 5)), in_dims=(0, None), check_view=True)
test(op, (torch.rand(2, B0, 5), torch.rand(1, 1, 10)), in_dims=(1, None), check_view=False)
test(vmap(op), (torch.rand(B0, B1, 2, 5), torch.randn(B0, B1, 10)), check_view=True)
test(vmap(vmap(op, in_dims=(2, None)), in_dims=(1, None)),
(torch.rand(3, B1, 2, B2, 5, B0), torch.rand(B0, 3 * 2 * 5)),
in_dims=(5, 0), check_view=False)
def test_result_type(self):
def scalar_tensor_with_dtype(op):
def wrapped(*args, **kwargs):
dtype = op(*args, **kwargs)
return torch.ones([], dtype=dtype)
return wrapped
test = self._vmap_test
op = scalar_tensor_with_dtype(torch.result_type)
B0 = 2
test(op, (torch.randn(B0), torch.randn(B0, dtype=torch.float64)),
check_propagates_grad=False)
test(op, (torch.randn(B0), torch.randint(10, [B0], dtype=torch.int64)),
check_propagates_grad=False)
test(lambda x: op(x, 1), (torch.randn(B0),), check_propagates_grad=False)
test(lambda x: op(x, 1.6), (torch.randn(B0),), check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1)), (torch.randn(B0),),
check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1.6, dtype=torch.double)),
(torch.randn(B0),), check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randn(B0, 2, dtype=torch.float64)),
check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randint(10, [B0, 2], dtype=torch.int64)),
check_propagates_grad=False)
test(lambda x: op(x, 1), (torch.randn(B0, 2),), check_propagates_grad=False)
test(lambda x: op(x, 1.6), (torch.randn(B0, 2),), check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1)), (torch.randn(B0, 2),),
check_propagates_grad=False)
test(lambda x: op(x, torch.tensor(1.6, dtype=torch.double)),
(torch.randn(B0, 2),), check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randn(B0, dtype=torch.float64)),
check_propagates_grad=False)
test(op, (torch.randn(B0, 2), torch.randint(10, [B0], dtype=torch.int64)),
check_propagates_grad=False)
def test_tensor_split(self):
test = self._vmap_view_test
op = torch.tensor_split
B0, B1, B2 = 7, 11, 13
# tests for torch.tensor_split(self, indices_or_sections: int, dim)
test(op, (torch.rand(B0, 2, 1024), 5, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 150, 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), 256, 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
# tests for torch.tensor_split(self, indices_or_sections: List[int], dim)
test(op, (torch.rand(B0, 2, 1024), [50, 100, 378, 890], -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), [50, 100, 212, 345, 0, 378, 890], 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), [50, 100, 212, 345, 0, 378, 890], 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, [4, 8, 9, 34, 29], 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
def test_split(self):
test = self._vmap_view_test
op = torch.split
B0, B1, B2 = 7, 11, 13
# tests for torch.split(self, split_size: int, dim)
test(op, (torch.rand(B0, 2, 1024), 101, -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), 130, 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), 256, 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
# tests for torch.split(self, split_size: List[int], dim)
test(op, (torch.rand(B0, 2, 1024), [1, 1020, 3], -1), in_dims=(0, None, None))
test(op, (torch.rand(2, B0, 1024), [100] * 10 + [24], 1), in_dims=(1, None, None))
test(vmap(op, in_dims=(0, None, None)), (torch.rand(B1, 1023, B0, 5), [256] * 3 + [255], 0),
in_dims=(2, None, None))
test(vmap(vmap(lambda t: op(t, [4] * 8 + [8] * 4, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 64, B2),), in_dims=2)
def test_trace(self):
op = torch.trace
test = self._vmap_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5),))
test(op, (torch.rand(2, B0, 5),), in_dims=1)
test(vmap(op), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(op, in_dims=2)), (torch.rand(B1, 2, B0, 5, B2),), in_dims=2)
def test_transpose(self):
op = torch.transpose
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(lambda x: op(x, 0, 1), (torch.rand(B0, 2, 5),))
test(lambda x: op(x, -1, -2), (torch.rand(B0, 2, 5),))
test(lambda x: op(x, 3, 1), (torch.rand(B0, 2, 5, 4, 6),))
test(lambda x: op(x, 1, 0), (torch.rand(2, B0, 5),), in_dims=1)
test(vmap(lambda x: op(x, 0, 1)), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(lambda x: op(x, 0, 1), in_dims=2)),
(torch.rand(B1, 2, B0, 5, B2),), in_dims=2)
# Special case: scalar tensor
for dim1, dim2 in itertools.product([0, -1], [0, -1]):
x = torch.rand(B0)
result = vmap(lambda x: op(x, dim1, dim2))(x)
self.assertTrue(result is x)
def test_t(self):
op = torch.t
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 5),))
test(op, (torch.rand(2, B0, 5),), in_dims=1)
test(vmap(op), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(vmap(op, in_dims=2)), (torch.rand(B1, 2, B0, 5, B2),), in_dims=2)
def test_T_numpy(self):
def op(t):
return t.T
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 3, 5),))
test(op, (torch.rand(B0),))
test(op, (torch.rand(2, B0, 3, 5),), in_dims=1)
test(vmap(op), (torch.rand(B1, 2, B0, 5),), in_dims=2)
test(vmap(op), (torch.rand(B1, 2, B0, 3, 5),), in_dims=2)
test(vmap(vmap(op, in_dims=2)), (torch.rand(B1, 2, B0, 3, B2, 5),), in_dims=2)
def test_to(self):
test = self._vmap_test
B0, B1 = 7, 11
test(lambda t: t.to('cpu'), (torch.rand(B0),))
test(lambda t: t.to(torch.double), (torch.rand(B0),))
test(lambda t, o: t.to(o), (torch.rand(B0), torch.randn(B0, dtype=torch.float64)))
test(lambda t, o: t.to(o),
(torch.rand(B0), torch.randn(B0, dtype=torch.float64)),
in_dims=(0, None))
test(vmap(lambda t: t.to(torch.double)), (torch.rand(B0, B1, 3),))
# also test some casting methods
test(lambda t: t.double(), (torch.rand(B0),))
test(lambda t: t.float(), (torch.rand(B0),))
test(lambda t: t.int(), (torch.rand(B0),), check_propagates_grad=False)
test(lambda t: t.long(), (torch.rand(B0),), check_propagates_grad=False)
def test_unfold(self):
op = torch.Tensor.unfold
test = self._vmap_view_test
B0, B1, B2 = 3, 2, 5
test(op, (torch.rand(B0, 7, 11), 0, 2, 1), in_dims=(0, None, None, None))
test(op, (torch.rand(7, B0, 11), 1, 4, 2), in_dims=(1, None, None, None))
test(vmap(op, in_dims=(0, None, None, None)),
(torch.rand(B1, 7, B0, 11), 1, 5, 1), in_dims=(2, None, None, None))
test(vmap(vmap(op, in_dims=(2, None, None, None)), in_dims=(0, None, None, None)),
(torch.rand(B1, 7, B0, 11, B2), -1, 2, 4), in_dims=(2, None, None, None))
def test_unbind(self):
test = self._vmap_view_test
op = torch.unbind
B0, B1, B2 = 7, 11, 13
test(op, (torch.rand(B0, 2, 1024), -1), in_dims=(0, None))
test(op, (torch.rand(B0, 2, 0),))
test(op, (torch.rand(2, B0, 7), 0), in_dims=(1, None))
test(vmap(op, in_dims=(0, None)), (torch.rand(B1, 1023, B0, 5), 1),
in_dims=(2, None))
test(vmap(vmap(lambda t: op(t, dim=1), in_dims=2)),
(torch.rand(B1, 2, B0, 32, B2),), in_dims=2)
def test_view(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.view
# We should error out if the view would produce an incorrect result
with self.assertRaises(RuntimeError):
vmap(op, in_dims=(1, None))(torch.rand(2, B0, 5), [10])
test(op, (torch.rand(B0, 2 * 5), [2, 5]), in_dims=(0, None))
test(op, (torch.rand(B0, 4, 5), [1, 2, 1, 10]), in_dims=(0, None))
test(vmap(lambda t: t.view([-1])), (torch.rand(B0, B1, 2, 5, 3),))
test(vmap(vmap(lambda t: t.reshape([-1])), in_dims=1),
(torch.rand(B2, B0, B1, 3, 2, 5),), in_dims=1)
def test_view_as(self):
test = self._vmap_view_test
B0, B1, B2 = 7, 11, 13
op = torch.Tensor.view_as
# We should error out if the view would produce an incorrect result
with self.assertRaises(RuntimeError):
vmap(op, in_dims=(1, 0))(torch.rand(2, B0, 5), torch.rand(B0, 10))
test(op, (torch.rand(B0, 2 * 5), torch.rand(B0, 2, 5)))
test(op, (torch.rand(2 * 5), torch.rand(B0, 2, 5)), in_dims=(None, 0))
test(op, (torch.rand(B0, 2 * 5), torch.rand(2, 5)), in_dims=(0, None))
test(op, (torch.rand(B0, 4, 5), torch.rand(2, 1, 1, 10)), in_dims=(0, None))
test(vmap(op), (torch.rand(B0, B1, 2, 5), torch.randn(B0, B1, 10)))
test(vmap(vmap(op, in_dims=(0, None)), in_dims=(0, None)),
(torch.rand(B1, B2, B0, 3, 2, 5), torch.rand(B0, 3 * 2 * 5)),
in_dims=(2, 0))
def test_conv2d(self):
conv_setups = [
(torch.nn.Conv1d, torch.conv1d, [2, 4, 15]),
(torch.nn.Conv2d, torch.conv2d, [2, 4, 15, 20]),
(torch.nn.Conv3d, torch.conv3d, [2, 4, 15, 20, 25]),
# (torch.nn.ConvTranspose2d, torch.conv_transpose2d, [2, 4, 15, 20])
]
for conv_mod, conv_fn, inp_shape in conv_setups:
mod = conv_mod(4, 8, kernel_size=3)
arg_values = [torch.randn(inp_shape), mod.weight, mod.bias]
kwarg_values = {}
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(conv_fn, arg_values, kwarg_values):
self.assertEqual(loop_out, batched_out)
arg_values = [torch.randn(inp_shape), mod.weight, None]
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(conv_fn, arg_values, kwarg_values):
self.assertEqual(loop_out, batched_out)
mod2 = conv_mod(4, 8, kernel_size=3, groups=2, stride=3, padding=1, dilation=2)
arg_values = [torch.randn(inp_shape), mod2.weight, mod2.bias]
kwarg_values = dict(groups=2, stride=3, padding=1, dilation=2)
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(conv_fn, arg_values, kwarg_values):
self.assertEqual(loop_out, batched_out)
arg_values = [torch.randn(inp_shape), mod2.weight, None]
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(conv_fn, arg_values, kwarg_values):
self.assertEqual(loop_out, batched_out)
def test_one_hot(self):
sample_inputs = [
(torch.randint(0, 3, []), 3),
(torch.randint(0, 3, [2, 3, 4]), 4),
]
for args in sample_inputs:
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(F.one_hot, args, {}):
self.assertEqual(loop_out, batched_out)
def test_conj_bit(self):
x = torch.tensor([1 + 1j, 2 + 1j])
def foo(x):
assert not x.is_conj()
y = x.conj()
assert y.is_conj()
return y
res = vmap(foo)(x)
self.assertEqual(res, x.conj())
def test_mode_key(self):
def vmap_f(x):
return x + torch.randn(())
def naive_f(x, shape):
return x + torch.randn(shape)
torch.manual_seed(0)
out1 = vmap(vmap(vmap_f, randomness='different'), randomness='different')(torch.ones(2, 3))
torch.manual_seed(0)
out2 = naive_f(torch.ones(2, 3), (2, 3))
self.assertEqual(out1, out2)
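        # With randomness='different', each (outer, inner) batch element of the nested
        # vmap draws its own value from torch.randn(()); for a (2, 3, 4) input that value
        # is shared across the remaining dim of size 4, which naive_f reproduces below by
        # broadcasting a (2, 3, 1) sample.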
torch.manual_seed(0)
out1 = vmap(vmap(vmap_f, randomness='different'), randomness='different')(torch.ones(2, 3, 4))
torch.manual_seed(0)
out2 = naive_f(torch.ones(2, 3, 4), (2, 3, 1))
self.assertEqual(out1, out2)
self.assertTrue(torch.randn(()).dim() == 0)
@parametrize('in_dim', [0, 1, 2])
@parametrize('out_dim', [0, 1, 2])
@parametrize('randomness', ['error', 'same'])
def test_chunk_vmap(self, in_dim, out_dim, randomness):
x = torch.randn(4, 5, 6)
def f(x):
y = x.sin()
if randomness != "error":
y = y + torch.rand_like(x)
return y
rs = torch.get_rng_state()
expected = vmap(f, in_dims=in_dim, out_dims=out_dim, randomness=randomness)(x)
for chunks in [1, 2, 3, 4, 7, 10, 16]:
torch.set_rng_state(rs)
output = chunk_vmap(
f, in_dims=in_dim, out_dims=out_dim, randomness=randomness, chunks=chunks
)(x)
self.assertEqual(output, expected)
instantiate_parametrized_tests(TestVmapOperators)
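# construct_v builds a batch of random vectors shaped like `output` with an extra
# leading batch dimension; they are used as grad_outputs in the batched-gradient tests
# below. With contig=False the batch dim is allocated last in memory and then moved to
# the front, so the batched vectors are non-contiguous.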
def construct_v(output, batch_size, contig=False):
if contig:
return torch.randn(batch_size, *output.shape,
dtype=output.dtype, device=output.device)
result = torch.randn(*output.shape, batch_size,
dtype=output.dtype, device=output.device)
return result.movedim(-1, 0)
def as_tuple(x):
if isinstance(x, tuple):
return x
elif isinstance(x, list):
return tuple(x)
else:
return x,
def differentiable(args):
return tuple(arg for arg in as_tuple(args)
if isinstance(arg, torch.Tensor) and arg.requires_grad)
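# _get_rand_no_zeros returns uniform random values clamped to be at least 0.1, so the
# batched-gradient tests below for ops such as div and log operate away from zero.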
def _get_rand_no_zeros(*args, **kwargs):
requires_grad = kwargs.get('requires_grad', False)
kwargs_without_requires_grad = kwargs.copy()
kwargs_without_requires_grad['requires_grad'] = False
result = torch.rand(*args, **kwargs_without_requires_grad)
return result.clamp_min_(0.1).requires_grad_(requires_grad)
class TestVmapBatchedGradient(Namespace.TestVmapBase):
def _vmap_test(self, *args, **kwargs):
return _vmap_test(self, *args, **kwargs)
# Tests batched gradient computation of outputs = op(*args, **kwargs)
# by comparing it to a sequential map+stack fallback.
#
# output_process_fn: a function that maps the outputs to the part
# that should be differentiated.
# batch_size: the batch dim size for the batched grad
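    #
    # A minimal sketch of the pattern being exercised (illustrative, not executed;
    # assumes op = torch.sin):
    #   x = torch.randn(2, 3, requires_grad=True)
    #   out = torch.sin(x)
    #   vjp = lambda v: torch.autograd.grad(out, x, v, retain_graph=True)
    #   batched = vmap(vjp)(torch.randn(5, 2, 3))  # one batched VJP call
    # which should match stacking vjp(v) over each vector v in the batch.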
def _batched_grad_test(self, op, args, kwargs=None, output_process_fn=lambda x: x, batch_size=3):
if kwargs is None:
kwargs = {}
outputs = op(*args, **kwargs)
outputs = differentiable(output_process_fn(outputs))
for contig in [True, False]:
batched_vectors = tuple(construct_v(out, batch_size, contig)
for out in outputs)
def vector_jacobian_product(*vectors):
return torch.autograd.grad(outputs, differentiable(args), vectors,
retain_graph=True)
self._vmap_test(vector_jacobian_product, batched_vectors,
check_propagates_grad=False)
# Tests batched second grad computation of outputs = op(*args, **kwargs).
# by comparing it to a sequential map+stack fallback.
#
# output_process_fn: a function that maps the outputs to the part
# that should be differentiated.
# batch_size: the batch dim size for the batched grad
#
    # NB: we only test computing batched gradients in the second gradient
    # computation. One specific use case that does this is computing the Hessian
    # matrix of a scalar-valued function; this is useful in Bayesian logistic
    # regression.
    # In the future it might be useful to have a test that computes batched first
    # gradients and then uses those to compute batched second gradients.
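    #
    # A minimal sketch of that Hessian use case (illustrative, not executed):
    #   g = torch.autograd.grad(loss, x, create_graph=True)[0]
    #   hvp = lambda v: torch.autograd.grad(g, x, v, retain_graph=True)
    #   hessian_rows = vmap(hvp)(torch.eye(x.numel()).view(-1, *x.shape))
    # i.e. batching Hessian-vector products over basis vectors recovers the Hessian.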
def _batched_grad_grad_test(self, op, args, kwargs=None, output_process_fn=lambda x: x, batch_size=3):
if kwargs is None:
kwargs = {}
outputs = op(*args, **kwargs)
outputs = differentiable(output_process_fn(outputs))
ones = tuple(torch.ones_like(out) for out in outputs)
# Same thing as summing together all of the outputs and calling .backward()
first_grads = torch.autograd.grad(outputs, differentiable(args), ones,
create_graph=True)
first_grads = differentiable(first_grads)
self.assertNotEqual(
len(first_grads), 0, "None of the first grads depend on the input!")
for contig in [True, False]:
batched_vectors = tuple(construct_v(grad, batch_size, contig)
for grad in first_grads)
def vector_hessian_product(*vectors):
outputs = torch.autograd.grad(first_grads, differentiable(args), vectors,
retain_graph=True, allow_unused=True)
outputs = tuple(out for out in outputs if out is not None)
assert len(outputs) > 0
return outputs
self._vmap_test(vector_hessian_product, batched_vectors,
check_propagates_grad=False)
def _test_arithmetic(self, op, device, test_grad_grad=True):
x = torch.randn(2, 3, requires_grad=True, device=device)
y = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
scalar = 3.14
self._batched_grad_test(op, (x, y))
self._batched_grad_test(op, (scalar, y))
self._batched_grad_test(op, (x, scalar))
if test_grad_grad:
self._batched_grad_grad_test(op, (x, y))
def test_add(self, device):
self._test_arithmetic(torch.add, device, test_grad_grad=False)
self._test_arithmetic(lambda x, y: x + y, device, test_grad_grad=False)
def test_sub(self, device):
self._test_arithmetic(torch.sub, device, test_grad_grad=False)
self._test_arithmetic(lambda x, y: x - y, device, test_grad_grad=False)
def test_mul(self, device):
self._test_arithmetic(torch.mul, device)
self._test_arithmetic(lambda x, y: x * y, device)
def test_div(self, device):
self._test_arithmetic(torch.div, device)
self._test_arithmetic(lambda x, y: x / y, device)
def test_binary_cross_entropy(self, device):
x = F.sigmoid(torch.randn(3, 2, device=device, requires_grad=True))
target = torch.rand(3, 2, device=device)
op = functools.partial(F.binary_cross_entropy, target=target)
self._batched_grad_test(op, (x,), {})
self._batched_grad_grad_test(op, (x,), {})
def test_log_softmax(self, device):
op = functools.partial(torch.log_softmax, dim=-1)
x = torch.randn(3, 2, device=device, requires_grad=True)
self._batched_grad_test(op, (x,), {})
self._batched_grad_grad_test(op, (x,), {})
def test_expand(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
def op(x):
return x.expand(5, 5, 2, 3)
self._batched_grad_test(op, (x,))
@allowVmapFallbackUsage
def test_index(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
index = torch.tensor([[0, 0], [1, 1]], device=device)
def op(x):
y = x * x
return y[index]
self._batched_grad_test(op, (x,))
self._batched_grad_grad_test(op, (x,))
def test_lgamma(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(Tensor.lgamma, (x,))
self._batched_grad_grad_test(Tensor.lgamma, (x,))
def test_log(self, device):
x = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
self._batched_grad_test(torch.log, (x,))
self._batched_grad_grad_test(torch.log, (x,))
def test_logsumexp(self, device):
x = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
def op(x):
return torch.logsumexp(x, -1)
self._batched_grad_test(op, (x,))
self._batched_grad_grad_test(op, (x,))
def test_log1p(self, device):
x = _get_rand_no_zeros(2, 3, device=device, requires_grad=True)
self._batched_grad_test(torch.log1p, (x,))
self._batched_grad_grad_test(torch.log1p, (x,))
@allowVmapFallbackUsage
def test_max(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(torch.max, (x,))
@allowVmapFallbackUsage
def test_median(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(torch.median, (x,))
@allowVmapFallbackUsage
def test_min(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(torch.min, (x,))
def test_permute(self, device):
x = torch.randn(2, 3, 5, requires_grad=True, device=device)
def op(x):
return x.permute(2, 0, 1)
self._batched_grad_test(op, (x,))
def test_reshape(self, device):
x = torch.randn(2, 3, 5, requires_grad=True, device=device)
def op(x):
return x.reshape([2 * 3, 5])
self._batched_grad_test(op, (x,))
def test_sigmoid(self, device):
x = torch.randn(2, 3, requires_grad=True, device=device)
self._batched_grad_test(Tensor.sigmoid, (x,))
self._batched_grad_grad_test(Tensor.sigmoid, (x,))
def test_stack(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
y = torch.randn(2, 3, device=device, requires_grad=True)
def op(x, y):
return torch.stack([x, y])
self._batched_grad_test(op, (x, y))
def test_select(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x[1], (x,))
self._batched_grad_test(lambda x: x.select(1, 2), (x,))
self._batched_grad_test(lambda x: x.select(-1, 0), (x,))
def test_slice(self, device):
x = torch.randn(2, 3, 5, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x[0:1], (x,))
self._batched_grad_test(lambda x: x[:, 1:3], (x,))
self._batched_grad_test(lambda x: x[..., 1:3], (x,))
def test_trace(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
self._batched_grad_test(Tensor.trace, (x,))
x = torch.randn(3, 2, 2, device=device)
def sum_grad_trace(x):
return grad(torch.trace)(x).sum()
output = vmap(grad(sum_grad_trace))(x)
self.assertEqual(output, torch.zeros_like(output))
def test_where(self, device):
x = torch.randn(3, 2, device=device)
y = torch.ones(3, 2, device=device)
def f(x, y):
return torch.where(x > 0, x, y)
        # Check that there is no runtime error; exactness tests are done with OpInfo
vmap(f)(x, y)
x = torch.randint(0, 2, size=(4, 3), dtype=torch.float)
def f(t):
return torch.where(t)
with self.assertRaisesRegex(RuntimeError, r"Attempted to vmap over aten::where"):
vmap(f)(x)
@skipCUDAIfNoMagma
@allowVmapFallbackUsage
def test_symeig(self, device):
def op(x):
return torch.symeig(x, eigenvectors=True)[0]
x = torch.randn(3, 3, device=device, requires_grad=True)
self._batched_grad_test(op, (x,), {})
self._batched_grad_grad_test(op, (x,), {})
def test_threshold(self, device):
x = torch.randn(2, 3, device=device, requires_grad=True)
self._batched_grad_test(lambda x: F.threshold(x, 0.5, 0.0), (x,))
@allowVmapFallbackUsage
def test_inplace_view(self, device):
leaf = torch.randn(4, 5, requires_grad=True)
def func(leaf):
# Make sure the function is non-trivially twice differentiable
base = leaf * leaf
view = base[0]
view.cos_()
return view
self._batched_grad_test(func, (leaf,), {})
self._batched_grad_grad_test(func, (leaf,), {})
@allowVmapFallbackUsage
def test_inplace_manyview(self, device):
leaf = torch.randn(4, 4, 5, requires_grad=True)
def func(leaf):
# Make sure the function is non-trivially twice differentiable
base = leaf * leaf
view = base.transpose(0, 2)
view = view[1]
view = view.diagonal()
view = view[::2]
view.cos_()
return view
self._batched_grad_test(func, (leaf,), {})
self._batched_grad_grad_test(func, (leaf,), {})
def test_diagonal(self, device):
x = torch.randn(4, 5, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x.diagonal(1, 0, 1), (x,))
x = torch.randn(3, 4, 5, device=device, requires_grad=True)
self._batched_grad_test(lambda x: x.diagonal(0, -1, -2), (x,))
@allowVmapFallbackUsage
def test_unrelated_output(self, device):
B0 = 3
x = torch.randn([], requires_grad=True)
y = torch.randn([], requires_grad=True)
gy = torch.randn(B0, requires_grad=True)
def vjp(v):
res, = torch.autograd.grad(y, x, v, allow_unused=True)
return torch.zeros_like(x) if res is None else res
result = vmap(vjp)(gy)
self.assertEqual(result, torch.zeros(B0, *x.shape, device=device))
@allowVmapFallbackUsage
def test_unrelated_output_multiple_grad(self, device):
B0 = 3
x = torch.randn([], requires_grad=True)
y = torch.randn([], requires_grad=True)
gy = torch.randn(B0, requires_grad=True)
def vjp(v):
res, = torch.autograd.grad(y, x, v, allow_unused=True)
return torch.zeros_like(x) if res is None else res
_ = vjp(gy[0])
result = vmap(vjp)(gy)
self.assertEqual(result, torch.zeros(B0, *x.shape, device=device))
def discover_variants(opinfo):
aliases = []
inplace_variants = []
if opinfo.inplace_variant:
inplace_variants.append(opinfo.inplace_variant)
aliases.append(opinfo.op)
for alias in opinfo.aliases:
aliases.append(alias.op)
if alias.inplace_variant:
inplace_variants.append(alias.inplace_variant)
return aliases, inplace_variants
class TestVmapOperatorsOpInfo(TestCase):
def vmap_outplace_test(self, func, args, kwargs, in_dims, check_shape_only=False):
for loop_out, vmap_out in compute_quantities_for_vmap_test(func, args, kwargs, in_dims):
if check_shape_only:
self.assertEqual(vmap_out.shape, loop_out.shape)
continue
self.assertEqual(vmap_out, loop_out)
def vmap_inplace_test(self, func, args, kwargs, in_dims):
# NB: This test assumes that the first argument is being modified.
# This is OK because it's what every other OpInfo-based test assumes,
# but it is going to need a more robust solution eventually.
if in_dims[0] is None:
# Check that we correctly raise an error when vmap is impossible
# on the in-place operation
with self.assertRaises(RuntimeError):
for _ in compute_quantities_for_vmap_test(
func, args, kwargs, in_dims, compute_loop_out=False, clone_inputs=True):
pass
return
for loop_out, vmap_out in compute_quantities_for_vmap_test(
func, args, kwargs, in_dims, clone_inputs=True):
self.assertEqual(vmap_out, loop_out)
def opinfo_vmap_test(self, device, dtype, op, check_has_batch_rule, skip_inplace=()):
def test():
# Error inputs check
if op.error_inputs_func is not None:
error_inputs = op.error_inputs(device)
for error_input in error_inputs:
sample_input = error_input.sample_input
args = (sample_input.input,) + tuple(sample_input.args)
kwargs = sample_input.kwargs
for args, in_dims, _ in generate_vmap_inputs(args, {}):
with self.assertRaises(Exception):
vmap(op, in_dims)(*args, **kwargs)
# Sample inputs check
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
aliases, inplace_aliases = discover_variants(op)
check_shape_only = op.name in ('empty_like', 'new_empty')
for sample_input in sample_inputs_itr:
args = (sample_input.input,) + sample_input.args
kwargs = sample_input.kwargs
is_batch_norm_and_training = is_batch_norm_training(op.name, kwargs)
for args, in_dims, _ in generate_vmap_inputs(
args, {}, is_batch_norm_and_training=is_batch_norm_and_training):
for func in aliases:
self.vmap_outplace_test(func, args, kwargs, in_dims, check_shape_only)
if op.name in skip_inplace:
continue
if not is_valid_inplace_sample_input(sample_input, op, op.inplace_variant):
continue
for func in inplace_aliases:
self.vmap_inplace_test(func, args, kwargs, in_dims)
if check_has_batch_rule:
check_vmap_fallback(self, test, op)
else:
test()
vmap_fail = {
# These are things that we either cannot fix or are not actually problems
xfail('resize_'),
xfail('resize_as_'),
xfail('to_sparse'),
xfail('__getitem__'), # dynamic mask
xfail('index_put'), # dynamic mask
xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
xfail('masked_select'), # dynamic op
xfail('nonzero'), # dynamic op
xfail('allclose'), # returns a boolean
xfail('rand_like'), # randomness is tested separately
xfail('randint_like'), # randomness is tested separately
xfail('randn_like'), # randomness is tested separately
xfail('bernoulli', ''), # randomness is tested separately
xfail('normal', ''), # randomness is tested separately
xfail('normal', 'number_mean'), # randomness is tested separately
xfail('multinomial', ''), # randomness
xfail('nn.functional.embedding', ''), # we only support some cases
xfail('nn.functional.rrelu'), # randomness
xfail('nn.functional.dropout2d', ''), # randomness
xfail('nn.functional.feature_alpha_dropout', 'with_train'), # randomness
xfail('as_strided'), # as_strided is too crazy
xfail('nn.functional.fractional_max_pool3d'), # randomness
xfail('nn.functional.fractional_max_pool2d'), # randomness
# entries in here don't work and need to be fixed.
# Each one of these is a bug
xfail('view_as_complex'),
xfail('tensor_split'),
xfail('svd', device_type='cuda'),
xfail('linalg.svd', device_type='cuda'),
xfail('histogramdd'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('nn.functional.embedding_bag'),
xfail('__rpow__'), # https://github.com/pytorch/functorch/issues/617
xfail('column_stack', ''),
xfail('pca_lowrank', ''),
xfail('svd_lowrank', ''),
skip('linalg.eigh', ''), # Flaky but is likely a real problem
        # requires a rank 4 tensor to use the channels_last format
xfail('bfloat16'),
xfail('bool'),
xfail('byte'),
xfail('char'),
xfail('double'),
xfail('float'),
xfail('half'),
xfail('int'),
xfail('long'),
xfail('short'),
xfail('linspace', ''),
xfail('nn.functional.dropout3d', ''),
xfail('broadcast_shapes', ''),
xfail('clamp_min', ''),
xfail('sparse.sampled_addmm'),
xfail('jiterator_binary', device_type='cuda'),
xfail('arange', ''),
xfail('clamp_max', ''),
xfail('jiterator_binary_return_by_ref', device_type='cuda'),
xfail('jiterator_4inputs_with_extra_args', device_type='cuda'),
xfail('equal', ''),
xfail('jiterator_unary', device_type='cuda'),
xfail('logspace', ''),
xfail('jiterator_2inputs_2outputs', device_type='cuda'),
xfail('empty', ''),
}
@ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', (
tol1('linalg.det',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
        # The following is often flaky, but only on Windows.
        # We should investigate whether it is actually a problem.
tol1('nn.functional.conv_transpose3d',
{torch.float32: tol(atol=1e-04, rtol=1e-02)}, device_type='cuda'),
))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', vmap_fail)
def test_vmap_exhaustive(self, device, dtype, op):
# needs to be fixed
inplace_failure_list = (
)
self.opinfo_vmap_test(device, dtype, op, check_has_batch_rule=False,
skip_inplace=inplace_failure_list)
@ops(op_db + additional_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', (
tol1('linalg.det',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestVmapOperatorsOpInfo', 'test_op_has_batch_rule', vmap_fail.union({
xfail('complex'),
xfail('copysign'),
xfail('eig'),
xfail('histogram'),
xfail('index_fill'),
xfail('nansum'),
xfail('nanmean'),
        # the `index_put` OpInfo in pytorch/pytorch has a
        # masked index as input, which is not supported
xfail('index_put', ''),
xfail('isin'),
xfail('linalg.lstsq'),
xfail('linalg.lstsq', 'grad_oriented'),
xfail('linalg.matrix_rank'),
xfail('linalg.matrix_rank', 'hermitian'),
xfail('linalg.pinv'),
xfail('linalg.pinv', 'hermitian'),
xfail('linalg.solve'),
xfail('lu_solve'),
xfail('lu_unpack'),
xfail('masked_fill'),
xfail('masked_scatter'),
xfail('masked_select'),
xfail('nanquantile'),
xfail('ormqr'),
xfail('put'),
xfail('quantile'),
xfail('renorm'),
xfail('resize_as_'),
xfail('take'),
xfail('tensor_split'),
xfail('to_sparse'),
xfail('vdot'),
xfail('__getitem__', ''),
xfail('all'),
xfail('any'),
xfail('count_nonzero'),
xfail('nanmean'),
xfail('nn.functional.dropout'), # works, can't check against for loop because of randomness inconsistency
xfail('resize_'),
xfail('view_as_complex'),
xfail('matrix_exp'),
xfail('bucketize'),
xfail('fft.ihfft2'),
xfail('fft.ihfftn'),
xfail('allclose'),
xfail('argwhere'),
xfail('linalg.cross'),
xfail('unique_consecutive'),
xfail('unique'),
xfail('nn.functional.ctc_loss'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('nn.functional.huber_loss'),
        # We can get this to work on CUDA through decomposition,
        # but it fails on CPU because max_pool1d_cpu does not have a derivative
xfail('nn.functional.max_pool1d'),
xfail('nn.functional.max_pool3d'),
xfail('histc'),
xfail('as_strided'),
xfail('istft'),
xfail('nonzero'),
xfail('nn.functional.fractional_max_pool2d'),
xfail('stft'),
xfail('isclose'),
xfail('nn.functional.fractional_max_pool3d'),
xfail('nn.functional.bilinear'),
xfail('nn.functional.embedding_bag'),
xfail('linalg.tensorsolve'),
xfail('bernoulli', ''),
xfail('linalg.lu_factor', ''),
xfail('nn.functional.feature_alpha_dropout', 'with_train'),
xfail('nn.functional.kl_div', ''),
xfail('multinomial', ''),
xfail('column_stack', ''),
xfail('pca_lowrank', ''),
xfail('normal', ''),
xfail('nn.functional.dropout2d', ''),
xfail('normal', 'number_mean'),
xfail('svd_lowrank', ''),
xfail('diagflat', ''),
xfail('special.log_ndtr'),
xfail('nn.functional.triplet_margin_loss', ''),
xfail('nn.functional.pdist', ''),
xfail('scatter_reduce', 'sum'),
xfail('nn.functional.smooth_l1_loss', ''),
xfail('scatter_reduce', 'amax'),
xfail('nn.functional.max_unpool1d', 'grad'),
xfail('nn.functional.multi_margin_loss', ''),
xfail('scatter_reduce', 'prod'),
xfail('nn.functional.multilabel_margin_loss', ''),
xfail('scatter_reduce', 'amin'),
xfail('nn.functional.max_unpool3d', 'grad'),
xfail('nn.functional.max_unpool2d', ''),
xfail('nn.functional.max_unpool2d', 'grad'),
xfail('nn.functional.margin_ranking_loss', ''),
xfail('nn.functional.max_unpool1d', ''),
xfail('nn.functional.soft_margin_loss', ''),
xfail('scatter_reduce', 'mean'),
xfail('nn.functional.max_unpool3d', ''),
xfail('linalg.ldl_solve', '', device_type='cpu'),
xfail('chalf', ''),
xfail('arange', ''),
xfail('clamp_max', ''),
xfail('jiterator_binary_return_by_ref', device_type='cuda'),
xfail('special.spherical_bessel_j0'),
xfail('jiterator_unary', device_type='cuda'),
xfail('jiterator_2inputs_2outputs', device_type='cuda'),
xfail('special.airy_ai'),
xfail('clamp_min', ''),
xfail('special.bessel_j0'),
xfail('sparse.sampled_addmm'),
xfail('special.bessel_y0'),
xfail('special.chebyshev_polynomial_u'),
xfail('special.modified_bessel_k1'),
xfail('segment_reduce', 'offsets'),
xfail('linalg.solve_ex', ''),
xfail('special.bessel_j1'),
xfail('logspace', ''),
xfail('empty', ''),
xfail('index_reduce', ''),
xfail('linspace', ''),
xfail('special.laguerre_polynomial_l'),
xfail('special.hermite_polynomial_h'),
xfail('jiterator_binary', device_type='cuda'),
xfail('special.modified_bessel_i0'),
xfail('jiterator_4inputs_with_extra_args', device_type='cuda'),
xfail('linalg.vander', ''),
xfail('segment_reduce', 'lengths'),
xfail('linalg.lu_solve', ''),
xfail('special.bessel_y1'),
xfail('special.hermite_polynomial_he'),
xfail('special.scaled_modified_bessel_k0'),
xfail('nn.functional.dropout3d', ''),
xfail('special.scaled_modified_bessel_k1'),
xfail('broadcast_shapes', ''),
xfail('special.modified_bessel_k0'),
xfail('linalg.vecdot', ''),
xfail('linalg.ldl_factor', ''),
xfail('special.modified_bessel_i1'),
xfail('special.chebyshev_polynomial_t'),
xfail('as_strided_scatter', ''),
xfail('equal', ''),
xfail('linalg.lu', ''),
skip('linalg.ldl_solve', ''),
}))
def test_op_has_batch_rule(self, device, dtype, op):
# needs to be fixed
inplace_failures = (
'abs',
'acos',
'acosh',
'addbmm',
'addcdiv',
'addcmul',
'addmm',
'addmv',
'addr',
'asin',
'asinh',
'atan2',
'atan',
'atanh',
'baddbmm',
'clamp',
'conj_physical',
'cumprod',
'cumsum',
'div',
'div',
'floor_divide',
'fmod',
'heaviside',
'hypot',
'igamma',
'igammac',
'index_add',
'index_copy',
'ldexp',
'lerp',
'neg',
'nextafter',
'polygamma',
'pow',
'remainder',
'scatter_add',
'scatter',
'square',
'sub',
'tril',
'triu',
'trunc',
'xlogy',
)
self.opinfo_vmap_test(device, dtype, op, check_has_batch_rule=True,
skip_inplace=inplace_failures)
def test_conv_double_backward(self, device):
images = torch.randn(2, 1, 5, 5, device=device)
weight = torch.randn(2, 1, 2, 2, device=device)
bias = torch.randn(2, device=device)
ggI = torch.randn_like(images)
ggW = torch.randn_like(weight)
ggb = torch.randn_like(bias)
stride = (1, 1)
padding = (0, 0)
dilation = (1, 1)
transposed = False
output_padding = (0, 0)
groups = 1
output_mask = (True, True, True)
gO = torch.randn_like(F.conv2d(images, weight, bias, stride, padding, dilation, groups))
args = (
ggI, ggW, ggb, gO, weight, images, stride, padding, dilation,
transposed, output_padding, groups, output_mask,
)
op = torch.ops.aten._convolution_double_backward
generator = get_fallback_and_vmap_exhaustive(op, args, {})
def test():
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out, atol=1e-4, rtol=1e-4)
check_vmap_fallback(self, test, op)
def test_isnan(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isnan
x = torch.randn(B, N, C, H, W)
x[x > 0] = float('nan')
test(self, op, (x,), in_dims=(0))
def test_isinf(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = torch.isinf
x = torch.randn(B, N, C, H, W)
x[x > 0] = float('inf')
test(self, op, (x,), in_dims=(0))
def test_foo_like(self, device):
# vfdev-5: Probably, we can remove this line. Flake8 reported as unused
# test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
for op in [torch.ones_like, torch.zeros_like]:
x = torch.randn(B, N, C, H, W)
# todo(chilli): test these better
# Not testing correctness, just that they run
vmap(op, in_dims=(0,))(x,)
def test_flatten(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
op = torch.flatten
x = torch.randn(2, 3, 4, 5)
test(self, op, (x, 1, 2), in_dims=(0, None, None))
def test_group_norm(self, device):
test = functools.partial(_vmap_test, check_propagates_grad=False)
B, N, C, H, W = 2, 3, 24, 5, 7
op = F.group_norm
x = torch.randn(B, N, C, H, W)
weight = torch.randn(C)
bias = torch.randn(C)
test(self, op, (x, 3, weight, bias), in_dims=(0, None, None, None))
x = torch.randn(B, N, C, H, W)
weight = torch.randn(B, C)
bias = torch.randn(B, C)
test(self, op, (x, 4, weight, bias), in_dims=(0, None, 0, 0))
def test_index_put(self, device):
def test(f, t, idx, values):
base = f(t[0], idx[0], values[0])
self.assertEqual(vmap(f, in_dims=(0, 0, 0))(t, idx, values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, None, None))(t, idx[0], values[0])[0], base)
self.assertEqual(vmap(f, in_dims=(0, None, 0))(t, idx[0], values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, 0, None))(t, idx, values[0])[0], base)
def f(x, y, z):
x[y] = z
return x
x = torch.randn(3, 4, 5, device=device)
y = torch.zeros((3, 2), device=device).long()
z = torch.randn(3, 2, 5, device=device)
test(f, x, y, z)
# indexing innermost dim
def f(t, idx, values):
t[:, idx] = values
return t
t = torch.zeros((3, 2, 3))
values = torch.ones((3, 1, 2))
idx = torch.tensor([[1, 2]]).expand((3, 2))
test(f, t, idx, values)
# indexing middle dim
def f(t, idx, values):
t[:, idx, :] = values
return t
t = torch.zeros((3, 2, 3, 3))
values = torch.ones((3, 1, 2, 3))
idx = torch.tensor([[0, 2]]).expand((3, 2))
test(f, t, idx, values)
# indexing with slices
def f(t, values):
t[:, :2, :] = values
return t
base = f(t[0], values[0])
self.assertEqual(vmap(f, in_dims=(0, 0))(t, values)[0], base)
self.assertEqual(vmap(f, in_dims=(0, None))(t, values[0])[0], base)
# index_put_
tensor = torch.zeros(3, 3, 4)
value = torch.ones(3, 2)
idxs = (torch.tensor([[0], [1], [2]]), torch.tensor([[0]]), torch.tensor([1, 2]))
expected = torch.index_put_(tensor.clone(), idxs, value)
def f(t, idx, v):
torch.index_put_(t, idx, v)
return t
self.assertEqual(vmap(f, in_dims=(0, (None, None), 0))(tensor, idxs[1:], value), expected)
self.assertEqual(vmap(f, in_dims=(0, (None, None), None))(tensor, idxs[1:], value[0]), expected)
@parametrize('training', [True, False])
@parametrize('track_running_stats', [True, False])
@parametrize('affine', [True, False])
def test_batch_norm(self, device, affine, track_running_stats, training):
if not track_running_stats and not training:
return
test = functools.partial(_vmap_test, check_propagates_grad=False)
BN = torch.nn.BatchNorm2d
ensemble_size = 10
hidden_dim = 3
weights, buffers, _, _, _ = \
functional_init_with_buffers(BN, [ensemble_size])(
hidden_dim, affine=affine, track_running_stats=track_running_stats)
inputs = [torch.randn(ensemble_size, 32, hidden_dim, 16, 16, device=device)]
in_dims = [0]
def append(inp, in_dim):
inputs.append(inp)
in_dims.append(in_dim)
if track_running_stats:
running_mean, running_var, _ = buffers
append(running_mean.to(device), 0)
append(running_var.to(device), 0)
else:
append(None, None)
append(None, None)
if affine:
weight, bias = weights
append(weight.to(device), 0)
append(bias.to(device), 0)
else:
append(None, None)
append(None, None)
append(training, None)
def op(inp, running_mean, running_var, weight, bias, training):
res = F.batch_norm(inp, running_mean, running_var, weight, bias, training)
if track_running_stats:
return res, running_mean, running_var
return res
test(self, op, tuple(inputs), in_dims=tuple(in_dims))
def test_torch_return_types_returns(self, device):
t = torch.randn(3, 2, 2, device=device)
self.assertTrue(isinstance(vmap(torch.min, (0, None))(t, 0), torch.return_types.min))
self.assertTrue(isinstance(vmap(torch.max, (0, None))(t, 0), torch.return_types.max))
self.assertTrue(isinstance(vmap(torch.topk, (0, None, None))(t, 1, 0), torch.return_types.topk))
self.assertTrue(isinstance(vmap(torch.linalg.eig, (0))(t), torch.return_types.linalg_eig))
def test_namedtuple_returns(self, device):
Point = namedtuple('Point', ['x', 'y'])
def f(x, y):
return Point(x=x, y=y)
x = torch.randn(2, 5, device=device)
y = torch.randn(2, 3, device=device)
self.assertTrue(isinstance(vmap(f)(x, y), Point))
def test_inplace_on_view(self, device):
def func(leaf):
base = leaf * leaf
view = base.transpose(0, 1)
view[2:4, 2:4] *= 2
view[0:2, 0:2].diagonal().sin_()
view = view[1:3, 1:3]
view.cos_()
return view
def push_vjp(leaf, gout):
_, vjp_fn = vjp(func, leaf)
result, = vjp_fn(gout)
return result
leaf = torch.randn(4, 4, device=device)
gout = torch.randn(2, 2, device=device)
args = (leaf, gout)
for args, in_dims, _, in generate_vmap_inputs(args, {}):
if in_dims[1] is None:
# triggers some composite compliance problem
continue
self.vmap_outplace_test(push_vjp, args, {}, in_dims)
def test_advanced_indexing(self, device):
def test(f, args):
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(f, args, {}):
self.assertEqual(loop_out, batched_out)
def f(x, idx):
return x[:, idx]
def f2(x, idx):
return x[idx, :]
def f3(x, idx):
return x[:, :, idx]
inps = (torch.randn(5, 5, 5, device=device),
torch.randn(5, 5, 5, 5, device=device),
torch.randn(5, 5, 5, 5, 5, device=device))
idxes = (torch.tensor([0, 1, 2], device=device),
torch.tensor([0, 1, 2], device=device).reshape(3, 1),
torch.tensor([0, 1, 2], device=device).reshape(3, 1, 1))
for (inp, idx) in itertools.product(inps, idxes):
test(f, (inp, idx))
test(f2, (inp, idx))
test(f3, (inp, idx))
def test_nested_advanced_indexing(self, device):
e = torch.rand(7, 4, device=device)
idx = torch.tensor([0, 1], device=device).view(2, 1)
# simple reference implementation for comparison
def _fake_vmap(f, in_dims=0, out_dims=0):
def w(input):
r = [f(input.select(in_dims, i)) for i in range(input.size(in_dims))]
return torch.stack(r, out_dims)
return w
def with_vmap(_vmap):
def g(idx_):
def f(e_):
return e_[idx_]
return _vmap(f, in_dims=1)(e)
r = _vmap(g)(idx)
return r
a = with_vmap(vmap)
b = with_vmap(_fake_vmap)
self.assertEqual(a, b)
@ops(filter(lambda op: "linalg" in op.name, op_db + additional_op_db), allowed_dtypes=(torch.float,))
@skipOps('TestVmapOperatorsOpInfo', 'test_vmap_linalg_failure_1D_input', {
xfail('linalg.vector_norm'), # can accept vector inputs
skip('linalg.multi_dot'), # accepts list of tensor inputs, has its own special test
xfail('linalg.vander'),
xfail('linalg.vecdot'),
skip('linalg.ldl_solve', ''),
})
def test_vmap_linalg_failure_1D_input(self, device, dtype, op):
for sample in op.sample_inputs(device, dtype, requires_grad=False):
if sample.input.dim() != 2 or sample.input.shape[0] == 0:
continue
test_input = sample.input[0] # using the sample input avoids numerical inconsistency issues
with self.assertRaisesRegex(RuntimeError, "dimension"):
op(test_input, *sample.args, **sample.kwargs)
def op_wrapper(inp):
return op(inp, *sample.args, **sample.kwargs)
# square inputs are more likely to pass linalg checks
test_input = test_input.expand(test_input.shape[0], test_input.shape[0])
with self.assertRaisesRegex(RuntimeError, "dimension"):
return vmap(op_wrapper)(test_input)
def test_vmap_multi_dot_failure_1D_input(self):
        # multi_dot special-cases the first and last tensors, so passing 3 items avoids those special cases
inputs = (torch.randn(3, 3), torch.randn(3), torch.randn(3, 3))
with self.assertRaisesRegex(RuntimeError, "tensor 1 must be 2D but got 1D"):
torch.linalg.multi_dot(inputs)
# square inputs are more likely to pass linalg checks
inputs = tuple(i.expand(i.shape[0], i.shape[0]) for i in inputs)
with self.assertRaisesRegex(RuntimeError, "tensor 1 must be 2D but got 1D"):
return vmap(torch.linalg.multi_dot)(inputs)
class TestRandomness(TestCase):
def _reset_random(self, generator, orig_state, use_generator, seed):
return generator.set_state(orig_state) if use_generator else torch.manual_seed(seed)
def _get_image(self, batched_input, batch_size, device):
if batched_input == "first":
return torch.ones([batch_size, 3, 3, 14, 14], device=device)
if batched_input == "last":
return torch.ones([3, 3, 14, 14, batch_size], device=device)
assert batched_input == "none"
return torch.ones([3, 3, 14, 14], device=device)
def _assert_all_slices_equal(self, tensor):
expected = tensor[0]
self.assertTrue((tensor == expected).all())
def _assert_all_slices_unique(self, tensor):
B0 = tensor.shape[0]
slices_equal = vmap(vmap(lambda x, y: (x == y).all(), (0, None)), (None, 0))(tensor, tensor)
assert slices_equal.shape == (B0, B0)
slices_equal.diagonal().zero_()
self.assertEqual(slices_equal, torch.zeros_like(slices_equal))
def _assert_throws_in_error_mode(self, fn, args, in_dims):
with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
vmap(fn, in_dims=in_dims, randomness="error")(*args)
def _assert_throws_in_different_mode_inplace(self, fn, args, in_dims):
with self.assertRaisesRegex(RuntimeError, r"different inplace randomness on an unbatched tensor"):
vmap(fn, in_dims=in_dims, randomness="different")(*args)
def _assert_throws_in_same_mode_batched(self, fn, args, in_dims):
with self.assertRaisesRegex(RuntimeError,
r"Vmap does not currently support same randomness with a batched tensor input"):
vmap(fn, in_dims=in_dims, randomness="same")(*args)
def _in_dims(self, *batched_strings):
def get_in_dim(batched_string):
if batched_string == "first":
return 0
if batched_string == "last":
return -1
assert batched_string == "none"
return None
        batched_strings = batched_strings + ("first",)  # account for the dummy argument, which is always batched along the first dim
return tuple(get_in_dim(batched_string) for batched_string in batched_strings)
@parametrize('randomness', ['same', 'different', 'error'])
@parametrize('use_generator', [True, False])
def test_factory_ops(self, device, randomness, use_generator):
generator = torch.Generator(device=device)
orig_state = generator.get_state()
kwargs = {'device': device, 'generator': generator} if use_generator else {'device': device}
ops = [
lambda _, shape: torch.randn(shape, **kwargs),
lambda _, shape: torch.rand(shape, **kwargs),
lambda _, shape: torch.randint(100, shape, **kwargs),
lambda _, shape: torch.randint(5, 100, shape, **kwargs),
lambda _, shape: torch.normal(0., 1., shape, **kwargs),
]
B0 = 4
shape = (3, 3)
seed = 1234567
for op in ops:
passed = torch.randn(B0, device=device)
if randomness == 'error':
self._assert_throws_in_error_mode(op, (passed, shape), in_dims=(0, None))
return
generator = self._reset_random(generator, orig_state, use_generator, seed)
vmap_result = vmap(op, in_dims=(0, None), randomness=randomness)(passed, shape)
generator = self._reset_random(generator, orig_state, use_generator, seed)
if randomness == "different":
expected = op(passed, [B0, *shape])
self._assert_all_slices_unique(vmap_result)
self.assertEqual(vmap_result, expected)
else:
expected = op(passed, shape)
self._assert_all_slices_equal(vmap_result)
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
@parametrize('randomness', ['same', 'different', 'error'])
@parametrize('use_generator', [True, False])
def test_randperm(self, device, randomness, use_generator):
# needs a special case because randperm doesn't take a batch size
B0 = 4
seed = 1234567
passed = torch.randn(B0, device=device)
torch.manual_seed(seed)
generator = torch.Generator(device=device)
orig_state = generator.get_state()
kwargs = {'device': device, 'generator': generator} if use_generator else {'device': device}
if randomness == 'error':
with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
vmap(lambda _: torch.randperm(10, **kwargs), randomness=randomness)(passed)
return
vmap_result = vmap(lambda _: torch.randperm(10, **kwargs), randomness=randomness)(passed)
generator = generator.set_state(orig_state)
torch.manual_seed(seed)
if randomness == 'different':
for i in range(B0):
expected = torch.randperm(10, **kwargs)
self.assertEqual(vmap_result[i], expected)
else:
expected = torch.randperm(10, **kwargs)
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
@parametrize('randomness', ['error', 'same', 'different'])
@parametrize('batched_input', ["first", "last", "none"])
def test_dropout(self, device, randomness, batched_input):
def op(t, ignored):
return torch.nn.functional.dropout(torch.ones_like(t), training=True)
B0 = 4
always_batched = torch.randn((B0,))
passed = self._get_image(batched_input, B0, device)
in_dims = self._in_dims(batched_input)
if randomness == 'error':
with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
return
vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
        # Check that the randomness is within bounds.
        # dropout(p=0.5) zeroes roughly half the elements and scales survivors by 1/(1-p) = 2,
        # so mean()/2 estimates the keep probability and should be close to 0.5.
p_estimate = vmap_result.mean() / 2
self.assertTrue(p_estimate < 0.75)
self.assertTrue(p_estimate > 0.25)
if randomness == 'different':
self._assert_all_slices_unique(vmap_result)
return
assert randomness == 'same'
self._assert_all_slices_equal(vmap_result)
@parametrize('randomness', ['error', 'same', 'different'])
@parametrize('batched_input', ["first", "last", "none"])
def test_alpha_dropout(self, device, randomness, batched_input):
def op(t, ignored):
return torch.nn.functional.alpha_dropout(torch.ones_like(t), training=True)
B0 = 4
always_batched = torch.randn((B0,))
passed = self._get_image(batched_input, B0, device)
in_dims = self._in_dims(batched_input)
if randomness == 'error':
with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
return
        # I have no clue how to actually test correctness of alpha dropout because the docs
# seem wrong: https://github.com/pytorch/pytorch/issues/74004
vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
if randomness == 'different':
self._assert_all_slices_unique(vmap_result)
return
assert randomness == 'same'
self._assert_all_slices_equal(vmap_result)
@parametrize('randomness', ['error', 'same', 'different'])
@parametrize('batched_input', ["first", "last", "none"])
@parametrize('dim', [2, 3])
def test_feature_dropout(self, device, randomness, batched_input, dim):
def op(t, ignored):
f = torch.nn.functional.dropout2d if dim == 2 else torch.nn.functional.dropout3d
return f(torch.ones_like(t), training=True)
B0 = 4
always_batched = torch.randn((B0,))
passed = self._get_image(batched_input, B0, device)
if dim == 3:
unsqueeze_dim = -2 if batched_input == "last" else -1
passed = passed.unsqueeze(unsqueeze_dim)
in_dims = self._in_dims(batched_input)
if randomness == 'error':
with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
return
vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
        # Check that the randomness is within bounds.
        # dropout2d/3d with p=0.5 zeroes roughly half the feature planes and scales survivors
        # by 1/(1-p) = 2, so mean()/2 estimates the keep probability (ideally close to 0.5).
p_estimate = vmap_result.mean() / 2
self.assertTrue(p_estimate < 0.75)
self.assertTrue(p_estimate > 0.25)
# Check the "feature" pattern
dims = [-1, -2] if dim == 2 else [-1, -2, -3]
planes_numel = 2 * vmap_result.numel() / (vmap_result.shape[0] * vmap_result.shape[1] * vmap_result.shape[2])
planes = vmap_result.sum(dims)
result = (planes == 0) ^ (planes == planes_numel)
self.assertEqual(result, torch.ones_like(result, dtype=torch.bool))
if randomness == 'different':
self._assert_all_slices_unique(vmap_result)
return
assert randomness == 'same'
self._assert_all_slices_equal(vmap_result)
@parametrize('randomness', ['error', 'same', 'different'])
@parametrize('batched_input', ["first", "last", "none"])
def test_feature_alpha_dropout(self, device, randomness, batched_input):
def op(t, ignored):
return torch.nn.functional.feature_alpha_dropout(torch.ones_like(t), training=True)
B0 = 4
always_batched = torch.randn((B0,))
passed = self._get_image(batched_input, B0, device)
unsqueeze_dim = -2 if batched_input == "last" else -1
passed = passed.unsqueeze(unsqueeze_dim)
in_dims = self._in_dims(batched_input)
if randomness == 'error':
with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
return
vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
        # I have no clue how to actually test correctness of alpha dropout because the docs
# seem wrong: https://github.com/pytorch/pytorch/issues/74004
# Check the "feature" pattern
dims = [-1, -2, -3]
planes = vmap_result.sum(dims)
max_elt = planes.max()
min_elt = planes.min()
result = (planes == min_elt) ^ (planes == max_elt)
self.assertEqual(result, torch.ones_like(result, dtype=torch.bool))
if randomness == 'different':
self._assert_all_slices_unique(vmap_result)
return
assert randomness == 'same'
self._assert_all_slices_equal(vmap_result)
@parametrize('randomness', ['error', 'same', 'different'])
@parametrize('batched_input', ["first", "last", "none"])
def test_like_functions(self, device, randomness, batched_input):
seed = 1234567
supported_ops = [
lambda t, _: torch.randint_like(t, 20),
lambda t, _: torch.randint_like(t, 0, 20),
lambda t, _: torch.rand_like(t),
lambda t, _: torch.randn_like(t),
]
B0 = 4
for op in supported_ops:
always_batched = torch.randn(B0)
passed = self._get_image(batched_input, B0, device)
in_dims = self._in_dims(batched_input)
if randomness == 'error':
with self.assertRaisesRegex(RuntimeError, r"called random operation while in randomness error mode"):
vmap(op, in_dims=in_dims, randomness=randomness)(passed, always_batched)
return
torch.manual_seed(seed)
vmap_result = vmap(op, randomness=randomness, in_dims=in_dims)(passed, always_batched)
torch.manual_seed(seed)
if batched_input == "last":
passed = passed.movedim(-1, 0)
if randomness == 'different':
if batched_input == "none":
passed = passed.expand(B0, *passed.shape)
expected = op(passed, 0)
self._assert_all_slices_unique(vmap_result)
self.assertEqual(expected, vmap_result)
return
assert randomness == 'same'
if batched_input != "none":
passed = passed[0]
expected = op(passed, 0)
self._assert_all_slices_equal(vmap_result)
for i in range(B0):
self.assertEqual(expected, vmap_result[i])
@parametrize('use_generator', [True, False])
@parametrize('randomness', ['error', 'same', 'different'])
@parametrize('batched_input', ["first", "last", "none"])
def test_random_unary_inplace(self, device, use_generator, randomness, batched_input):
generator = torch.Generator(device=device)
orig_state = generator.get_state()
kwargs = {'generator': generator} if use_generator else {}
ops = [
lambda t, _: t.random_(**kwargs),
lambda t, _: t.random_(100, **kwargs),
lambda t, _: t.random_(-5, 100, **kwargs),
lambda t, _: t.normal_(**kwargs),
lambda t, _: t.bernoulli_(**kwargs),
lambda t, _: t.cauchy_(**kwargs),
lambda t, _: t.exponential_(**kwargs),
lambda t, _: t.geometric_(0.5, **kwargs),
lambda t, _: t.log_normal_(**kwargs),
lambda t, _: t.uniform_(**kwargs),
]
B0 = 4
seed = 1234567
in_dims = self._in_dims(batched_input)
for op in ops:
# because of in place updates, clone inputs
always_batched = torch.randn(B0, device=device)
passed = self._get_image(batched_input, B0, device)
passed_expected = passed.clone()
if randomness == 'error':
self._assert_throws_in_error_mode(op, (passed, always_batched), in_dims=in_dims)
return
if randomness == 'different' and batched_input == "none":
self._assert_throws_in_different_mode_inplace(op, (passed, always_batched), in_dims=in_dims)
return
generator = self._reset_random(generator, orig_state, use_generator, seed)
vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(passed, always_batched)
if batched_input == "last":
passed_expected = passed_expected.movedim(-1, 0)
generator = self._reset_random(generator, orig_state, use_generator, seed)
if randomness == "different":
expected = op(passed_expected, always_batched)
self._assert_all_slices_unique(vmap_result)
self.assertEqual(vmap_result, expected)
else:
if batched_input != "none":
passed_expected = passed_expected[0].clone() # bug in pytorch, normal_ on views doesn't work
expected = op(passed_expected, always_batched)
self._assert_all_slices_equal(vmap_result)
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
@parametrize('use_generator', [True, False])
@parametrize('randomness', ['error', 'same', 'different'])
@parametrize('batched_input', ["first", "last", "none"])
@parametrize('batched_probability', ["first", "last", "none"])
def test_bernoulli_in_place(self, device, use_generator, randomness, batched_input, batched_probability):
B0 = 4
seed = 1234567
generator = torch.Generator(device=device)
orig_state = generator.get_state()
kwargs = {'generator': generator} if use_generator else {}
in_dims = self._in_dims(batched_input, batched_probability)
def op(t, p, ignored):
return t.bernoulli_(p, **kwargs)
# because of in place updates, clone inputs
always_batched = torch.randn(B0, device=device)
input = self._get_image(batched_input, B0, device)
input_expected = input.clone()
probability = self._get_image(batched_probability, B0, device) - 0.5
if randomness == 'error':
self._assert_throws_in_error_mode(op, (input, probability, always_batched), in_dims=in_dims)
return
if randomness == 'same' and batched_probability != "none":
self._assert_throws_in_same_mode_batched(op, (input, probability, always_batched), in_dims=in_dims)
return
if batched_input == "none" and batched_probability != "none":
regex = r"there exists a Tensor `other` in extra_args that has more elements than `self`"
with self.assertRaisesRegex(RuntimeError, regex):
vmap(op, in_dims=in_dims, randomness=randomness)(input, probability, always_batched)
return
if randomness == 'different' and batched_input == "none":
self._assert_throws_in_different_mode_inplace(op, (input, probability, always_batched), in_dims=in_dims)
return
self._reset_random(generator, orig_state, use_generator, seed)
vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(input, probability, always_batched)
self._reset_random(generator, orig_state, use_generator, seed)
if batched_input == "last":
input_expected = input_expected.movedim(-1, 0)
if batched_probability == "last":
probability = probability.movedim(-1, 0)
if randomness == "different":
expected = op(input_expected, probability, always_batched)
self._assert_all_slices_unique(vmap_result)
self.assertEqual(vmap_result, expected)
else:
if batched_input != "none":
input_expected = input_expected[0]
expected = op(input_expected, probability, always_batched)
self._assert_all_slices_equal(vmap_result)
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
@parametrize('use_generator', [True, False])
@parametrize('randomness', ['error', 'same', 'different'])
@parametrize('batched_input', ["first", "last", "none"])
@parametrize('batched_other', ["first", "last", "none"])
def test_random_binary_out_of_place(self, device, use_generator, randomness, batched_input, batched_other):
generator = torch.Generator(device=device)
orig_state = generator.get_state()
kwargs = {'generator': generator} if use_generator else {}
ops = [
lambda t, o, _: torch.normal(t, o, **kwargs),
lambda t, o, _: torch.binomial(t, (o - 0.5), **kwargs),
]
B0 = 4
seed = 1234567
in_dims = self._in_dims(batched_input, batched_other)
for op in ops:
always_batched = torch.randn(B0, device=device)
input = self._get_image(batched_input, B0, device)
other = self._get_image(batched_other, B0, device)
if randomness == 'error':
self._assert_throws_in_error_mode(op, (input, other, always_batched), in_dims=in_dims)
return
if randomness == 'same' and (batched_input != "none" or batched_other != "none"):
self._assert_throws_in_same_mode_batched(op, (input, other, always_batched), in_dims=in_dims)
return
generator = self._reset_random(generator, orig_state, use_generator, seed)
vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(input, other, always_batched)
if batched_input == "last":
input = input.movedim(-1, 0)
if batched_other == "last":
other = other.movedim(-1, 0)
generator = self._reset_random(generator, orig_state, use_generator, seed)
if randomness == "different":
if batched_input == "none":
input = input.expand(B0, *input.shape)
expected = op(input, other, always_batched)
self._assert_all_slices_unique(vmap_result)
self.assertEqual(vmap_result, expected)
else:
assert batched_input == "none" and batched_other == "none"
expected = op(input, other, always_batched)
self._assert_all_slices_equal(vmap_result)
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
@parametrize('use_generator', [True, False])
@parametrize('randomness', ['error', 'same', 'different'])
@parametrize('batched_input', ["first", "last", "none"])
def test_random_unary_out_of_place(self, device, use_generator, randomness, batched_input):
generator = torch.Generator(device=device)
orig_state = generator.get_state()
kwargs = {'generator': generator} if use_generator else {}
ops = [
lambda t, _: torch.normal(0., torch.abs(t), **kwargs),
lambda t, _: torch.normal(t, 1., **kwargs),
lambda t, _: torch.bernoulli(t - 0.5, **kwargs),
lambda t, _: torch.bernoulli(t, 0.5, **kwargs),
lambda t, _: torch._standard_gamma(t, **kwargs),
lambda t, _: torch._sample_dirichlet(t, **kwargs),
lambda t, _: torch.poisson(t, **kwargs),
]
B0 = 4
seed = 1234567
in_dims = self._in_dims(batched_input)
for op in ops:
always_batched = torch.randn(B0, device=device)
passed = self._get_image(batched_input, B0, device)
if randomness == 'error':
self._assert_throws_in_error_mode(op, (passed, always_batched), in_dims=in_dims)
return
if randomness == 'same' and batched_input != "none":
self._assert_throws_in_same_mode_batched(op, (passed, always_batched), in_dims=in_dims)
return
generator = self._reset_random(generator, orig_state, use_generator, seed)
vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(passed, always_batched)
generator = self._reset_random(generator, orig_state, use_generator, seed)
if randomness == "different":
if batched_input == "none":
passed = passed.expand(B0, *passed.shape)
if batched_input == "last":
passed = passed.movedim(-1, 0)
expected = op(passed, always_batched)
self._assert_all_slices_unique(vmap_result)
self.assertEqual(vmap_result, expected)
else:
expected = op(passed, always_batched)
self._assert_all_slices_equal(vmap_result)
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
@parametrize('use_generator', [True, False])
@parametrize('randomness', ['error', 'same', 'different'])
@parametrize('batched_call', [True, False])
@parametrize('batched_input', ["first", "last", "none"])
def test_multinomial(self, device, use_generator, randomness, batched_call, batched_input):
def flatten_input(input, batch_call, batch_location):
if batch_call and batch_location != "none":
final_size = 3 # [B0, B, N]
elif not batch_call and batch_location == "none":
final_size = 1 # [N]
else:
final_size = 2 # [B0, N] or [B, N]
start_idx = final_size - 1
end_idx = -1
if batch_location == "last":
start_idx -= 1
                end_idx -= 1  # ends up at the correct final size because we index from the end
ret = input.flatten(start_idx, end_idx)
assert ret.dim() == final_size
return ret
def op(input, _):
return torch.multinomial(input, 10, **kwargs)
generator = torch.Generator(device=device)
orig_state = generator.get_state()
kwargs = {'generator': generator} if use_generator else {}
B0 = 4
seed = 1234567
in_dims = self._in_dims(batched_input)
always_batched = torch.randn(B0, device=device)
passed = self._get_image(batched_input, B0, device)
passed = flatten_input(passed, batched_call, batched_input)
if randomness == 'error':
self._assert_throws_in_error_mode(op, (passed, always_batched), in_dims=in_dims)
return
if randomness == 'same' and batched_input != "none":
self._assert_throws_in_same_mode_batched(op, (passed, always_batched), in_dims=in_dims)
return
generator = self._reset_random(generator, orig_state, use_generator, seed)
vmap_result = vmap(op, in_dims=in_dims, randomness=randomness)(passed, always_batched)
generator = self._reset_random(generator, orig_state, use_generator, seed)
if randomness == "different":
if batched_input == "none":
passed = passed.expand(B0, *passed.shape)
if batched_input == "last":
passed = passed.movedim(-1, 0)
orig_passed_size = passed.shape[:2] if batched_call else passed.shape[:1]
passed = passed.flatten(0, 1) if batched_call else passed
expected = op(passed, always_batched)
            expected = expected.reshape(*orig_passed_size, 10)
self._assert_all_slices_unique(vmap_result)
self.assertEqual(vmap_result, expected)
else:
expected = op(passed, always_batched)
self._assert_all_slices_equal(vmap_result)
for i in range(B0):
self.assertEqual(vmap_result[i], expected)
def test_unsupported_random(self, device):
x = torch.randn(3, device=device)
y = x.abs()
z = x.abs()
with self.assertRaisesRegex(RuntimeError, "calling out variants"):
def f(x):
return torch.randn(3, device=device, out=y)
vmap(f, randomness='same')(x)
with self.assertRaisesRegex(RuntimeError, "calling out variants"):
def f(x0, x1):
return torch.normal(x, y, out=x)
vmap(f, randomness='same')(z, z)
with self.assertRaisesRegex(RuntimeError, "do not yet support"):
def f(z):
return torch.rrelu(x)
vmap(f, randomness='same')(z)
@parametrize('in_dim', [0, 1, 2])
@parametrize('out_dim', [0, 1, 2])
def test_chunk_vmap(self, in_dim, out_dim):
randomness = "different"
x = torch.randn(4, 5, 6)
def f(x):
y = x.sin() + torch.rand_like(x)
return y
for chunks in [1, 2, 3, 4, 7, 10, 16]:
output = chunk_vmap(
f, in_dims=in_dim, out_dims=out_dim, randomness=randomness, chunks=chunks
)(x)
self._assert_all_slices_unique(output)
class TestTransformFailure(TestCase):
@parametrize('transform', ['vmap', 'grad', 'grad_and_value', 'vjp', 'jvp', 'jacrev', 'jacfwd'])
def test_fails_with_autograd_function(self, device, transform):
class Test(torch.autograd.Function):
@staticmethod
def forward(_, input):
return input
@staticmethod
def backward(_, grad_input):
return grad_input
transform = getattr(functorch, transform)
def f(x):
return Test.apply(x)
if transform == grad or transform == grad_and_value:
input = torch.tensor(4.)
else:
input = torch.randn(5)
if transform == vjp:
transform = functools.partial(transform, f)
elif transform == jvp:
input = (input,)
transform = functools.partial(transform, f, input)
else:
transform = transform(f)
with self.assertRaisesRegex(RuntimeError, "autograd.Function"):
transform(input)
only_for = ("cpu", "cuda")
instantiate_device_type_tests(TestVmapOperatorsOpInfo, globals(), only_for=only_for)
instantiate_device_type_tests(
TestVmapBatchedGradient,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(TestTransformFailure, globals(), only_for=only_for)
instantiate_device_type_tests(TestRandomness, globals(), only_for=only_for)
if __name__ == '__main__':
run_tests()
| pytorch-master | functorch/test/test_vmap.py |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# source code directory, relative to this file, for sphinx-autobuild
# sys.path.insert(0, os.path.abspath('../..'))
import torch
import functorch
RELEASE = os.environ.get('RELEASE', False)
import pytorch_sphinx_theme
import sys
# -- General configuration ------------------------------------------------
# Required version of sphinx is set from docs/requirements.txt
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
# 'sphinxcontrib.katex',
'sphinx.ext.autosectionlabel',
'sphinx_copybutton',
'myst_nb',
]
# sys.path.insert(0, os.path.abspath('./notebooks'))
# build the templated autosummary files
# autosummary_generate = True
numpydoc_show_class_members = False
# autosectionlabel throws warnings if section names are duplicated.
# The following tells autosectionlabel to not throw a warning for
# duplicated section names that are in different documents.
autosectionlabel_prefix_document = True
# tell myst to not execute ipynb tutorials.
jupyter_execute_notebooks = "off"
# katex options
#
#
katex_prerender = True
napoleon_use_ivar = True
# build the templated autosummary files
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'functorch'
copyright = 'functorch Contributors'
author = 'functorch Contributors'
functorch_version = str(functorch.__version__)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# TODO: change to [:2] at v1.0
version = 'nightly (' + functorch_version + ')'
# The full version, including alpha/beta/rc tags.
# TODO: verify this works as expected
release = 'nightly'
# Customized html_title here.
# Default is " ".join(project, release, "documentation") if not set
# TODO: I don't know if this flag works, please check before using it
if RELEASE:
raise RuntimeError('NYI')
# remove hash (start with 'a') from version number if any
# version_end = functorch_version.find('a')
# if version_end == -1:
# html_title = " ".join((project, functorch_version, "documentation"))
# version = functorch_version
# else:
# html_title = " ".join((project, functorch_version[:version_end], "documentation"))
# version = functorch_version[:version_end]
# release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['notebooks/colab**', 'notebooks/_src/**']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Disable docstring inheritance
autodoc_inherit_docstrings = False
# Disable displaying type annotations, these can be very verbose
autodoc_typehints = 'none'
# Enable overriding of function signatures in the first line of the docstring.
autodoc_docstring_signature = True
# -- katex javascript in header
#
# def setup(app):
# app.add_javascript("https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.js")
# -- Options for HTML output ----------------------------------------------
#
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"collapse_navigation": False,
"display_version": True,
"logo_only": True,
"pytorch_project": "docs",
"navigation_with_keys": True,
"analytics_id": "UA-117752657-2",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'css/custom.css',
]
# Called automatically by Sphinx, making this `conf.py` an "extension".
def setup(app):
# NOTE: in Sphinx 1.8+ `html_css_files` is an official configuration value
# and can be moved outside of this function (and the setup(app) function
# can be deleted).
html_css_files = [
'https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css'
]
    # In Sphinx 1.8 it was renamed to `add_css_file`; in 1.7 and prior it is
# `add_stylesheet` (deprecated in 1.8).
add_css = getattr(app, 'add_css_file', app.add_stylesheet)
for css_file in html_css_files:
add_css(css_file)
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyTorchdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pytorch.tex', 'PyTorch Documentation',
'Torch Contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'functorch', 'functorch Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'functorch', 'functorch Documentation',
author, 'functorch', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
"torch": ("https://pytorch.org/docs/stable/", None),
}
# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
# See http://stackoverflow.com/a/41184353/3343043
from docutils import nodes
from sphinx.util.docfields import TypedField
from sphinx import addnodes
import sphinx.ext.doctest
# Without this, doctest adds any example with a `>>>` as a test
doctest_test_doctest_blocks = ''
doctest_default_flags = sphinx.ext.doctest.doctest.ELLIPSIS
doctest_global_setup = '''
import torch
try:
import torchvision
except ImportError:
torchvision = None
'''
def patched_make_field(self, types, domain, items, **kw):
# `kw` catches `env=None` needed for newer sphinx while maintaining
# backwards compatibility when passed along further down!
# (List, unicode, Tuple) -> nodes.field
def handle_item(fieldarg, content):
par = nodes.paragraph()
par += addnodes.literal_strong('', fieldarg) # Patch: this line added
# par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
# addnodes.literal_strong))
if fieldarg in types:
par += nodes.Text(' (')
# NOTE: using .pop() here to prevent a single type node to be
# inserted twice into the doctree, which leads to
# inconsistencies later when references are resolved
fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
typename = u''.join(n.astext() for n in fieldtype)
typename = typename.replace('int', 'python:int')
typename = typename.replace('long', 'python:long')
typename = typename.replace('float', 'python:float')
typename = typename.replace('bool', 'python:bool')
typename = typename.replace('type', 'python:type')
par.extend(self.make_xrefs(self.typerolename, domain, typename,
addnodes.literal_emphasis, **kw))
else:
par += fieldtype
par += nodes.Text(')')
par += nodes.Text(' -- ')
par += content
return par
fieldname = nodes.field_name('', self.label)
if len(items) == 1 and self.can_collapse:
fieldarg, content = items[0]
bodynode = handle_item(fieldarg, content)
else:
bodynode = self.list_type()
for fieldarg, content in items:
bodynode += nodes.list_item('', handle_item(fieldarg, content))
fieldbody = nodes.field_body('', bodynode)
return nodes.field('', fieldname, fieldbody)
TypedField.make_field = patched_make_field
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
| pytorch-master | functorch/docs/source/conf.py |
# This example was adapted from https://github.com/muhrin/milad
# It is licensed under the GPLv3 license. You can find a copy of it
# here: https://www.gnu.org/licenses/gpl-3.0.en.html .
import torch
from torch import nn
from torch.nn.functional import mse_loss
from functorch import jacrev, vmap
sigma = 0.5
epsilon = 4.
def lennard_jones(r):
return epsilon * ((sigma / r)**12 - (sigma / r)**6)
def lennard_jones_force(r):
"""Get magnitude of LJ force"""
return -epsilon * ((-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7))
training_size = 1000
r = torch.linspace(0.5, 2 * sigma, steps=training_size, requires_grad=True)
# Create a bunch of vectors that point along positive-x
drs = torch.outer(r, torch.tensor([1.0, 0, 0]))
norms = torch.norm(drs, dim=1).reshape(-1, 1)
# Create training energies
training_energies = torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1)
# Create training forces along the direction vectors
training_forces = torch.stack([force * dr for force, dr in zip(map(lennard_jones_force, norms), drs)])
model = nn.Sequential(
nn.Linear(1, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 1)
)
def make_prediction(model, drs):
norms = torch.norm(drs, dim=1).reshape(-1, 1)
energies = model(norms)
network_derivs = vmap(jacrev(model))(norms).squeeze(-1)
forces = -network_derivs * drs / norms
return energies, forces
def loss_fn(energies, forces, predicted_energies, predicted_forces):
return mse_loss(energies, predicted_energies) + 0.01 * mse_loss(forces, predicted_forces) / 3
optimiser = torch.optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(400):
optimiser.zero_grad()
energies, forces = make_prediction(model, drs)
loss = loss_fn(training_energies, training_forces, energies, forces)
loss.backward(retain_graph=True)
optimiser.step()
if epoch % 20 == 0:
print(loss.cpu().item())
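# --- Illustrative sketch (added for exposition; not part of the original example) ---
# make_prediction() above relies on vmap(jacrev(model)) to get dE/dr for every sample
# in one call. A minimal sanity check, assuming the [N, 1] norms used above, that the
# batched Jacobian matches a plain per-sample loop (the helper name is hypothetical):
def check_batched_jacobian(model, norms):
    batched = vmap(jacrev(model))(norms).squeeze(-1)                      # [N, 1]
    looped = torch.stack([jacrev(model)(n) for n in norms]).squeeze(-1)   # [N, 1]
    return torch.allclose(batched, looped, atol=1e-6)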
| pytorch-master | functorch/examples/lennard_jones/lennard_jones.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Runs CIFAR10 training with differential privacy.
"""
import argparse
import logging
import shutil
import sys
from datetime import datetime, timedelta
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torchvision.transforms as transforms
from torchvision import models
from torchvision.datasets import CIFAR10
from tqdm import tqdm
import functorch
from functorch import vmap, grad_and_value
from functorch import make_functional
# disable warning spam
functorch._C._set_vmap_fallback_warning_enabled(False)
logging.basicConfig(
format="%(asctime)s:%(levelname)s:%(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
stream=sys.stdout,
)
logger = logging.getLogger("ddp")
logger.setLevel(level=logging.INFO)
def save_checkpoint(state, is_best, filename="checkpoint.tar"):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
def accuracy(preds, labels):
return (preds == labels).mean()
def compute_norms(sample_grads):
batch_size = sample_grads[0].shape[0]
norms = [sample_grad.view(batch_size, -1).norm(2, dim=-1) for sample_grad in sample_grads]
norms = torch.stack(norms, dim=0).norm(2, dim=0)
return norms, batch_size
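# --- Illustrative sketch (added for exposition; the helper name is hypothetical) ---
# compute_norms() takes a 2-norm per parameter and per sample, then a 2-norm across
# parameters; that equals the norm of each sample's fully flattened gradient.
# A direct (slower) reference for a single sample index:
def per_sample_norm_reference(sample_grads, sample_idx):
    flat = torch.cat([g[sample_idx].reshape(-1) for g in sample_grads])
    return flat.norm(2)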
def clip_and_accumulate_and_add_noise(model, max_per_sample_grad_norm=1.0, noise_multiplier=1.0):
sample_grads = tuple(param.grad_sample for param in model.parameters())
# step 0: compute the norms
sample_norms, batch_size = compute_norms(sample_grads)
# step 1: compute clipping factors
clip_factor = max_per_sample_grad_norm / (sample_norms + 1e-6)
clip_factor = clip_factor.clamp(max=1.0)
# step 2: clip
grads = tuple(torch.einsum('i,i...', clip_factor, sample_grad)
for sample_grad in sample_grads)
# step 3: add gaussian noise
stddev = max_per_sample_grad_norm * noise_multiplier
noises = tuple(torch.normal(0, stddev, grad_param.shape, device=grad_param.device)
for grad_param in grads)
grads = tuple(noise + grad_param for noise, grad_param in zip(noises, grads))
# step 4: assign the new grads, delete the sample grads
for param, param_grad in zip(model.parameters(), grads):
param.grad = param_grad/batch_size
del param.grad_sample
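# --- Illustrative sketch (added for exposition; the helper name is hypothetical) ---
# Step 2 above uses einsum('i,i...') to scale every per-sample gradient by its clip
# factor and sum over the batch dimension in one call. A loop-based reference for a
# single parameter's stack of per-sample gradients:
def clipped_grad_sum_reference(clip_factor, sample_grad):
    # clip_factor: [B], sample_grad: [B, *param_shape]
    return sum(c * g for c, g in zip(clip_factor, sample_grad))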
def train(args, model, train_loader, optimizer, epoch, device):
start_time = datetime.now()
criterion = nn.CrossEntropyLoss()
losses = []
top1_acc = []
for i, (images, target) in enumerate(tqdm(train_loader)):
images = images.to(device)
target = target.to(device)
# Step 1: compute per-sample-grads
# In order to use functional vmap+grad, we need to be able to
# pass the weights to a model.
func_model, weights = make_functional(model)
# To use vmap+grad to compute per-sample-grads, the forward pass
# must be re-formulated on a single example.
# We use the `grad` operator to compute forward+backward on a single example,
# and finally `vmap` to do forward+backward on multiple examples.
def compute_loss_and_output(weights, image, target):
images = image.unsqueeze(0)
targets = target.unsqueeze(0)
output = func_model(weights, images)
loss = criterion(output, targets)
return loss, output.squeeze(0)
# `grad(f)` is a functional API that returns a function `f'` that
# computes gradients by running both the forward and backward pass.
# We want to extract some intermediate
# values from the computation (i.e. the loss and output).
#
# To extract the loss, we use the `grad_and_value` API, that returns the
# gradient of the weights w.r.t. the loss and the loss.
#
# To extract the output, we use the `has_aux=True` flag.
# `has_aux=True` assumes that `f` returns a tuple of two values,
# where the first is to be differentiated and the second "auxiliary value"
# is not to be differentiated. `f'` returns the gradient w.r.t. the loss,
# the loss, and the auxiliary value.
grads_loss_output = grad_and_value(compute_loss_and_output, has_aux=True)
sample_grads, (sample_loss, output) = \
vmap(grads_loss_output, (None, 0, 0))(weights, images, target)
loss = sample_loss.mean()
for grad_sample, weight in zip(sample_grads, model.parameters()):
weight.grad_sample = grad_sample.detach()
# Step 2: Clip the per-sample-grads, sum them to form grads, and add noise
clip_and_accumulate_and_add_noise(
model, args.max_per_sample_grad_norm, args.sigma)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
losses.append(loss.item())
# measure accuracy and record loss
acc1 = accuracy(preds, labels)
top1_acc.append(acc1)
# make sure we take a step after processing the last mini-batch in the
# epoch to ensure we start the next epoch with a clean state
optimizer.step()
optimizer.zero_grad()
if i % args.print_freq == 0:
print(
f"\tTrain Epoch: {epoch} \t"
f"Loss: {np.mean(losses):.6f} "
f"Acc@1: {np.mean(top1_acc):.6f} "
)
train_duration = datetime.now() - start_time
return train_duration
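# --- Illustrative sketch (added for exposition; not part of the original script) ---
# The per-sample-gradient recipe explained in the comments inside train(), in
# miniature: grad_and_value differentiates a single-example loss w.r.t. the
# functional weights, and vmap maps that over the batch. All names are hypothetical.
def per_sample_grads_demo():
    toy = nn.Linear(3, 2)
    func_toy, toy_weights = make_functional(toy)
    xs = torch.randn(8, 3)
    ys = torch.randint(0, 2, (8,))
    def single_example_loss(weights, x, y):
        logits = func_toy(weights, x.unsqueeze(0))
        return nn.functional.cross_entropy(logits, y.unsqueeze(0))
    grads, losses = vmap(grad_and_value(single_example_loss), (None, 0, 0))(toy_weights, xs, ys)
    # grads is a tuple; grads[k] has shape [8, *toy_weights[k].shape]: one gradient per sample.
    return grads, losses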
def test(args, model, test_loader, device):
model.eval()
criterion = nn.CrossEntropyLoss()
losses = []
top1_acc = []
with torch.no_grad():
for images, target in tqdm(test_loader):
images = images.to(device)
target = target.to(device)
output = model(images)
loss = criterion(output, target)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
acc1 = accuracy(preds, labels)
losses.append(loss.item())
top1_acc.append(acc1)
top1_avg = np.mean(top1_acc)
print(f"\tTest set:" f"Loss: {np.mean(losses):.6f} " f"Acc@1: {top1_avg :.6f} ")
return np.mean(top1_acc)
# flake8: noqa: C901
def main():
args = parse_args()
if args.debug >= 1:
logger.setLevel(level=logging.DEBUG)
device = args.device
if args.secure_rng:
try:
import torchcsprng as prng
except ImportError as e:
msg = (
"To use secure RNG, you must install the torchcsprng package! "
"Check out the instructions here: https://github.com/pytorch/csprng#installation"
)
raise ImportError(msg) from e
generator = prng.create_random_device_generator("/dev/urandom")
else:
generator = None
augmentations = [
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
]
normalize = [
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
train_transform = transforms.Compose(normalize)
test_transform = transforms.Compose(normalize)
train_dataset = CIFAR10(
root=args.data_root, train=True, download=True, transform=train_transform
)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=int(args.sample_rate * len(train_dataset)),
generator=generator,
num_workers=args.workers,
pin_memory=True,
)
test_dataset = CIFAR10(
root=args.data_root, train=False, download=True, transform=test_transform
)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=args.batch_size_test,
shuffle=False,
num_workers=args.workers,
)
best_acc1 = 0
model = models.__dict__[args.architecture](
pretrained=False, norm_layer=(lambda c: nn.GroupNorm(args.gn_groups, c))
)
model = model.to(device)
if args.optim == "SGD":
optimizer = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
elif args.optim == "RMSprop":
optimizer = optim.RMSprop(model.parameters(), lr=args.lr)
elif args.optim == "Adam":
optimizer = optim.Adam(model.parameters(), lr=args.lr)
else:
raise NotImplementedError("Optimizer not recognized. Please check spelling")
# Store some logs
accuracy_per_epoch = []
time_per_epoch = []
for epoch in range(args.start_epoch, args.epochs + 1):
if args.lr_schedule == "cos":
lr = args.lr * 0.5 * (1 + np.cos(np.pi * epoch / (args.epochs + 1)))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
train_duration = train(
args, model, train_loader, optimizer, epoch, device
)
top1_acc = test(args, model, test_loader, device)
# remember best acc@1 and save checkpoint
is_best = top1_acc > best_acc1
best_acc1 = max(top1_acc, best_acc1)
time_per_epoch.append(train_duration)
accuracy_per_epoch.append(float(top1_acc))
save_checkpoint(
{
"epoch": epoch + 1,
"arch": "Convnet",
"state_dict": model.state_dict(),
"best_acc1": best_acc1,
"optimizer": optimizer.state_dict(),
},
is_best,
filename=args.checkpoint_file + ".tar",
)
time_per_epoch_seconds = [t.total_seconds() for t in time_per_epoch]
avg_time_per_epoch = sum(time_per_epoch_seconds) / len(time_per_epoch_seconds)
metrics = {
"accuracy": best_acc1,
"accuracy_per_epoch": accuracy_per_epoch,
"avg_time_per_epoch_str": str(timedelta(seconds=int(avg_time_per_epoch))),
"time_per_epoch": time_per_epoch_seconds,
}
logger.info(
"\nNote:\n- 'total_time' includes the data loading time, training time and testing time.\n- 'time_per_epoch' measures the training time only.\n"
)
logger.info(metrics)
def parse_args():
parser = argparse.ArgumentParser(description="PyTorch CIFAR10 DP Training")
parser.add_argument(
"-j",
"--workers",
default=2,
type=int,
metavar="N",
help="number of data loading workers (default: 2)",
)
parser.add_argument(
"--epochs",
default=90,
type=int,
metavar="N",
help="number of total epochs to run",
)
parser.add_argument(
"--start-epoch",
default=1,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"-b",
"--batch-size-test",
default=256,
type=int,
metavar="N",
help="mini-batch size for test dataset (default: 256)"
)
parser.add_argument(
"--sample-rate",
default=0.005,
type=float,
metavar="SR",
help="sample rate used for batch construction (default: 0.005)",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.1,
type=float,
metavar="LR",
help="initial learning rate",
dest="lr",
)
parser.add_argument(
"--momentum", default=0.9, type=float, metavar="M", help="SGD momentum"
)
parser.add_argument(
"--wd",
"--weight-decay",
default=0,
type=float,
metavar="W",
help="SGD weight decay",
dest="weight_decay",
)
parser.add_argument(
"-p",
"--print-freq",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"-e",
"--evaluate",
dest="evaluate",
action="store_true",
help="evaluate model on validation set",
)
parser.add_argument(
"--seed", default=None, type=int, help="seed for initializing training. "
)
parser.add_argument(
"--sigma",
type=float,
default=1.5,
metavar="S",
help="Noise multiplier (default 1.0)",
)
parser.add_argument(
"-c",
"--max-per-sample-grad_norm",
type=float,
default=10.0,
metavar="C",
help="Clip per-sample gradients to this norm (default 1.0)",
)
parser.add_argument(
"--secure-rng",
action="store_true",
default=False,
help="Enable Secure RNG to have trustworthy privacy guarantees."
"Comes at a performance cost. Opacus will emit a warning if secure rng is off,"
"indicating that for production use it's recommender to turn it on.",
)
parser.add_argument(
"--delta",
type=float,
default=1e-5,
metavar="D",
help="Target delta (default: 1e-5)",
)
parser.add_argument(
"--checkpoint-file",
type=str,
default="checkpoint",
help="path to save check points",
)
parser.add_argument(
"--data-root",
type=str,
default="../cifar10",
help="Where CIFAR10 is/will be stored",
)
parser.add_argument(
"--log-dir",
type=str,
default="/tmp/stat/tensorboard",
help="Where Tensorboard log will be stored",
)
parser.add_argument(
"--optim",
type=str,
default="SGD",
help="Optimizer to use (Adam, RMSprop, SGD)",
)
parser.add_argument(
"--lr-schedule", type=str, choices=["constant", "cos"], default="cos"
)
parser.add_argument(
"--device", type=str, default="cuda", help="Device on which to run the code."
)
parser.add_argument(
"--architecture",
type=str,
default="resnet18",
help="model from torchvision to run",
)
parser.add_argument(
"--gn-groups",
type=int,
default=8,
help="Number of groups in GroupNorm",
)
parser.add_argument(
"--clip_per_layer",
action="store_true",
default=False,
help="Use static per-layer clipping with the same clipping threshold for each layer. Necessary for DDP. If `False` (default), uses flat clipping.",
)
parser.add_argument(
"--debug",
type=int,
default=0,
help="debug level (default: 0)",
)
return parser.parse_args()
if __name__ == "__main__":
main()
| pytorch-master | functorch/examples/dp_cifar10/cifar10_transforms.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Runs CIFAR10 training with differential privacy.
"""
import argparse
import logging
import shutil
import sys
from datetime import datetime, timedelta
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torchvision.transforms as transforms
from torchvision import models
from opacus import PrivacyEngine
from torchvision.datasets import CIFAR10
from tqdm import tqdm
logging.basicConfig(
format="%(asctime)s:%(levelname)s:%(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
stream=sys.stdout,
)
logger = logging.getLogger("ddp")
logger.setLevel(level=logging.INFO)
def save_checkpoint(state, is_best, filename="checkpoint.tar"):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
def accuracy(preds, labels):
return (preds == labels).mean()
def train(args, model, train_loader, optimizer, privacy_engine, epoch, device):
start_time = datetime.now()
model.train()
criterion = nn.CrossEntropyLoss()
losses = []
top1_acc = []
for i, (images, target) in enumerate(tqdm(train_loader)):
images = images.to(device)
target = target.to(device)
# compute output
output = model(images)
loss = criterion(output, target)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
# measure accuracy and record loss
acc1 = accuracy(preds, labels)
losses.append(loss.item())
top1_acc.append(acc1)
# compute gradient and do SGD step
loss.backward()
# make sure we take a step after processing the last mini-batch in the
# epoch to ensure we start the next epoch with a clean state
optimizer.step()
optimizer.zero_grad()
if i % args.print_freq == 0:
if not args.disable_dp:
epsilon, best_alpha = privacy_engine.accountant.get_privacy_spent(
delta=args.delta,
alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
)
print(
f"\tTrain Epoch: {epoch} \t"
f"Loss: {np.mean(losses):.6f} "
f"Acc@1: {np.mean(top1_acc):.6f} "
f"(ε = {epsilon:.2f}, δ = {args.delta}) for α = {best_alpha}"
)
else:
print(
f"\tTrain Epoch: {epoch} \t"
f"Loss: {np.mean(losses):.6f} "
f"Acc@1: {np.mean(top1_acc):.6f} "
)
train_duration = datetime.now() - start_time
return train_duration
def test(args, model, test_loader, device):
model.eval()
criterion = nn.CrossEntropyLoss()
losses = []
top1_acc = []
with torch.no_grad():
for images, target in tqdm(test_loader):
images = images.to(device)
target = target.to(device)
output = model(images)
loss = criterion(output, target)
preds = np.argmax(output.detach().cpu().numpy(), axis=1)
labels = target.detach().cpu().numpy()
acc1 = accuracy(preds, labels)
losses.append(loss.item())
top1_acc.append(acc1)
top1_avg = np.mean(top1_acc)
print(f"\tTest set:" f"Loss: {np.mean(losses):.6f} " f"Acc@1: {top1_avg :.6f} ")
return np.mean(top1_acc)
# flake8: noqa: C901
def main():
args = parse_args()
if args.debug >= 1:
logger.setLevel(level=logging.DEBUG)
device = args.device
if args.secure_rng:
try:
import torchcsprng as prng
except ImportError as e:
msg = (
"To use secure RNG, you must install the torchcsprng package! "
"Check out the instructions here: https://github.com/pytorch/csprng#installation"
)
raise ImportError(msg) from e
generator = prng.create_random_device_generator("/dev/urandom")
else:
generator = None
augmentations = [
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
]
normalize = [
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
train_transform = transforms.Compose(
augmentations + normalize if args.disable_dp else normalize
)
test_transform = transforms.Compose(normalize)
train_dataset = CIFAR10(
root=args.data_root, train=True, download=True, transform=train_transform
)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=int(args.sample_rate * len(train_dataset)),
generator=generator,
num_workers=args.workers,
pin_memory=True,
)
test_dataset = CIFAR10(
root=args.data_root, train=False, download=True, transform=test_transform
)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=args.batch_size_test,
shuffle=False,
num_workers=args.workers,
)
best_acc1 = 0
model = models.__dict__[args.architecture](
pretrained=False, norm_layer=(lambda c: nn.GroupNorm(args.gn_groups, c))
)
model = model.to(device)
if args.optim == "SGD":
optimizer = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
elif args.optim == "RMSprop":
optimizer = optim.RMSprop(model.parameters(), lr=args.lr)
elif args.optim == "Adam":
optimizer = optim.Adam(model.parameters(), lr=args.lr)
else:
raise NotImplementedError("Optimizer not recognized. Please check spelling")
privacy_engine = None
if not args.disable_dp:
if args.clip_per_layer:
# Each layer has the same clipping threshold. The total grad norm is still bounded by `args.max_per_sample_grad_norm`.
n_layers = len(
[(n, p) for n, p in model.named_parameters() if p.requires_grad]
)
max_grad_norm = [
args.max_per_sample_grad_norm / np.sqrt(n_layers)
] * n_layers
else:
max_grad_norm = args.max_per_sample_grad_norm
privacy_engine = PrivacyEngine(
secure_mode=args.secure_rng,
)
clipping = "per_layer" if args.clip_per_layer else "flat"
model, optimizer, train_loader = privacy_engine.make_private(
module=model,
optimizer=optimizer,
data_loader=train_loader,
noise_multiplier=args.sigma,
max_grad_norm=max_grad_norm,
clipping=clipping,
)
# Store some logs
accuracy_per_epoch = []
time_per_epoch = []
for epoch in range(args.start_epoch, args.epochs + 1):
if args.lr_schedule == "cos":
lr = args.lr * 0.5 * (1 + np.cos(np.pi * epoch / (args.epochs + 1)))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
train_duration = train(
args, model, train_loader, optimizer, privacy_engine, epoch, device
)
top1_acc = test(args, model, test_loader, device)
# remember best acc@1 and save checkpoint
is_best = top1_acc > best_acc1
best_acc1 = max(top1_acc, best_acc1)
time_per_epoch.append(train_duration)
accuracy_per_epoch.append(float(top1_acc))
save_checkpoint(
{
"epoch": epoch + 1,
"arch": "Convnet",
"state_dict": model.state_dict(),
"best_acc1": best_acc1,
"optimizer": optimizer.state_dict(),
},
is_best,
filename=args.checkpoint_file + ".tar",
)
time_per_epoch_seconds = [t.total_seconds() for t in time_per_epoch]
avg_time_per_epoch = sum(time_per_epoch_seconds) / len(time_per_epoch_seconds)
metrics = {
"accuracy": best_acc1,
"accuracy_per_epoch": accuracy_per_epoch,
"avg_time_per_epoch_str": str(timedelta(seconds=int(avg_time_per_epoch))),
"time_per_epoch": time_per_epoch_seconds,
}
logger.info(
"\nNote:\n- 'total_time' includes the data loading time, training time and testing time.\n- 'time_per_epoch' measures the training time only.\n"
)
logger.info(metrics)
def parse_args():
parser = argparse.ArgumentParser(description="PyTorch CIFAR10 DP Training")
parser.add_argument(
"-j",
"--workers",
default=2,
type=int,
metavar="N",
help="number of data loading workers (default: 2)",
)
parser.add_argument(
"--epochs",
default=90,
type=int,
metavar="N",
help="number of total epochs to run",
)
parser.add_argument(
"--start-epoch",
default=1,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"-b",
"--batch-size-test",
default=256,
type=int,
metavar="N",
help="mini-batch size for test dataset (default: 256)"
)
parser.add_argument(
"--sample-rate",
default=0.005,
type=float,
metavar="SR",
help="sample rate used for batch construction (default: 0.005)",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.1,
type=float,
metavar="LR",
help="initial learning rate",
dest="lr",
)
parser.add_argument(
"--momentum", default=0.9, type=float, metavar="M", help="SGD momentum"
)
parser.add_argument(
"--wd",
"--weight-decay",
default=0,
type=float,
metavar="W",
help="SGD weight decay",
dest="weight_decay",
)
parser.add_argument(
"-p",
"--print-freq",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"-e",
"--evaluate",
dest="evaluate",
action="store_true",
help="evaluate model on validation set",
)
parser.add_argument(
"--seed", default=None, type=int, help="seed for initializing training. "
)
parser.add_argument(
"--sigma",
type=float,
default=1.5,
metavar="S",
help="Noise multiplier (default 1.0)",
)
parser.add_argument(
"-c",
"--max-per-sample-grad_norm",
type=float,
default=10.0,
metavar="C",
help="Clip per-sample gradients to this norm (default 1.0)",
)
parser.add_argument(
"--disable-dp",
action="store_true",
default=False,
help="Disable privacy training and just train with vanilla SGD",
)
parser.add_argument(
"--secure-rng",
action="store_true",
default=False,
help="Enable Secure RNG to have trustworthy privacy guarantees."
"Comes at a performance cost. Opacus will emit a warning if secure rng is off,"
"indicating that for production use it's recommender to turn it on.",
)
parser.add_argument(
"--delta",
type=float,
default=1e-5,
metavar="D",
help="Target delta (default: 1e-5)",
)
parser.add_argument(
"--checkpoint-file",
type=str,
default="checkpoint",
help="path to save check points",
)
parser.add_argument(
"--data-root",
type=str,
default="../cifar10",
help="Where CIFAR10 is/will be stored",
)
parser.add_argument(
"--log-dir",
type=str,
default="/tmp/stat/tensorboard",
help="Where Tensorboard log will be stored",
)
parser.add_argument(
"--optim",
type=str,
default="SGD",
help="Optimizer to use (Adam, RMSprop, SGD)",
)
parser.add_argument(
"--lr-schedule", type=str, choices=["constant", "cos"], default="cos"
)
parser.add_argument(
"--device", type=str, default="cuda", help="Device on which to run the code."
)
parser.add_argument(
"--architecture",
type=str,
default="resnet18",
help="model from torchvision to run",
)
parser.add_argument(
"--gn-groups",
type=int,
default=8,
help="Number of groups in GroupNorm",
)
parser.add_argument(
"--clip_per_layer",
action="store_true",
default=False,
help="Use static per-layer clipping with the same clipping threshold for each layer. Necessary for DDP. If `False` (default), uses flat clipping.",
)
parser.add_argument(
"--debug",
type=int,
default=0,
help="debug level (default: 0)",
)
return parser.parse_args()
if __name__ == "__main__":
main()
| pytorch-master | functorch/examples/dp_cifar10/cifar10_opacus.py |
import argparse
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functorch import make_functional, grad_and_value, vmap, combine_state_for_ensemble
# Adapted from http://willwhitney.com/parallel-training-jax.html , which is a
# tutorial on Model Ensembling with JAX by Will Whitney.
#
# The original code comes with the following citation:
# @misc{Whitney2021Parallelizing,
# author = {William F. Whitney},
# title = { {Parallelizing neural networks on one GPU with JAX} },
# year = {2021},
# url = {http://willwhitney.com/parallel-training-jax.html},
# }
# GOAL: Demonstrate that it is possible to use eager-mode vmap
# to parallelize training over models.
parser = argparse.ArgumentParser(description="Functorch Ensembled Models")
parser.add_argument(
"--device",
type=str,
default="cpu",
help="CPU or GPU ID for this process (default: 'cpu')",
)
args = parser.parse_args()
DEVICE = args.device
# Step 1: Make some spirals
def make_spirals(n_samples, noise_std=0., rotations=1.):
ts = torch.linspace(0, 1, n_samples, device=DEVICE)
rs = ts ** 0.5
thetas = rs * rotations * 2 * math.pi
signs = torch.randint(0, 2, (n_samples,), device=DEVICE) * 2 - 1
labels = (signs > 0).to(torch.long).to(DEVICE)
xs = rs * signs * torch.cos(thetas) + torch.randn(n_samples, device=DEVICE) * noise_std
ys = rs * signs * torch.sin(thetas) + torch.randn(n_samples, device=DEVICE) * noise_std
points = torch.stack([xs, ys], dim=1)
return points, labels
points, labels = make_spirals(100, noise_std=0.05)
# Step 2: Define two-layer MLP and loss function
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
loss_fn = nn.NLLLoss()
# Step 3: Make the model functional(!!) and define a training function.
# NB: this mechanism doesn't exist in PyTorch today, but we want it to:
# https://github.com/pytorch/pytorch/issues/49171
func_model, weights = make_functional(MLPClassifier().to(DEVICE))
def train_step_fn(weights, batch, targets, lr=0.2):
def compute_loss(weights, batch, targets):
output = func_model(weights, batch)
loss = loss_fn(output, targets)
return loss
grad_weights, loss = grad_and_value(compute_loss)(weights, batch, targets)
# NB: PyTorch is missing a "functional optimizer API" (possibly coming soon)
# so we are going to re-implement SGD here.
new_weights = []
with torch.no_grad():
for grad_weight, weight in zip(grad_weights, weights):
new_weights.append(weight - grad_weight * lr)
return loss, new_weights
# Step 4: Let's verify this actually trains.
# We should see the loss decrease.
def step4():
global weights
for i in range(2000):
loss, weights = train_step_fn(weights, points, labels)
if i % 100 == 0:
print(loss)
step4()
# Step 5: We're ready for multiple models. Let's define an init_fn
# that, given a number of models, returns to us all of the weights.
def init_fn(num_models):
models = [MLPClassifier().to(DEVICE) for _ in range(num_models)]
_, params, _ = combine_state_for_ensemble(models)
return params
# Step 6: Now, can we try multiple models at the same time?
# The answer is: yes! `loss` now holds one value per model (two here), and we
# can see that both keep decreasing
def step6():
parallel_train_step_fn = vmap(train_step_fn, in_dims=(0, None, None))
batched_weights = init_fn(num_models=2)
for i in range(2000):
loss, batched_weights = parallel_train_step_fn(batched_weights, points, labels)
if i % 200 == 0:
print(loss)
step6()
# Step 7: Now, the flaw with step 6 is that we were training on the same exact
# data. This can lead to all of the models in the ensemble overfitting in the
# same way. The solution that http://willwhitney.com/parallel-training-jax.html
# applies is to randomly subset the data in a way that the models do not receive
# exactly the same data in each training step!
# Because the goal of this doc is to show that we can use eager-mode vmap to
# achieve similar things as JAX, the rest of this is left as an exercise to the reader.
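# A minimal sketch of one possible Step 7 (not part of the original example):
# give each ensemble member its own random subset of the data at every step by
# vmapping over per-model minibatches as well. The subset size of 50 and the
# name `step7_sketch` are illustrative choices.
def step7_sketch():
    parallel_train_step_fn = vmap(train_step_fn, in_dims=(0, 0, 0))
    batched_weights = init_fn(num_models=2)
    for i in range(2000):
        # Draw an independent minibatch of indices for each model.
        idx = torch.stack([
            torch.randperm(points.shape[0], device=DEVICE)[:50] for _ in range(2)
        ])
        loss, batched_weights = parallel_train_step_fn(
            batched_weights, points[idx], labels[idx]
        )
        if i % 200 == 0:
            print(loss)
# step7_sketch()  # Uncomment to run the sketch.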
# In conclusion, to achieve what http://willwhitney.com/parallel-training-jax.html
# does, we used the following additional items that PyTorch does not have:
# 1. NN module functional API that turns a module into a (state, state_less_fn) pair
# 2. Functional optimizers
# 3. A "functional" grad API (that effectively wraps autograd.grad)
# 4. Composability between the functional grad API and torch.vmap.
| pytorch-master | functorch/examples/ensembling/parallel_train.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example shows how to use higher to do Model Agnostic Meta Learning (MAML)
for few-shot Omniglot classification.
For more details see the original MAML paper:
https://arxiv.org/abs/1703.03400
This code has been modified from Jackie Loong's PyTorch MAML implementation:
https://github.com/dragen1860/MAML-Pytorch/blob/master/omniglot_train.py
Our MAML++ fork and experiments are available at:
https://github.com/bamos/HowToTrainYourMAMLPytorch
"""
from support.omniglot_loaders import OmniglotNShot
from functorch import make_functional_with_buffers
import torch.optim as optim
import torch.nn.functional as F
from torch import nn
import torch
import matplotlib.pyplot as plt
import argparse
import time
import pandas as pd
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
plt.style.use('bmh')
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument('--n_way', type=int, help='n way', default=5)
argparser.add_argument(
'--k_spt', type=int, help='k shot for support set', default=5)
argparser.add_argument(
'--k_qry', type=int, help='k shot for query set', default=15)
argparser.add_argument(
'--device', type=str, help='device', default='cuda')
argparser.add_argument(
'--task_num',
type=int,
help='meta batch size, namely task num',
default=32)
argparser.add_argument('--seed', type=int, help='random seed', default=1)
args = argparser.parse_args()
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
# Set up the Omniglot loader.
device = args.device
db = OmniglotNShot(
'/tmp/omniglot-data',
batchsz=args.task_num,
n_way=args.n_way,
k_shot=args.k_spt,
k_query=args.k_qry,
imgsz=28,
device=device,
)
    # Create a vanilla PyTorch neural network. Its parameters and buffers
    # are extracted below with make_functional_with_buffers so the
    # inner-loop updates can be applied functionally.
net = nn.Sequential(
nn.Conv2d(1, 64, 3),
nn.BatchNorm2d(64, momentum=1, affine=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, momentum=1, affine=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, momentum=1, affine=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
Flatten(),
nn.Linear(64, args.n_way)).to(device)
net.train()
fnet, params, buffers = make_functional_with_buffers(net)
# We will use Adam to (meta-)optimize the initial parameters
# to be adapted.
meta_opt = optim.Adam(params, lr=1e-3)
log = []
for epoch in range(100):
train(db, [params, buffers, fnet], device, meta_opt, epoch, log)
test(db, [params, buffers, fnet], device, epoch, log)
plot(log)
def train(db, net, device, meta_opt, epoch, log):
params, buffers, fnet = net
n_train_iter = db.x_train.shape[0] // db.batchsz
for batch_idx in range(n_train_iter):
start_time = time.time()
# Sample a batch of support and query images and labels.
x_spt, y_spt, x_qry, y_qry = db.next()
task_num, setsz, c_, h, w = x_spt.size()
querysz = x_qry.size(1)
# TODO: Maybe pull this out into a separate module so it
# doesn't have to be duplicated between `train` and `test`?
# Initialize the inner optimizer to adapt the parameters to
# the support set.
n_inner_iter = 5
# inner_opt = torch.optim.SGD(net.parameters(), lr=1e-1)
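        # No inner optimizer is used here: the inner-loop SGD update is applied
        # manually to `new_params` inside the task loop below.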
qry_losses = []
qry_accs = []
meta_opt.zero_grad()
for i in range(task_num):
# Optimize the likelihood of the support set by taking
# gradient steps w.r.t. the model's parameters.
# This adapts the model's meta-parameters to the task.
new_params = params
for _ in range(n_inner_iter):
spt_logits = fnet(new_params, buffers, x_spt[i])
spt_loss = F.cross_entropy(spt_logits, y_spt[i])
grads = torch.autograd.grad(spt_loss, new_params, create_graph=True)
new_params = [p - g * 1e-1 for p, g, in zip(new_params, grads)]
# The final set of adapted parameters will induce some
# final loss and accuracy on the query dataset.
# These will be used to update the model's meta-parameters.
qry_logits = fnet(new_params, buffers, x_qry[i])
qry_loss = F.cross_entropy(qry_logits, y_qry[i])
qry_losses.append(qry_loss.detach())
qry_acc = (qry_logits.argmax(
dim=1) == y_qry[i]).sum().item() / querysz
qry_accs.append(qry_acc)
# Update the model's meta-parameters to optimize the query
# losses across all of the tasks sampled in this batch.
# This unrolls through the gradient steps.
qry_loss.backward()
meta_opt.step()
qry_losses = sum(qry_losses) / task_num
qry_accs = 100. * sum(qry_accs) / task_num
i = epoch + float(batch_idx) / n_train_iter
iter_time = time.time() - start_time
if batch_idx % 4 == 0:
print(
f'[Epoch {i:.2f}] Train Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f} | Time: {iter_time:.2f}'
)
log.append({
'epoch': i,
'loss': qry_losses,
'acc': qry_accs,
'mode': 'train',
'time': time.time(),
})
def test(db, net, device, epoch, log):
# Crucially in our testing procedure here, we do *not* fine-tune
# the model during testing for simplicity.
# Most research papers using MAML for this task do an extra
# stage of fine-tuning here that should be added if you are
# adapting this code for research.
[params, buffers, fnet] = net
n_test_iter = db.x_test.shape[0] // db.batchsz
qry_losses = []
qry_accs = []
for batch_idx in range(n_test_iter):
x_spt, y_spt, x_qry, y_qry = db.next('test')
task_num, setsz, c_, h, w = x_spt.size()
# TODO: Maybe pull this out into a separate module so it
# doesn't have to be duplicated between `train` and `test`?
n_inner_iter = 5
for i in range(task_num):
new_params = params
for _ in range(n_inner_iter):
spt_logits = fnet(new_params, buffers, x_spt[i])
spt_loss = F.cross_entropy(spt_logits, y_spt[i])
grads = torch.autograd.grad(spt_loss, new_params)
new_params = [p - g * 1e-1 for p, g, in zip(new_params, grads)]
# The query loss and acc induced by these parameters.
qry_logits = fnet(new_params, buffers, x_qry[i]).detach()
qry_loss = F.cross_entropy(
qry_logits, y_qry[i], reduction='none')
qry_losses.append(qry_loss.detach())
qry_accs.append(
(qry_logits.argmax(dim=1) == y_qry[i]).detach())
qry_losses = torch.cat(qry_losses).mean().item()
qry_accs = 100. * torch.cat(qry_accs).float().mean().item()
print(
f'[Epoch {epoch+1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}'
)
log.append({
'epoch': epoch + 1,
'loss': qry_losses,
'acc': qry_accs,
'mode': 'test',
'time': time.time(),
})
def plot(log):
# Generally you should pull your plotting code out of your training
# script but we are doing it here for brevity.
df = pd.DataFrame(log)
fig, ax = plt.subplots(figsize=(6, 4))
train_df = df[df['mode'] == 'train']
test_df = df[df['mode'] == 'test']
ax.plot(train_df['epoch'], train_df['acc'], label='Train')
ax.plot(test_df['epoch'], test_df['acc'], label='Test')
ax.set_xlabel('Epoch')
ax.set_ylabel('Accuracy')
ax.set_ylim(70, 100)
fig.legend(ncol=2, loc='lower right')
fig.tight_layout()
fname = 'maml-accs.png'
print(f'--- Plotting accuracy to {fname}')
fig.savefig(fname)
plt.close(fig)
# Won't need this after this PR is merged in:
# https://github.com/pytorch/pytorch/pull/22245
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
if __name__ == '__main__':
main()
| pytorch-master | functorch/examples/maml_omniglot/maml-omniglot-ptonly.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example shows how to use higher to do Model Agnostic Meta Learning (MAML)
for few-shot Omniglot classification.
For more details see the original MAML paper:
https://arxiv.org/abs/1703.03400
This code has been modified from Jackie Loong's PyTorch MAML implementation:
https://github.com/dragen1860/MAML-Pytorch/blob/master/omniglot_train.py
Our MAML++ fork and experiments are available at:
https://github.com/bamos/HowToTrainYourMAMLPytorch
"""
from support.omniglot_loaders import OmniglotNShot
import higher
import torch.optim as optim
import torch.nn.functional as F
from torch import nn
import torch
import matplotlib.pyplot as plt
import argparse
import time
import pandas as pd
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
plt.style.use('bmh')
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument('--n_way', type=int, help='n way', default=5)
argparser.add_argument(
'--k_spt', type=int, help='k shot for support set', default=5)
argparser.add_argument(
'--k_qry', type=int, help='k shot for query set', default=15)
argparser.add_argument(
'--device', type=str, help='device', default='cuda')
argparser.add_argument(
'--task_num',
type=int,
help='meta batch size, namely task num',
default=32)
argparser.add_argument('--seed', type=int, help='random seed', default=1)
args = argparser.parse_args()
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
# Set up the Omniglot loader.
device = args.device
db = OmniglotNShot(
'/tmp/omniglot-data',
batchsz=args.task_num,
n_way=args.n_way,
k_shot=args.k_spt,
k_query=args.k_qry,
imgsz=28,
device=device,
)
# Create a vanilla PyTorch neural network that will be
# automatically monkey-patched by higher later.
# Before higher, models could *not* be created like this
# and the parameters needed to be manually updated and copied
# for the updates.
net = nn.Sequential(
nn.Conv2d(1, 64, 3),
nn.BatchNorm2d(64, momentum=1, affine=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, momentum=1, affine=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, momentum=1, affine=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
Flatten(),
nn.Linear(64, args.n_way)).to(device)
# We will use Adam to (meta-)optimize the initial parameters
# to be adapted.
meta_opt = optim.Adam(net.parameters(), lr=1e-3)
log = []
for epoch in range(100):
train(db, net, device, meta_opt, epoch, log)
test(db, net, device, epoch, log)
plot(log)
def train(db, net, device, meta_opt, epoch, log):
net.train()
n_train_iter = db.x_train.shape[0] // db.batchsz
for batch_idx in range(n_train_iter):
start_time = time.time()
# Sample a batch of support and query images and labels.
x_spt, y_spt, x_qry, y_qry = db.next()
task_num, setsz, c_, h, w = x_spt.size()
querysz = x_qry.size(1)
# TODO: Maybe pull this out into a separate module so it
# doesn't have to be duplicated between `train` and `test`?
# Initialize the inner optimizer to adapt the parameters to
# the support set.
n_inner_iter = 5
inner_opt = torch.optim.SGD(net.parameters(), lr=1e-1)
qry_losses = []
qry_accs = []
meta_opt.zero_grad()
for i in range(task_num):
with higher.innerloop_ctx(
net, inner_opt, copy_initial_weights=False
) as (fnet, diffopt):
# Optimize the likelihood of the support set by taking
# gradient steps w.r.t. the model's parameters.
# This adapts the model's meta-parameters to the task.
# higher is able to automatically keep copies of
# your network's parameters as they are being updated.
for _ in range(n_inner_iter):
spt_logits = fnet(x_spt[i])
spt_loss = F.cross_entropy(spt_logits, y_spt[i])
diffopt.step(spt_loss)
# The final set of adapted parameters will induce some
# final loss and accuracy on the query dataset.
# These will be used to update the model's meta-parameters.
qry_logits = fnet(x_qry[i])
qry_loss = F.cross_entropy(qry_logits, y_qry[i])
qry_losses.append(qry_loss.detach())
qry_acc = (qry_logits.argmax(
dim=1) == y_qry[i]).sum().item() / querysz
qry_accs.append(qry_acc)
# print([b.shape for b in fnet[1].buffers()])
# Update the model's meta-parameters to optimize the query
# losses across all of the tasks sampled in this batch.
# This unrolls through the gradient steps.
qry_loss.backward()
meta_opt.step()
qry_losses = sum(qry_losses) / task_num
qry_accs = 100. * sum(qry_accs) / task_num
i = epoch + float(batch_idx) / n_train_iter
iter_time = time.time() - start_time
if batch_idx % 4 == 0:
print(
f'[Epoch {i:.2f}] Train Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f} | Time: {iter_time:.2f}'
)
log.append({
'epoch': i,
'loss': qry_losses,
'acc': qry_accs,
'mode': 'train',
'time': time.time(),
})
def test(db, net, device, epoch, log):
# Crucially in our testing procedure here, we do *not* fine-tune
# the model during testing for simplicity.
# Most research papers using MAML for this task do an extra
# stage of fine-tuning here that should be added if you are
# adapting this code for research.
net.train()
n_test_iter = db.x_test.shape[0] // db.batchsz
qry_losses = []
qry_accs = []
for _ in range(n_test_iter):
x_spt, y_spt, x_qry, y_qry = db.next('test')
task_num, setsz, c_, h, w = x_spt.size()
# TODO: Maybe pull this out into a separate module so it
# doesn't have to be duplicated between `train` and `test`?
n_inner_iter = 5
inner_opt = torch.optim.SGD(net.parameters(), lr=1e-1)
for i in range(task_num):
with higher.innerloop_ctx(net, inner_opt, track_higher_grads=False) as (fnet, diffopt):
# Optimize the likelihood of the support set by taking
# gradient steps w.r.t. the model's parameters.
# This adapts the model's meta-parameters to the task.
for _ in range(n_inner_iter):
spt_logits = fnet(x_spt[i])
spt_loss = F.cross_entropy(spt_logits, y_spt[i])
diffopt.step(spt_loss)
# The query loss and acc induced by these parameters.
qry_logits = fnet(x_qry[i]).detach()
qry_loss = F.cross_entropy(
qry_logits, y_qry[i], reduction='none')
qry_losses.append(qry_loss.detach())
qry_accs.append(
(qry_logits.argmax(dim=1) == y_qry[i]).detach())
qry_losses = torch.cat(qry_losses).mean().item()
qry_accs = 100. * torch.cat(qry_accs).float().mean().item()
print(
f'[Epoch {epoch+1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}'
)
log.append({
'epoch': epoch + 1,
'loss': qry_losses,
'acc': qry_accs,
'mode': 'test',
'time': time.time(),
})
def plot(log):
# Generally you should pull your plotting code out of your training
# script but we are doing it here for brevity.
df = pd.DataFrame(log)
fig, ax = plt.subplots(figsize=(6, 4))
train_df = df[df['mode'] == 'train']
test_df = df[df['mode'] == 'test']
ax.plot(train_df['epoch'], train_df['acc'], label='Train')
ax.plot(test_df['epoch'], test_df['acc'], label='Test')
ax.set_xlabel('Epoch')
ax.set_ylabel('Accuracy')
ax.set_ylim(70, 100)
fig.legend(ncol=2, loc='lower right')
fig.tight_layout()
fname = 'maml-accs.png'
print(f'--- Plotting accuracy to {fname}')
fig.savefig(fname)
plt.close(fig)
# Won't need this after this PR is merged in:
# https://github.com/pytorch/pytorch/pull/22245
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
if __name__ == '__main__':
main()
| pytorch-master | functorch/examples/maml_omniglot/maml-omniglot-higher.py |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example shows how to use higher to do Model Agnostic Meta Learning (MAML)
for few-shot Omniglot classification.
For more details see the original MAML paper:
https://arxiv.org/abs/1703.03400
This code has been modified from Jackie Loong's PyTorch MAML implementation:
https://github.com/dragen1860/MAML-Pytorch/blob/master/omniglot_train.py
Our MAML++ fork and experiments are available at:
https://github.com/bamos/HowToTrainYourMAMLPytorch
"""
from support.omniglot_loaders import OmniglotNShot
from functorch import make_functional_with_buffers, vmap, grad
import functorch
import torch.optim as optim
import torch.nn.functional as F
from torch import nn
import torch
import matplotlib.pyplot as plt
import argparse
import time
import functools
import pandas as pd
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
plt.style.use('bmh')
# Squash the warning spam
functorch._C._set_vmap_fallback_warning_enabled(False)
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument('--n_way', type=int, help='n way', default=5)
argparser.add_argument(
'--k_spt', type=int, help='k shot for support set', default=5)
argparser.add_argument(
'--k_qry', type=int, help='k shot for query set', default=15)
argparser.add_argument(
'--device', type=str, help='device', default='cuda')
argparser.add_argument(
'--task_num',
type=int,
help='meta batch size, namely task num',
default=32)
argparser.add_argument('--seed', type=int, help='random seed', default=1)
args = argparser.parse_args()
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
# Set up the Omniglot loader.
device = args.device
db = OmniglotNShot(
'/tmp/omniglot-data',
batchsz=args.task_num,
n_way=args.n_way,
k_shot=args.k_spt,
k_query=args.k_qry,
imgsz=28,
device=device,
)
# Create a vanilla PyTorch neural network.
inplace_relu = True
net = nn.Sequential(
nn.Conv2d(1, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=False),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=False),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, affine=True, track_running_stats=False),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(64, args.n_way)).to(device)
net.train()
# Given this module we've created, rip out the parameters and buffers
# and return a functional version of the module. `fnet` is stateless
# and can be called with `fnet(params, buffers, args, kwargs)`
fnet, params, buffers = make_functional_with_buffers(net)
# We will use Adam to (meta-)optimize the initial parameters
# to be adapted.
meta_opt = optim.Adam(params, lr=1e-3)
log = []
for epoch in range(100):
train(db, [params, buffers, fnet], device, meta_opt, epoch, log)
test(db, [params, buffers, fnet], device, epoch, log)
plot(log)
# Trains a model for n_inner_iter using the support and returns a loss
# using the query.
def loss_for_task(net, n_inner_iter, x_spt, y_spt, x_qry, y_qry):
params, buffers, fnet = net
querysz = x_qry.size(0)
def compute_loss(new_params, buffers, x, y):
logits = fnet(new_params, buffers, x)
loss = F.cross_entropy(logits, y)
return loss
new_params = params
for _ in range(n_inner_iter):
grads = grad(compute_loss)(new_params, buffers, x_spt, y_spt)
new_params = [p - g * 1e-1 for p, g, in zip(new_params, grads)]
# The final set of adapted parameters will induce some
# final loss and accuracy on the query dataset.
# These will be used to update the model's meta-parameters.
qry_logits = fnet(new_params, buffers, x_qry)
qry_loss = F.cross_entropy(qry_logits, y_qry)
qry_acc = (qry_logits.argmax(
dim=1) == y_qry).sum() / querysz
return qry_loss, qry_acc
def train(db, net, device, meta_opt, epoch, log):
params, buffers, fnet = net
n_train_iter = db.x_train.shape[0] // db.batchsz
for batch_idx in range(n_train_iter):
start_time = time.time()
# Sample a batch of support and query images and labels.
x_spt, y_spt, x_qry, y_qry = db.next()
task_num, setsz, c_, h, w = x_spt.size()
n_inner_iter = 5
meta_opt.zero_grad()
# In parallel, trains one model per task. There is a support (x, y)
# for each task and a query (x, y) for each task.
compute_loss_for_task = functools.partial(loss_for_task, net, n_inner_iter)
qry_losses, qry_accs = vmap(compute_loss_for_task)(x_spt, y_spt, x_qry, y_qry)
# Compute the maml loss by summing together the returned losses.
qry_losses.sum().backward()
meta_opt.step()
qry_losses = qry_losses.detach().sum() / task_num
qry_accs = 100. * qry_accs.sum() / task_num
i = epoch + float(batch_idx) / n_train_iter
iter_time = time.time() - start_time
if batch_idx % 4 == 0:
print(
f'[Epoch {i:.2f}] Train Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f} | Time: {iter_time:.2f}'
)
log.append({
'epoch': i,
'loss': qry_losses,
'acc': qry_accs,
'mode': 'train',
'time': time.time(),
})
def test(db, net, device, epoch, log):
# Crucially in our testing procedure here, we do *not* fine-tune
# the model during testing for simplicity.
# Most research papers using MAML for this task do an extra
# stage of fine-tuning here that should be added if you are
# adapting this code for research.
[params, buffers, fnet] = net
n_test_iter = db.x_test.shape[0] // db.batchsz
qry_losses = []
qry_accs = []
for batch_idx in range(n_test_iter):
x_spt, y_spt, x_qry, y_qry = db.next('test')
task_num, setsz, c_, h, w = x_spt.size()
# TODO: Maybe pull this out into a separate module so it
# doesn't have to be duplicated between `train` and `test`?
n_inner_iter = 5
for i in range(task_num):
new_params = params
for _ in range(n_inner_iter):
spt_logits = fnet(new_params, buffers, x_spt[i])
spt_loss = F.cross_entropy(spt_logits, y_spt[i])
grads = torch.autograd.grad(spt_loss, new_params)
new_params = [p - g * 1e-1 for p, g, in zip(new_params, grads)]
# The query loss and acc induced by these parameters.
qry_logits = fnet(new_params, buffers, x_qry[i]).detach()
qry_loss = F.cross_entropy(
qry_logits, y_qry[i], reduction='none')
qry_losses.append(qry_loss.detach())
qry_accs.append(
(qry_logits.argmax(dim=1) == y_qry[i]).detach())
qry_losses = torch.cat(qry_losses).mean().item()
qry_accs = 100. * torch.cat(qry_accs).float().mean().item()
print(
f'[Epoch {epoch+1:.2f}] Test Loss: {qry_losses:.2f} | Acc: {qry_accs:.2f}'
)
log.append({
'epoch': epoch + 1,
'loss': qry_losses,
'acc': qry_accs,
'mode': 'test',
'time': time.time(),
})
def plot(log):
# Generally you should pull your plotting code out of your training
# script but we are doing it here for brevity.
df = pd.DataFrame(log)
fig, ax = plt.subplots(figsize=(6, 4))
train_df = df[df['mode'] == 'train']
test_df = df[df['mode'] == 'test']
ax.plot(train_df['epoch'], train_df['acc'], label='Train')
ax.plot(test_df['epoch'], test_df['acc'], label='Test')
ax.set_xlabel('Epoch')
ax.set_ylabel('Accuracy')
ax.set_ylim(70, 100)
fig.legend(ncol=2, loc='lower right')
fig.tight_layout()
fname = 'maml-accs.png'
print(f'--- Plotting accuracy to {fname}')
fig.savefig(fname)
plt.close(fig)
if __name__ == '__main__':
main()
| pytorch-master | functorch/examples/maml_omniglot/maml-omniglot-transforms.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# These Omniglot loaders are from Jackie Loong's PyTorch MAML implementation:
# https://github.com/dragen1860/MAML-Pytorch
# https://github.com/dragen1860/MAML-Pytorch/blob/master/omniglot.py
# https://github.com/dragen1860/MAML-Pytorch/blob/master/omniglotNShot.py
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
import torch
import torch.utils.data as data
import os
import os.path
import errno
class Omniglot(data.Dataset):
urls = [
'https://github.com/brendenlake/omniglot/raw/master/python/images_background.zip',
'https://github.com/brendenlake/omniglot/raw/master/python/images_evaluation.zip'
]
raw_folder = 'raw'
processed_folder = 'processed'
training_file = 'training.pt'
test_file = 'test.pt'
'''
The items are (filename,category). The index of all the categories can be found in self.idx_classes
Args:
- root: the directory where the dataset will be stored
- transform: how to transform the input
- target_transform: how to transform the target
- download: need to download the dataset
'''
def __init__(self, root, transform=None, target_transform=None,
download=False):
self.root = root
self.transform = transform
self.target_transform = target_transform
if not self._check_exists():
if download:
self.download()
else:
raise RuntimeError('Dataset not found.' + ' You can use download=True to download it')
self.all_items = find_classes(os.path.join(self.root, self.processed_folder))
self.idx_classes = index_classes(self.all_items)
def __getitem__(self, index):
filename = self.all_items[index][0]
img = str.join('/', [self.all_items[index][2], filename])
target = self.idx_classes[self.all_items[index][1]]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.all_items)
def _check_exists(self):
return os.path.exists(os.path.join(self.root, self.processed_folder, "images_evaluation")) and \
os.path.exists(os.path.join(self.root, self.processed_folder, "images_background"))
def download(self):
from six.moves import urllib
import zipfile
if self._check_exists():
return
# download files
try:
os.makedirs(os.path.join(self.root, self.raw_folder))
os.makedirs(os.path.join(self.root, self.processed_folder))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
for url in self.urls:
print('== Downloading ' + url)
data = urllib.request.urlopen(url)
filename = url.rpartition('/')[2]
file_path = os.path.join(self.root, self.raw_folder, filename)
with open(file_path, 'wb') as f:
f.write(data.read())
file_processed = os.path.join(self.root, self.processed_folder)
print("== Unzip from " + file_path + " to " + file_processed)
zip_ref = zipfile.ZipFile(file_path, 'r')
zip_ref.extractall(file_processed)
zip_ref.close()
print("Download finished.")
def find_classes(root_dir):
retour = []
for (root, dirs, files) in os.walk(root_dir):
for f in files:
if (f.endswith("png")):
r = root.split('/')
lr = len(r)
retour.append((f, r[lr - 2] + "/" + r[lr - 1], root))
print("== Found %d items " % len(retour))
return retour
def index_classes(items):
idx = {}
for i in items:
if i[1] not in idx:
idx[i[1]] = len(idx)
print("== Found %d classes" % len(idx))
return idx
class OmniglotNShot:
def __init__(self, root, batchsz, n_way, k_shot, k_query, imgsz, device=None):
"""
        Different from mnistNShot, batches here are pre-sampled N-way k-shot
        episodes that are cached and served from memory.
:param root:
:param batchsz: task num
:param n_way:
:param k_shot:
        :param k_query:
:param imgsz:
"""
self.resize = imgsz
self.device = device
if not os.path.isfile(os.path.join(root, 'omniglot.npy')):
# if root/data.npy does not exist, just download it
self.x = Omniglot(
root, download=True,
transform=transforms.Compose(
[lambda x: Image.open(x).convert('L'),
lambda x: x.resize((imgsz, imgsz)),
lambda x: np.reshape(x, (imgsz, imgsz, 1)),
lambda x: np.transpose(x, [2, 0, 1]),
lambda x: x / 255.]),
)
temp = dict() # {label:img1, img2..., 20 imgs, label2: img1, img2,... in total, 1623 label}
for (img, label) in self.x:
if label in temp.keys():
temp[label].append(img)
else:
temp[label] = [img]
self.x = []
            for label, imgs in temp.items():  # label info is discarded; each label contains 20 imgs
self.x.append(np.array(imgs))
            # as different classes may have different numbers of imgs
            self.x = np.array(self.x).astype(float)  # [[20 imgs],..., 1623 classes in total]
# each character contains 20 imgs
print('data shape:', self.x.shape) # [1623, 20, 84, 84, 1]
temp = [] # Free memory
# save all dataset into npy file.
np.save(os.path.join(root, 'omniglot.npy'), self.x)
print('write into omniglot.npy.')
else:
# if data.npy exists, just load it.
self.x = np.load(os.path.join(root, 'omniglot.npy'))
print('load from omniglot.npy.')
# [1623, 20, 84, 84, 1]
# TODO: can not shuffle here, we must keep training and test set distinct!
self.x_train, self.x_test = self.x[:1200], self.x[1200:]
# self.normalization()
self.batchsz = batchsz
self.n_cls = self.x.shape[0] # 1623
self.n_way = n_way # n way
self.k_shot = k_shot # k shot
self.k_query = k_query # k query
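        # Omniglot has 20 drawings per character, so the support and query
        # examples drawn for a class cannot exceed 20 in total.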
assert (k_shot + k_query) <= 20
# save pointer of current read batch in total cache
self.indexes = {"train": 0, "test": 0}
self.datasets = {"train": self.x_train, "test": self.x_test} # original data cached
print("DB: train", self.x_train.shape, "test", self.x_test.shape)
self.datasets_cache = {"train": self.load_data_cache(self.datasets["train"]), # current epoch data cached
"test": self.load_data_cache(self.datasets["test"])}
def normalization(self):
"""
        Normalizes our data to have a mean of 0 and std of 1
"""
self.mean = np.mean(self.x_train)
self.std = np.std(self.x_train)
self.max = np.max(self.x_train)
self.min = np.min(self.x_train)
# print("before norm:", "mean", self.mean, "max", self.max, "min", self.min, "std", self.std)
self.x_train = (self.x_train - self.mean) / self.std
self.x_test = (self.x_test - self.mean) / self.std
self.mean = np.mean(self.x_train)
self.std = np.std(self.x_train)
self.max = np.max(self.x_train)
self.min = np.min(self.x_train)
# print("after norm:", "mean", self.mean, "max", self.max, "min", self.min, "std", self.std)
def load_data_cache(self, data_pack):
"""
Collects several batches data for N-shot learning
:param data_pack: [cls_num, 20, 84, 84, 1]
:return: A list with [support_set_x, support_set_y, target_x, target_y] ready to be fed to our networks
"""
# take 5 way 1 shot as example: 5 * 1
setsz = self.k_shot * self.n_way
querysz = self.k_query * self.n_way
data_cache = []
# print('preload next 50 caches of batchsz of batch.')
for sample in range(10): # num of episodes
x_spts, y_spts, x_qrys, y_qrys = [], [], [], []
for i in range(self.batchsz): # one batch means one set
x_spt, y_spt, x_qry, y_qry = [], [], [], []
selected_cls = np.random.choice(data_pack.shape[0], self.n_way, False)
for j, cur_class in enumerate(selected_cls):
selected_img = np.random.choice(20, self.k_shot + self.k_query, False)
# meta-training and meta-test
x_spt.append(data_pack[cur_class][selected_img[:self.k_shot]])
x_qry.append(data_pack[cur_class][selected_img[self.k_shot:]])
y_spt.append([j for _ in range(self.k_shot)])
y_qry.append([j for _ in range(self.k_query)])
# shuffle inside a batch
perm = np.random.permutation(self.n_way * self.k_shot)
x_spt = np.array(x_spt).reshape(self.n_way * self.k_shot, 1, self.resize, self.resize)[perm]
y_spt = np.array(y_spt).reshape(self.n_way * self.k_shot)[perm]
perm = np.random.permutation(self.n_way * self.k_query)
x_qry = np.array(x_qry).reshape(self.n_way * self.k_query, 1, self.resize, self.resize)[perm]
y_qry = np.array(y_qry).reshape(self.n_way * self.k_query)[perm]
# append [sptsz, 1, 84, 84] => [b, setsz, 1, 84, 84]
x_spts.append(x_spt)
y_spts.append(y_spt)
x_qrys.append(x_qry)
y_qrys.append(y_qry)
# [b, setsz, 1, 84, 84]
x_spts = np.array(x_spts).astype(np.float32).reshape(self.batchsz, setsz, 1, self.resize, self.resize)
            y_spts = np.array(y_spts).astype(int).reshape(self.batchsz, setsz)
# [b, qrysz, 1, 84, 84]
x_qrys = np.array(x_qrys).astype(np.float32).reshape(self.batchsz, querysz, 1, self.resize, self.resize)
            y_qrys = np.array(y_qrys).astype(int).reshape(self.batchsz, querysz)
x_spts, y_spts, x_qrys, y_qrys = [
torch.from_numpy(z).to(self.device) for z in
[x_spts, y_spts, x_qrys, y_qrys]
]
data_cache.append([x_spts, y_spts, x_qrys, y_qrys])
return data_cache
def next(self, mode='train'):
"""
Gets next batch from the dataset with name.
:param mode: The name of the splitting (one of "train", "val", "test")
:return:
"""
        # refresh the cache if the index has run past the number of cached batches
if self.indexes[mode] >= len(self.datasets_cache[mode]):
self.indexes[mode] = 0
self.datasets_cache[mode] = self.load_data_cache(self.datasets[mode])
next_batch = self.datasets_cache[mode][self.indexes[mode]]
self.indexes[mode] += 1
return next_batch
| pytorch-master | functorch/examples/maml_omniglot/support/omniglot_loaders.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functorch import make_functional
from functorch.compile import nnc_jit
import torch
import torch.nn as nn
import time
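# Allow the TorchScript JIT to fuse elementwise operations on CPU.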
torch._C._jit_override_can_fuse_on_cpu(True)
def bench(f, iters=100, warmup=10):
for _ in range(warmup):
f()
begin = time.time()
for _ in range(iters):
f()
print((time.time() - begin))
class Foo(nn.Module):
def __init__(self, num_layers=3, features=100):
super().__init__()
mods = []
for _ in range(num_layers):
mods.append(nn.Linear(features, features, bias=False))
self.mod = nn.Sequential(*mods)
def forward(self, x):
return (self.mod(x)**2).sum()
batch_size = 16
features = 64
num_layers = 8
inp = torch.randn((batch_size, features))
mod = Foo(num_layers, features)
jit_mod = torch.jit.script(mod)
func_model, weights = make_functional(mod)
lr = 1.0
def functional_step(x, weights):
weights = [weight.detach().requires_grad_() for weight in weights]
out = func_model(weights, x)
out.backward()
new_weights = [weight - lr * weight.grad for weight in weights]
return out, new_weights
optim = torch.optim.SGD(jit_mod.parameters(), lr=lr, momentum=0, dampening=0, weight_decay=0)
def jit_step(x, weights):
optim.zero_grad()
loss = jit_mod(x)
loss.backward()
optim.step()
return loss, None
def train(train_step, weights):
torch.manual_seed(16)
train_step(inp, weights)
begin = time.time()
for itr in range(1000):
loss, weights = train_step(torch.randn(batch_size, features), weights)
if itr % 200 == 0:
print(f"Loss at {itr}: {loss}")
print("Time taken: ", time.time() - begin)
print()
grad_pt = functional_step
grad_nnc = nnc_jit(functional_step)
print("Starting PT training")
train(grad_pt, weights)
print("Starting NNC training")
train(grad_nnc, weights)
print("Starting JIT training")
train(jit_step, None)
| pytorch-master | functorch/examples/compilation/linear_train.py |
import timeit
from functorch.compile import compiled_module, tvm_compile
import torch.nn as nn
import torch
def nop(f, _):
return f
fw_compiler = tvm_compile(target='llvm', tuning_logfile='fw_keops')
bw_compiler = tvm_compile(target='llvm', tuning_logfile='bw_keops')
fw_compiler = nop
bw_compiler = nop
def run(mod, input):
out = mod(input)
out.sum().backward()
grads = [p.grad for p in mod.parameters()]
return (out, *grads)
class Foo(nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.param = nn.Parameter(torch.randn(1))
self.register_buffer("buf", torch.randn(1))
def forward(self, x):
return (self.param * x + self.buf).sum(dim=0)
input = torch.randn(1)
mod = Foo()
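# compiled_module wraps the module so its forward/backward run through the given
# compilers, while the underlying module stays reachable as `.orig_module` (used below).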
compiled_mod = compiled_module(mod, fw_compiler, bw_compiler)
for a, b in zip(run(mod, input), run(compiled_mod, input)):
torch.testing.assert_allclose(a, b)
out = mod(input)
out.sum().backward()
mod.param.data -= mod.param.grad
compiled_mod.orig_module.param.data -= compiled_mod.orig_module.param.grad
compiled_mod.orig_module.param.grad = None
for a, b in zip(run(mod, input), run(compiled_mod, input)):
torch.testing.assert_allclose(a, b)
for _ in range(5):
i = 10000
t = timeit.Timer("mod(input)", globals=globals()).timeit(10000)
print(f"eager {t/i*1e6}")
t = timeit.Timer("compiled_mod(input)", globals=globals()).timeit(10000)
print(f"compiled {t/i*1e6}")
| pytorch-master | functorch/examples/compilation/fuse_module.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functorch import grad, make_fx
from functorch.compile import nnc_jit
import torch
import time
def f(x):
return torch.sin(x).sum()
inp = torch.randn(100)
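# Build the gradient of f three ways: eager functorch grad, an FX graph traced
# with make_fx, and an NNC-compiled version via nnc_jit.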
grad_pt = grad(f)
grad_fx = make_fx(grad_pt)(inp)
grad_nnc = nnc_jit(grad_pt)
def bench(name, f, iters=10000, warmup=3):
for _ in range(warmup):
f()
begin = time.time()
for _ in range(iters):
f()
print(f"{name}: ", time.time() - begin)
bench("Pytorch: ", lambda: grad_pt(inp))
bench("FX: ", lambda: grad_fx(inp))
bench("NNC: ", lambda: grad_nnc(inp))
| pytorch-master | functorch/examples/compilation/simple_function.py |
from functorch.compile import aot_function, tvm_compile
import torch
import time
import torch.utils
a = torch.randn(2000, 1, 4, requires_grad=True)
b = torch.randn(1, 2000, 4)
def f(a):
return (a * b).sum(dim=0)
fw_compiler = tvm_compile(target='llvm', tuning_logfile='fw_keops')
bw_compiler = tvm_compile(target='llvm', tuning_logfile='bw_keops')
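# aot_function traces f into separate forward and backward graphs and hands each
# to the corresponding compiler.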
compiled_f = aot_function(f, fw_compiler, bw_compiler)
# fw_compiler = lambda x, _: x
# bw_compiler = lambda x, _: x
iters = 10
out = compiled_f(a)
out.sum().backward()
def bench(func):
begin = time.time()
for _ in range(iters):
out = func(a).sin()
out.sum().backward()
a.grad = None
print(time.time() - begin)
def bench_jax():
import jax.numpy as jnp
import jax
jax_a = jnp.array(a.detach().numpy())
jax_b = jnp.array(b.detach().numpy())
def f(a):
return jnp.sin((a * jax_b).sum(axis=[0])).sum()
jit_f = jax.jit(jax.grad(f))
jit_f(jax_a)
begin = time.time()
for _ in range(iters):
out = jit_f(jax_a)
out.block_until_ready()
print(time.time() - begin)
# for
bench(f)
bench(compiled_f)
# bench_jax()
| pytorch-master | functorch/examples/compilation/eager_fusion.py |
# Eric Jang originally wrote an implementation of MAML in JAX
# (https://github.com/ericjang/maml-jax).
# We translated his implementation from JAX to PyTorch.
from functorch import grad, vmap, make_functional
import matplotlib.pyplot as plt
import math
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
import matplotlib as mpl
mpl.use('Agg')
class ThreeLayerNet(nn.Module):
def __init__(self):
super(ThreeLayerNet, self).__init__()
self.fc1 = nn.Linear(1, 40)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(40, 40)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(40, 1)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
x = self.relu2(x)
x = self.fc3(x)
return x
# TODO: Use F.mse_loss
def mse_loss(x, y):
return torch.mean((x - y) ** 2)
net, params = make_functional(ThreeLayerNet())
opt = torch.optim.Adam(params, lr=1e-3)
alpha = 0.1
K = 20
losses = []
num_tasks = 4
def sample_tasks(outer_batch_size, inner_batch_size):
# Select amplitude and phase for the task
As = []
phases = []
for _ in range(outer_batch_size):
As.append(np.random.uniform(low=0.1, high=.5))
phases.append(np.random.uniform(low=0., high=np.pi))
def get_batch():
xs, ys = [], []
for A, phase in zip(As, phases):
x = np.random.uniform(low=-5., high=5., size=(inner_batch_size, 1))
y = A * np.sin(x + phase)
xs.append(x)
ys.append(y)
return torch.tensor(xs, dtype=torch.float), torch.tensor(ys, dtype=torch.float)
x1, y1 = get_batch()
x2, y2 = get_batch()
return x1, y1, x2, y2
for it in range(20000):
loss2 = 0.0
opt.zero_grad()
def get_loss_for_task(x1, y1, x2, y2):
def inner_loss(params, x1, y1):
f = net(params, x1)
loss = mse_loss(f, y1)
return loss
grads = grad(inner_loss)(params, x1, y1)
new_params = [(params[i] - alpha * grads[i]) for i in range(len(params))]
v_f = net(new_params, x2)
return mse_loss(v_f, y2)
task = sample_tasks(num_tasks, K)
inner_losses = vmap(get_loss_for_task)(task[0], task[1], task[2], task[3])
loss2 = sum(inner_losses) / len(inner_losses)
loss2.backward()
opt.step()
if it % 100 == 0:
print('Iteration %d -- Outer Loss: %.4f' % (it, loss2))
losses.append(loss2.detach())
t_A = torch.tensor(0.0).uniform_(0.1, 0.5)
t_b = torch.tensor(0.0).uniform_(0.0, math.pi)
t_x = torch.empty(4, 1).uniform_(-5, 5)
t_y = t_A * torch.sin(t_x + t_b)
opt.zero_grad()
t_params = params
for k in range(5):
t_f = net(t_params, t_x)
t_loss = F.l1_loss(t_f, t_y)
grads = torch.autograd.grad(t_loss, t_params, create_graph=True)
t_params = [(t_params[i] - alpha * grads[i]) for i in range(len(params))]
test_x = torch.arange(-2 * math.pi, 2 * math.pi, step=0.01).unsqueeze(1)
test_y = t_A * torch.sin(test_x + t_b)
test_f = net(t_params, test_x)
plt.plot(test_x.data.numpy(), test_y.data.numpy(), label='sin(x)')
plt.plot(test_x.data.numpy(), test_f.data.numpy(), label='net(x)')
plt.plot(t_x.data.numpy(), t_y.data.numpy(), 'o', label='Examples')
plt.legend()
plt.savefig('maml-sine.png')
plt.figure()
plt.plot(np.convolve(losses, [.05] * 20))
plt.savefig('losses.png')
| pytorch-master | functorch/examples/maml_regression/evjang_transforms_module.py |
# Eric Jang originally wrote an implementation of MAML in JAX
# (https://github.com/ericjang/maml-jax).
# We translated his implementation from JAX to PyTorch.
import matplotlib.pyplot as plt
import math
import torch
import numpy as np
from torch.nn import functional as F
import matplotlib as mpl
mpl.use('Agg')
def net(x, params):
x = F.linear(x, params[0], params[1])
x = F.relu(x)
x = F.linear(x, params[2], params[3])
x = F.relu(x)
x = F.linear(x, params[4], params[5])
return x
params = [
torch.Tensor(40, 1).uniform_(-1., 1.).requires_grad_(),
torch.Tensor(40).zero_().requires_grad_(),
torch.Tensor(40, 40).uniform_(-1. / math.sqrt(40), 1. / math.sqrt(40)).requires_grad_(),
torch.Tensor(40).zero_().requires_grad_(),
torch.Tensor(1, 40).uniform_(-1. / math.sqrt(40), 1. / math.sqrt(40)).requires_grad_(),
torch.Tensor(1).zero_().requires_grad_(),
]
opt = torch.optim.Adam(params, lr=1e-3)
alpha = 0.1
K = 20
losses = []
num_tasks = 4
def sample_tasks(outer_batch_size, inner_batch_size):
# Select amplitude and phase for the task
As = []
phases = []
for _ in range(outer_batch_size):
As.append(np.random.uniform(low=0.1, high=.5))
phases.append(np.random.uniform(low=0., high=np.pi))
def get_batch():
xs, ys = [], []
for A, phase in zip(As, phases):
x = np.random.uniform(low=-5., high=5., size=(inner_batch_size, 1))
y = A * np.sin(x + phase)
xs.append(x)
ys.append(y)
return torch.tensor(xs, dtype=torch.float), torch.tensor(ys, dtype=torch.float)
x1, y1 = get_batch()
x2, y2 = get_batch()
return x1, y1, x2, y2
for it in range(20000):
loss2 = 0.0
opt.zero_grad()
def get_loss_for_task(x1, y1, x2, y2):
f = net(x1, params)
loss = F.mse_loss(f, y1)
# create_graph=True because computing grads here is part of the forward pass.
# We want to differentiate through the SGD update steps and get higher order
# derivatives in the backward pass.
grads = torch.autograd.grad(loss, params, create_graph=True)
new_params = [(params[i] - alpha * grads[i]) for i in range(len(params))]
v_f = net(x2, new_params)
return F.mse_loss(v_f, y2)
task = sample_tasks(num_tasks, K)
inner_losses = [get_loss_for_task(task[0][i], task[1][i], task[2][i], task[3][i]) for i in range(num_tasks)]
loss2 = sum(inner_losses) / len(inner_losses)
loss2.backward()
opt.step()
if it % 100 == 0:
print('Iteration %d -- Outer Loss: %.4f' % (it, loss2))
losses.append(loss2.detach())
t_A = torch.tensor(0.0).uniform_(0.1, 0.5)
t_b = torch.tensor(0.0).uniform_(0.0, math.pi)
t_x = torch.empty(4, 1).uniform_(-5, 5)
t_y = t_A * torch.sin(t_x + t_b)
opt.zero_grad()
t_params = params
for k in range(5):
t_f = net(t_x, t_params)
t_loss = F.l1_loss(t_f, t_y)
grads = torch.autograd.grad(t_loss, t_params, create_graph=True)
t_params = [(t_params[i] - alpha * grads[i]) for i in range(len(params))]
test_x = torch.arange(-2 * math.pi, 2 * math.pi, step=0.01).unsqueeze(1)
test_y = t_A * torch.sin(test_x + t_b)
test_f = net(test_x, t_params)
plt.plot(test_x.data.numpy(), test_y.data.numpy(), label='sin(x)')
plt.plot(test_x.data.numpy(), test_f.data.numpy(), label='net(x)')
plt.plot(t_x.data.numpy(), t_y.data.numpy(), 'o', label='Examples')
plt.legend()
plt.savefig('maml-sine.png')
plt.figure()
plt.plot(np.convolve(losses, [.05] * 20))
plt.savefig('losses.png')
| pytorch-master | functorch/examples/maml_regression/evjang.py |
# Eric Jang originally wrote an implementation of MAML in JAX
# (https://github.com/ericjang/maml-jax).
# We translated his implementation from JAX to PyTorch.
from functorch import grad, vmap
import matplotlib.pyplot as plt
import math
import torch
import numpy as np
from torch.nn import functional as F
import matplotlib as mpl
mpl.use('Agg')
def net(params, x):
x = F.linear(x, params[0], params[1])
x = F.relu(x)
x = F.linear(x, params[2], params[3])
x = F.relu(x)
x = F.linear(x, params[4], params[5])
return x
params = [
torch.Tensor(40, 1).uniform_(-1., 1.).requires_grad_(),
torch.Tensor(40).zero_().requires_grad_(),
torch.Tensor(40, 40).uniform_(-1. / math.sqrt(40), 1. / math.sqrt(40)).requires_grad_(),
torch.Tensor(40).zero_().requires_grad_(),
torch.Tensor(1, 40).uniform_(-1. / math.sqrt(40), 1. / math.sqrt(40)).requires_grad_(),
torch.Tensor(1).zero_().requires_grad_(),
]
# TODO: use F.mse_loss
def mse_loss(x, y):
return torch.mean((x - y) ** 2)
opt = torch.optim.Adam(params, lr=1e-3)
alpha = 0.1
K = 20
losses = []
num_tasks = 4
def sample_tasks(outer_batch_size, inner_batch_size):
# Select amplitude and phase for the task
As = []
phases = []
for _ in range(outer_batch_size):
As.append(np.random.uniform(low=0.1, high=.5))
phases.append(np.random.uniform(low=0., high=np.pi))
def get_batch():
xs, ys = [], []
for A, phase in zip(As, phases):
x = np.random.uniform(low=-5., high=5., size=(inner_batch_size, 1))
y = A * np.sin(x + phase)
xs.append(x)
ys.append(y)
return torch.tensor(xs, dtype=torch.float), torch.tensor(ys, dtype=torch.float)
x1, y1 = get_batch()
x2, y2 = get_batch()
return x1, y1, x2, y2
for it in range(20000):
loss2 = 0.0
opt.zero_grad()
def get_loss_for_task(x1, y1, x2, y2):
def inner_loss(params, x1, y1):
f = net(params, x1)
loss = mse_loss(f, y1)
return loss
grads = grad(inner_loss)(tuple(params), x1, y1)
new_params = [(params[i] - alpha * grads[i]) for i in range(len(params))]
v_f = net(new_params, x2)
return mse_loss(v_f, y2)
task = sample_tasks(num_tasks, K)
inner_losses = vmap(get_loss_for_task)(task[0], task[1], task[2], task[3])
loss2 = sum(inner_losses) / len(inner_losses)
loss2.backward()
opt.step()
if it % 100 == 0:
print('Iteration %d -- Outer Loss: %.4f' % (it, loss2))
losses.append(loss2.detach())
t_A = torch.tensor(0.0).uniform_(0.1, 0.5)
t_b = torch.tensor(0.0).uniform_(0.0, math.pi)
t_x = torch.empty(4, 1).uniform_(-5, 5)
t_y = t_A * torch.sin(t_x + t_b)
opt.zero_grad()
t_params = params
for k in range(5):
t_f = net(t_params, t_x)
t_loss = F.l1_loss(t_f, t_y)
grads = torch.autograd.grad(t_loss, t_params, create_graph=True)
t_params = [(t_params[i] - alpha * grads[i]) for i in range(len(params))]
test_x = torch.arange(-2 * math.pi, 2 * math.pi, step=0.01).unsqueeze(1)
test_y = t_A * torch.sin(test_x + t_b)
test_f = net(t_params, test_x)
plt.plot(test_x.data.numpy(), test_y.data.numpy(), label='sin(x)')
plt.plot(test_x.data.numpy(), test_f.data.numpy(), label='net(x)')
plt.plot(t_x.data.numpy(), t_y.data.numpy(), 'o', label='Examples')
plt.legend()
plt.savefig('maml-sine.png')
plt.figure()
plt.plot(np.convolve(losses, [.05] * 20))
plt.savefig('losses.png')
| pytorch-master | functorch/examples/maml_regression/evjang_transforms.py |
import pandas
import matplotlib.pyplot as plt
df = pandas.read_csv("perf.csv")
ops = pandas.unique(df["operator"])
nops = len(ops)
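# Pivot to one row per (operator, shape) with a column of measured times per fuser,
# then divide every column by the "eager" column to express times relative to eager.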
pivot_op_shape = df.pivot_table(values="time", index=["operator", "shape"], columns=["fuser"])
pivot_speedups = (pivot_op_shape.T / pivot_op_shape["eager"]).T
plt.rcParams["figure.figsize"] = (20, 100)
fig, axs = plt.subplots(nops)
plt.subplots_adjust(hspace=0.5)
for idx, op in enumerate(ops):
op_speedups = pivot_speedups.T[op].T
op_speedups.plot(ax=axs[idx], kind="bar", ylim=(0, 5), rot=45)
axs[idx].set_title(op)
axs[idx].set_xlabel("")
plt.savefig("scorecard.svg")
| pytorch-master | functorch/benchmarks/process_scorecard.py |
#!/usr/bin/env python3
import argparse
import os
import logging
import pandas as pd
from functorch._src.benchmark_utils import compute_utilization
# process the chrome traces output by the pytorch profiler
# require the json input file's name to be in format {model_name}_chrome_trace_*.json
# the runtimes file should be a CSV whose header has "name" and "runtime" columns
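# For example, a runtimes CSV could look like the following (model names and
# values are hypothetical; runtime is presumably in seconds, given the 1e6
# conversion to microseconds in main()):
#   name,runtime
#   resnet18,1.234
#   hf_Bert,2.345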
def get_model_name(filename):
"""
Get model name from a file in format {model_name}_chrome_trace_*.json
"""
_, tail = os.path.split(filename)
modelname = tail[:tail.find("_chrome_trace")]
return modelname
def get_total_length(run_times_df, modelname):
return float(run_times_df[run_times_df["name"] == modelname]["runtime"])
def main():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
parser.add_argument(
"--runtime", "-runf", help="file name of the runtime file", required=True
)
group.add_argument(
"--filename", "-f", action="append", help="a filename of the json file to process"
)
group.add_argument(
"--folder", "-fd", help="a folder of the json files to process"
)
args = parser.parse_args()
if args.filename:
filenames = args.filename
elif args.folder:
filenames = []
directory = args.folder
for filename in os.listdir(directory):
f = os.path.join(directory, filename)
if os.path.isfile(f) and f.endswith(".json"):
filenames.append(f)
else:
print("Please provide a filename or a folder name")
print("modelname, GPU Utilization, MM and Conv time")
run_times_df = pd.read_csv(args.runtime)
for filename in filenames:
try:
modelname = get_model_name(filename)
total_length = get_total_length(run_times_df, modelname) * 1e6
utilization, mm_conv_utilization = compute_utilization(filenames, total_length)
print(f"{modelname}, {utilization}, {mm_conv_utilization}")
except BaseException:
logging.exception(f"{filename}, ERROR")
print(f"{filename}, ERROR")
if __name__ == "__main__":
main()
| pytorch-master | functorch/benchmarks/chrome_trace_parser.py |
import sys
import time
import torch
import inspect
import itertools
from functorch import pointwise_operator
torch.set_num_threads(1)
torch._C._debug_set_fusion_group_inlining(False)
def rand(*shape):
return torch.rand(*shape).mul(16).add(1)
# ------------------------------------------------------------------------------
# Shape test cases
# ------------------------------------------------------------------------------
def scalar():
return (rand(1), rand(1))
def small():
return (rand(32), rand(32))
def small_2d():
return (rand(1, 32), rand(1, 32))
def small_broadcast():
return (rand(4, 32), rand(32))
def medium():
return (rand(32, 12, 64, 64), rand(32, 12, 64, 64))
def medium_sliced():
return (rand(32, 12, 64, 64)[..., ::2],
rand(32, 12, 64, 64)[..., ::2])
def medium_transpose():
return (rand(32, 12, 64, 64).transpose(-1, -2),
rand(32, 12, 64, 64).transpose(-1, -2))
def medium2():
return (rand(32, 3, 224, 224), rand(32, 3, 224, 224))
def medium3d():
return (rand(16, 32, 64), rand(16, 32, 64))
def medium_channels_last():
return (rand(32, 3, 224, 224).to(memory_format=torch.channels_last),
rand(32, 3, 224, 224).to(memory_format=torch.channels_last))
def medium_broadcast():
return (rand(32, 12, 64, 64), rand(64))
def medium_broadcast_channels_last():
return (rand(32, 3, 223, 223).to(memory_format=torch.channels_last),
rand(3, 1, 1))
def large():
return (rand(8192, 8192), rand(8192, 8192))
def large_transpose():
return (rand(8192, 8192).transpose(0, 1),
rand(8192, 8192).transpose(0, 1))
def large_channels_last():
return (rand(32, 32, 256, 256).to(memory_format=torch.channels_last),
rand(32, 32, 256, 256).to(memory_format=torch.channels_last))
def pathological_broadcast():
return (rand(1, 32, 32, 2), rand(1024, 1, 1, 2))
# ------------------------------------------------------------------------------
# Operator test cases
# ------------------------------------------------------------------------------
def add(a, b):
return a + b
def sub(a, b):
return a - b
def mul(a, b):
return a * b
def div(a, b):
return a / b
def relu(a):
return a.relu()
def sigmoid(a):
return a.sigmoid()
def tanh(a):
return a.tanh()
def log(a):
return a.log()
def exp(a):
return a.exp()
def square(a):
return a ** 2
def fma(a, b):
return a * b + b
def hardswish(a):
return a * (a + 3.0).clamp(0.0, 6.0) / 6.0
def native_hardswish(a):
return torch._C._nn.hardswish(a)
def softplus(a):
return (a * 1.0).exp().log1p() / 1.0
def mish(a):
return a * ((a * 1.0).exp().log1p() / 1.0).tanh()
# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------
def time_cpu(fn, args, iters):
s = time.perf_counter()
for _ in range(iters):
fn(*args)
e = time.perf_counter()
return e - s
def time_cuda(fn, args, iters):
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
for _ in range(iters):
fn(*args)
end.record()
torch.cuda.synchronize()
return start.elapsed_time(end) / 1e3
def benchmark_with_timer(fn, args, timer):
timer(fn, args, 3)
calibration = timer(fn, args, 1)
iters = int(1.0 / calibration)
return timer(fn, args, iters) / iters
def benchmark(fn, args):
timer = time_cpu if args[0].device.type == "cpu" else time_cuda
return benchmark_with_timer(fn, args, timer)
def micros(s):
return f"{s * 1e6:.1f}"
shapes = [
scalar,
small,
small_2d,
small_broadcast,
medium,
medium2,
medium3d,
medium_sliced,
medium_transpose,
medium_channels_last,
medium_broadcast,
medium_broadcast_channels_last,
large,
large_transpose,
large_channels_last,
pathological_broadcast,
]
operators = [
add,
sub,
mul,
div,
relu,
sigmoid,
tanh,
log,
exp,
square,
fma,
hardswish,
native_hardswish,
]
nope = set()
for shape, operator in itertools.product(shapes, operators):
nargs = len(inspect.signature(operator).parameters)
args = shape()[:nargs]
try:
if shape == medium_transpose:
raise RuntimeError("pointwise_operator hangs on medium_transpose")
pw_op = pointwise_operator(operator)
torch.testing.assert_allclose(operator(*args), pw_op(*args))
except Exception:
print(f"pointwise_operator failed on {operator.__name__}, {shape.__name__}")
nope.add((operator, shape))
ts_op = torch.jit.script(operator)
torch.testing.assert_allclose(operator(*args), ts_op(*args))
print("fuser,device,operator,shape,time")
results = []
for shape, operator in itertools.product(shapes, operators):
nargs = len(inspect.signature(operator).parameters)
args = shape()[:nargs]
result = benchmark(operator, args)
print(",".join(["eager", args[0].device.type, operator.__name__, shape.__name__, micros(result)]))
try:
if shape == medium_transpose:
raise RuntimeError("pointwise_operator hangs on medium_transpose")
if (operator, shape) in nope:
raise RuntimeError("pointwise_operator fails on medium_transpose")
pw_op = pointwise_operator(operator)
result = benchmark(pw_op, args)
print(",".join(["pointwise", args[0].device.type, operator.__name__, shape.__name__, micros(result)]))
except Exception:
print(",".join(["pointwise", args[0].device.type, operator.__name__, shape.__name__, micros(float("nan"))]))
ts_op = torch.jit.script(operator)
result = benchmark(ts_op, args)
print(",".join(["fuser", args[0].device.type, operator.__name__, shape.__name__, micros(result)]))
sys.stdout.flush()
| pytorch-master | functorch/benchmarks/pointwise_scorecard.py |
import torch
import torch.nn as nn
import torchvision.models as models
from opacus.utils.module_modification import convert_batchnorm_modules
import time
from functorch import vmap, grad
from functorch import make_functional
from opacus import PrivacyEngine
device = 'cuda'
batch_size = 128
torch.manual_seed(0)
model_functorch = convert_batchnorm_modules(models.resnet18(num_classes=10))
model_functorch = model_functorch.to(device)
criterion = nn.CrossEntropyLoss()
images = torch.randn(batch_size, 3, 32, 32, device=device)
targets = torch.randint(0, 10, (batch_size,), device=device)
func_model, weights = make_functional(model_functorch)
def compute_loss(weights, image, target):
images = image.unsqueeze(0)
targets = target.unsqueeze(0)
output = func_model(weights, images)
loss = criterion(output, targets)
return loss
def functorch_per_sample_grad():
compute_grad = grad(compute_loss)
compute_per_sample_grad = vmap(compute_grad, (None, 0, 0))
start = time.time()
result = compute_per_sample_grad(weights, images, targets)
torch.cuda.synchronize()
end = time.time()
return result, end - start # end - start in seconds
torch.manual_seed(0)
model_opacus = convert_batchnorm_modules(models.resnet18(num_classes=10))
model_opacus = model_opacus.to(device)
criterion = nn.CrossEntropyLoss()
for p_f, p_o in zip(model_functorch.parameters(), model_opacus.parameters()):
assert torch.allclose(p_f, p_o) # Sanity check
privacy_engine = PrivacyEngine(
model_opacus,
sample_rate=0.01,
alphas=[10, 100],
noise_multiplier=1,
max_grad_norm=10000.0,
)
def opacus_per_sample_grad():
start = time.time()
output = model_opacus(images)
loss = criterion(output, targets)
loss.backward()
torch.cuda.synchronize()
end = time.time()
expected = [p.grad_sample for p in model_opacus.parameters()]
for p in model_opacus.parameters():
delattr(p, 'grad_sample')
p.grad = None
return expected, end - start
for _ in range(5):
_, seconds = functorch_per_sample_grad()
print(seconds)
result, seconds = functorch_per_sample_grad()
print(seconds)
for _ in range(5):
_, seconds = opacus_per_sample_grad()
print(seconds)
expected, seconds = opacus_per_sample_grad()
print(seconds)
result = [r.detach() for r in result]
print(len(result))
# TODO: The following shows that the per-sample-grads computed are different.
# This concerns me a little; we should compare to a source of truth.
# for i, (r, e) in enumerate(list(zip(result, expected))[::-1]):
# if torch.allclose(r, e, rtol=1e-5):
# continue
# print(-(i+1), ((r - e)/(e + 0.000001)).abs().max())
| pytorch-master | functorch/benchmarks/per_sample_grads.py |
import torch
import torch.fx as fx
from functorch import make_fx
from torch.profiler import profile, ProfilerActivity
from functorch._src.compile_utils import fx_graph_cse
def profile_it(f, inp):
for _ in range(5):
f(inp)
itr = 5
with profile(activities=[ProfilerActivity.CUDA], record_shapes=True) as prof:
for _ in range(itr):
f(inp)
timing = prof.key_averages()
cuda_time_total = 0
for e in timing:
cuda_time_total = cuda_time_total + e.cuda_time_total
return cuda_time_total / itr
def profile_function(name, f, inp):
fx_g = make_fx(f)(inp)
new_g = fx_graph_cse(fx_g.graph)
new_g = fx.GraphModule(fx_g, new_g)
# do not benchmark against the scripted version because script already does some CSE
# script_f = torch.jit.script(fx_g)
# script_g = torch.jit.script(new_g)
# avg_cuda_time_f = profile_it(script_f, inp)
# avg_cuda_time_g = profile_it(script_g, inp)
avg_cuda_time_f = profile_it(fx_g, inp)
avg_cuda_time_g = profile_it(new_g, inp)
num_node_decrease = len(fx_g.graph.nodes) - len(new_g.graph.nodes)
print(f"{name}, {avg_cuda_time_f}, {avg_cuda_time_g}, {num_node_decrease}, {len(fx_g.graph.nodes)}")
g_gpu = torch.Generator(device='cuda')
g_gpu.manual_seed(2147483647)
inp = torch.randn(2**20, device='cuda', generator=g_gpu)
def f1(x):
return x.cos().cos()
profile_function("f1", f1, inp)
def fsum(x):
a = x.sum()
b = x.sum()
c = x.sum()
d = x.sum()
return a + b + c + d
profile_function("fsum", fsum, inp)
def fconcat(x):
a = torch.cat((x, x))
b = torch.cat((x, x))
return a + b
profile_function("fconcat", fconcat, inp)
def fsum2(x):
a = x.sum()
for _ in range(30):
a = a + x.sum()
return a
profile_function("fsum2", fsum2, inp)
def fsummulti(x):
a = 0
for _ in range(3):
a = a + x.sum()
a = a * x.sum()
return a
profile_function("fsummulti", fsummulti, inp)
def fsummulti2(x):
a = 0
for _ in range(30):
a = a + x.sum()
a = a * x.sum()
return a
profile_function("fsummulti2", fsummulti2, inp)
def fcos(x):
a = 0
for _ in range(3):
a = a + x.cos()
return a
profile_function("fcos", fcos, inp)
def fcos2(x):
a = 0
for _ in range(30):
a = a + x.cos()
return a
profile_function("fcos2", fcos2, inp)
| pytorch-master | functorch/benchmarks/cse.py |
from functools import partial
import numpy as np
import pandas as pd
import timeit
import torch
from functorch.compile import pointwise_operator
WRITE_CSV = False
CUDA = False
SIZES = [1, 512, 8192]
NUMBER = [100, 10, 1, 1]
REPEAT = 20
@pointwise_operator
def nnc_add(a, b):
return a + b
@pointwise_operator
def nnc_addnorm(a, b, mean, std):
return (a + b - mean) / std
def eager_addnorm(a, b, mean, std):
return (a + b - mean) / std
def inplace_addnorm(a, b, mean, std, out):
out = torch.add(a, b, out=out)
torch.sub(out, mean, out=out)
torch.div(out, std, out=out)
return out
ts_addnorm = torch.jit.script(eager_addnorm)
ts_ip_addnorm = torch.jit.script(inplace_addnorm)
def maybe_synced(fn):
if CUDA:
synchronize = torch.cuda.synchronize
synchronize() # warmup
def _fn():
result = fn()
synchronize()
return result
return _fn
return fn
def benchmark_loop(setup):
result = np.zeros((REPEAT, len(SIZES), 2), dtype=np.float64)
for s, n in enumerate(SIZES):
nnc, aten = setup(n)
nnc = maybe_synced(nnc)
aten = maybe_synced(aten)
for r in range(result.shape[0]):
result[r, s, 0] = timeit.timeit(nnc, number=NUMBER[s])
result[r, s, 1] = timeit.timeit(aten, number=NUMBER[s])
result = np.median(result, axis=0)
assert result.shape == (len(SIZES), 2)
result = result[:, 1] / result[:, 0]
print(result)
return result
def test(make_args, nnc=nnc_add, aten=torch.add):
def setup(n):
args = make_args(n)
result_aten = aten(*args)
result_nnc = nnc(*args)
assert result_nnc.dtype == result_aten.dtype
assert result_nnc.size() == result_aten.size()
assert result_nnc.stride() == result_aten.stride()
torch.testing.assert_allclose(result_aten, result_nnc)
return (lambda: nnc(*args), lambda: aten(*args))
return benchmark_loop(setup)
def test_inplace(make_args, nnc=nnc_add, aten=torch.add):
def inplace_setup(n):
a, b = make_args(n)
result_aten = torch.clone(a)
result_nnc = torch.clone(a)
nnc(result_nnc, b, out=result_nnc)
aten(result_aten, b, out=result_aten)
torch.testing.assert_allclose(result_aten, result_nnc)
return (lambda: nnc(a, b, out=a), lambda: aten(a, b, out=a))
return benchmark_loop(inplace_setup)
def test_out(make_args, out, nnc=nnc_add, aten=torch.add):
def out_setup(n):
args = make_args(n)
result_aten = out(n)
result_nnc = out(n)
aten(*args, out=result_aten)
nnc(*args, out=result_nnc)
torch.testing.assert_allclose(result_aten, result_nnc)
result = out(n)
return (lambda: nnc(*args, out=result), lambda: aten(*args, out=result))
return benchmark_loop(out_setup)
def test_backwards(make_args, nnc=nnc_add, aten=torch.add):
def backwards_setup(n):
args = make_args(n)
(grad_var,) = [a for a in args if a.requires_grad]
aten(*args).sum().backward()
correct = grad_var.grad.clone()
grad_var.grad.zero_()
nnc(*args).sum().backward()
torch.testing.assert_allclose(correct, grad_var.grad)
return (
lambda: nnc(*args).sum().backward(),
lambda: aten(*args).sum().backward(),
)
return benchmark_loop(backwards_setup)
def main():
torch.set_num_threads(1) # TODO(jansel): add parallel support
torch._C._jit_override_can_fuse_on_cpu(True)
device = "cuda" if CUDA else "cpu"
I = partial(torch.randint, 0, 100, device=device)
R = partial(torch.randn, device=device)
results = [
("add", test(lambda n: (R(n, n), R(n, n)))),
("broadcast1", test(lambda n: (R(n, n), R(1)))),
("broadcast2", test(lambda n: (R(n, n), R(n, 1)))),
("broadcast3", test(lambda n: (R(n, 1), R(1, n)))),
("inplace", test_inplace(lambda n: (R(n, n), R(n, 1)))),
("out=", test_out(lambda n: (R(n, n), R(n, n)), out=lambda n: R(n, n))),
("transposed1", test(lambda n: (R(n, n), R(n, n).transpose(0, 1)))),
(
"transposed2",
test(lambda n: (R(n, n).transpose(0, 1), R(n, n).transpose(0, 1))),
),
("slice1", test(lambda n: (R(n + 1, n + 1, 2)[:n, :n, 0], R(n, n)))),
("slice2", test(lambda n: (R(n, n, 2)[:, :, 0], R(n, n, 2)[:, :, 0]))),
(
"strided out",
test_out(
lambda n: (R(n, n), R(n, n)),
out=lambda n: R(n + 1, n + 1, 2)[:n, :n, 0],
),
),
(
"out convert",
test_out(
lambda n: (R(n, n), R(n, n)), out=lambda n: R(n, n, dtype=torch.float64)
),
),
("issue #57611 (n,32,32,2)", test(lambda n: (R(1, 32, 32, 2), R(n, 1, 1, 2)))),
("float+double", test(lambda n: (R(n, n), R(n, n, dtype=torch.float64)))),
(
"int+long",
test(
lambda n: (I([n, n], dtype=torch.int32), I([n, n], dtype=torch.int64))
),
),
(
"int+short",
test(
lambda n: (I([n, n], dtype=torch.int32), I([n, n], dtype=torch.int16))
),
),
(
"float+int",
test(
lambda n: (R([n, n], dtype=torch.float32), I([n, n], dtype=torch.int32))
),
),
(
"double+long",
test(
lambda n: (R([n, n], dtype=torch.float64), I([n, n], dtype=torch.int64))
),
),
(
"fused addnorm",
test(
lambda n: (R(n, n), R(n, n), R(n, n), R(n, n)),
nnc=nnc_addnorm,
aten=eager_addnorm,
),
),
(
"fused addnorm (vs TS)",
test(
lambda n: (R(n, n), R(n, n), R(n, n), R(n, n)),
nnc=nnc_addnorm,
aten=ts_addnorm,
),
),
(
"fused addnorm out=",
test_out(
lambda n: (R(n, n), R(n, n), R(n, n), R(n, n)),
nnc=nnc_addnorm,
aten=inplace_addnorm,
out=lambda n: R(n, n),
),
),
(
"fused addnorm out= (vs TS)",
test_out(
lambda n: (R(n, n), R(n, n), R(n, n), R(n, n)),
nnc=nnc_addnorm,
aten=ts_ip_addnorm,
out=lambda n: R(n, n),
),
),
(
"fused addnorm backward",
test_backwards(
lambda n: (R(n, n), R(n, n, requires_grad=True), R(n, n), R(n, n)),
nnc=nnc_addnorm,
aten=eager_addnorm,
),
),
(
"fused addnorm backward (vs TS)",
test_backwards(
lambda n: (R(n, n), R(n, n, requires_grad=True), R(n, n), R(n, n)),
nnc=nnc_addnorm,
aten=ts_addnorm,
),
),
]
df = pd.DataFrame(
np.stack([r for n, r in results]),
columns=[f"{n}x{n}".rjust(9) for n in SIZES],
index=[n for n, r in results],
)
if WRITE_CSV:
df.to_csv("../operator_authoring_results.csv")
print("wrote ../operator_authoring_results.csv")
print()
print("Speedups over aten")
pd.options.display.float_format = "{:.2f}x".format
print(df)
if __name__ == "__main__":
main()
| pytorch-master | functorch/benchmarks/operator_authoring.py |
import torch
from functorch.compile import memory_efficient_fusion, clear_compile_cache
import benchmark_helper
device = "cuda"
dtype = torch.float16
# LightSeq pattern 1
class DropoutResBias:
@staticmethod
def fn(input, bias, residual):
a = torch.add(input, bias)
b = torch.nn.functional.dropout(a, p=0.7, training=True)
c = b + residual
return c
@staticmethod
def args():
batch_size, seq_len, hidden_size = 32, 196, 1024
input = torch.randn(
batch_size,
seq_len,
hidden_size,
requires_grad=True,
device=device,
dtype=dtype,
)
bias = torch.randn(hidden_size, requires_grad=True, device=device, dtype=dtype)
residual = torch.randn(
batch_size,
seq_len,
hidden_size,
requires_grad=False,
device=device,
dtype=dtype,
)
args = (input, bias, residual)
return args
class DropoutResBiasScalar:
@staticmethod
def fn(input, bias, residual, p: float):
a = torch.add(input, bias)
b = torch.nn.functional.dropout(a, p, training=True)
c = b + residual
return c
@staticmethod
def args():
batch_size, seq_len, hidden_size = 32, 196, 1024
input = torch.randn(
batch_size,
seq_len,
hidden_size,
requires_grad=True,
device=device,
dtype=dtype,
)
bias = torch.randn(hidden_size, requires_grad=True, device=device, dtype=dtype)
residual = torch.randn(
batch_size,
seq_len,
hidden_size,
requires_grad=False,
device=device,
dtype=dtype,
)
args = (input, bias, residual, 0.7)
return args
# LightSeq pattern 2
class BiasReluDropout:
@staticmethod
def fn(input, bias):
a = torch.add(input, bias)
b = torch.nn.functional.relu(a)
c = torch.nn.functional.dropout(b, p=0.6, training=True)
return c
@staticmethod
def args():
batch_size = 32
seq_len = 196
intermediate_size = 4096
input = torch.randn(
batch_size,
seq_len,
intermediate_size,
requires_grad=True,
device=device,
dtype=dtype,
)
bias = torch.randn(
intermediate_size, requires_grad=True, device=device, dtype=dtype
)
args = (input, bias)
return args
class BiasDropoutResLayerNorm:
@staticmethod
def fn(input, bias, residual):
hidden_size = 1024
a = torch.add(input, bias)
b = torch.nn.functional.dropout(a, p=0.7, training=True)
c = b + residual
d = torch.nn.functional.layer_norm(c, normalized_shape=(hidden_size,))
return d
@staticmethod
def args():
batch_size = 32
seq_len = 196
hidden_size = 1024
input = torch.randn(
batch_size,
seq_len,
hidden_size,
requires_grad=True,
device=device,
dtype=dtype,
)
bias = torch.randn(hidden_size, requires_grad=True, device=device, dtype=dtype)
residual = torch.randn(
batch_size,
seq_len,
hidden_size,
requires_grad=False,
device=device,
dtype=dtype,
)
args = (input, bias, residual)
return args
class LayerNormSigmoid:
@staticmethod
def fn(inp):
hidden_size = 512
a = torch.nn.functional.layer_norm(inp, normalized_shape=(hidden_size,))
b = torch.sigmoid(a)
return b
@staticmethod
def args():
batch_size = 8192
hidden_size = 512
inp = torch.randn(
batch_size, hidden_size, requires_grad=True, device=device, dtype=dtype
)
args = (inp,)
return args
for cl in [DropoutResBias, BiasReluDropout, DropoutResBiasScalar, BiasDropoutResLayerNorm, LayerNormSigmoid]:
# Clear the compile cache
clear_compile_cache()
# Get the function and inputs
obj = cl()
fn = obj.fn
args = obj.args()
# Find the static args
static_argnums = []
for idx, arg in enumerate(args):
if not isinstance(arg, torch.Tensor):
static_argnums.append(idx)
# Get the optimized function
opt_fn = memory_efficient_fusion(fn, static_argnums)
# Profile cuda kernels
benchmark_helper.profile_cuda_kernels(fn, args, "Eager")
with torch.jit.fuser("fuser2"):
benchmark_helper.profile_cuda_kernels(opt_fn, args, "AOTAutograd")
# Time it with Torch Timer
benchmark_helper.time_with_torch_timer(fn, args, "Eager")
with torch.jit.fuser("fuser2"):
benchmark_helper.time_with_torch_timer(opt_fn, args, "AOTAutograd")
# Time it with manual Timer
benchmark_helper.time_with_manual_timer(fn, args, "Eager")
with torch.jit.fuser("fuser2"):
benchmark_helper.time_with_manual_timer(opt_fn, args, "AOTAutograd")
| pytorch-master | functorch/benchmarks/transformer_fusion_patterns/benchmark.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| pytorch-master | functorch/benchmarks/transformer_fusion_patterns/__init__.py |
import torch
from torch.profiler import profile, record_function, ProfilerActivity
from torch.utils.benchmark import Timer
import time
def profile_cuda_kernels(fn, args, string_id="Model time"):
print("################################################")
print(f"#### Profiling for {string_id} starts #########")
print("################################################")
warmup = 50
old_args = args[:]
n_repeats = 1
n_layers = 1
ref = fn(*old_args)
gO = torch.rand_like(ref)
for _ in range(0, warmup // n_layers):
args = list(old_args[:])
ref = fn(*args)
ref.backward(gO)
torch.cuda.synchronize()
# Forward profile
def fwd_run():
for _ in range(0, n_repeats // n_layers):
args = list(old_args[:])
for arg in args:
if isinstance(arg, torch.Tensor):
arg.grad = None
ref = fn(*args)
print(f"###### Forward profile for {string_id} starts #####")
with profile(activities=[ProfilerActivity.CUDA], record_shapes=True) as prof:
with record_function("baseline"):
fwd_run()
print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=30))
print(f"###### Forward profile for {string_id} ends #####")
# Backward profile
def bwd_run():
for _ in range(0, n_repeats // n_layers):
args = list(old_args[:])
for arg in args:
if isinstance(arg, torch.Tensor):
arg.grad = None
ref = fn(*args)
print(f"###### Backward profile for {string_id} starts #####")
torch.cuda.synchronize()
with profile(
activities=[ProfilerActivity.CUDA], record_shapes=True
) as prof:
with record_function("baseline"):
ref.backward(gO)
print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=30))
torch.cuda.synchronize()
print(f"###### Backward profile for {string_id} ends #####")
bwd_run()
print("################################################")
print(f"#### Profiling for {string_id} ends #########")
print("################################################\n\n\n\n")
def time_with_torch_timer(fn, args, string_id, kwargs=None):
if kwargs is None:
kwargs = {}
print("################################################")
print(f"#### Torch Timer for {string_id} starts #########")
print("################################################")
ref = fn(*args, **kwargs)
gO = torch.rand_like(ref)
env = {"args": args, "gO": gO, "kwargs": kwargs, "fn": fn}
    grad_none = "for x in args:\n    if hasattr(x, 'grad'): x.grad = None"
fn_call = "fn(*args, **kwargs)"
# Measure end-to-end fwd time
timer = Timer(stmt=f"{fn_call}", globals=env)
fwd_latency = round(timer.timeit(1000).mean * 10 ** 6, 3)
timer_blocked = timer.blocked_autorange()
print(f"Forward = {fwd_latency}")
# Measure end-to-end fwd bwd
timer = Timer(
stmt=f"{grad_none}; fwd = {fn_call}; fwd.backward(gO)",
globals=env,
)
fwd_bwd_latency = round(timer.timeit(1000).mean * 10 ** 6, 3)
timer_blocked = timer.blocked_autorange()
# print(f"Forward + sum + Backward = {fwd_sum_bwd_latency}")
bwd_latency = round(fwd_bwd_latency - fwd_latency, 3)
print(f"Backward = {bwd_latency}")
print("################################################")
print(f"#### Torch Timer for {string_id} ends ###############")
print("################################################\n\n\n\n")
def time_with_manual_timer(fn, args, string_id):
print("################################################")
print(f"#### Manual Timer for {string_id} starts #########")
print("################################################")
warmup = 50
repeats = 1000
old_args = args[:]
ref = fn(*old_args)
gO = torch.rand_like(ref)
for _ in range(0, warmup):
args = list(old_args[:])
for arg in args:
if isinstance(arg, torch.Tensor):
arg.grad = None
ref = fn(*args)
ref.backward(gO)
torch.cuda.synchronize()
fwd_times = []
bwd_times = []
for _ in range(0, repeats):
args = list(old_args[:])
for arg in args:
if isinstance(arg, torch.Tensor):
arg.grad = None
fwd_start = time.time()
ref = fn(*args)
torch.cuda.synchronize()
fwd_end = time.time()
bwd_start = time.time()
ref.backward(gO)
torch.cuda.synchronize()
bwd_end = time.time()
fwd_times.append(fwd_end - fwd_start)
bwd_times.append(bwd_end - bwd_start)
avg_fwd = round(sum(fwd_times) / repeats * 10 ** 6, 2)
avg_bwd = round(sum(bwd_times) / repeats * 10 ** 6, 2)
avg_total = round(avg_fwd + avg_bwd, 2)
print(f"Forward = {avg_fwd}")
print(f"Backward = {avg_bwd}")
print("################################################")
print(f"#### Manual Timer for {string_id} ends #########")
print("################################################\n\n\n")
| pytorch-master | functorch/benchmarks/transformer_fusion_patterns/benchmark_helper.py |
import torch
from functorch.compile import memory_efficient_pointwise_fusion, clear_compile_cache
import benchmark_helper
# Bias + GELU + dropout pattern: ``bias_gelu_dropout`` uses torch's GELU, while
# ``aot_fn`` spells out the tanh approximation of GELU as pointwise ops.
def bias_gelu_dropout(input, bias):
a = torch.add(input, bias)
b = torch.nn.functional.gelu(a)
c = torch.nn.functional.dropout(b, p=0.6, training=True)
return c
def aot_fn(input, bias):
a = torch.add(input, bias)
b = a * 0.5 * (1.0 + torch.tanh(0.79788456 * a * (1 + 0.044715 * a * a)))
c = torch.nn.functional.dropout(b, p=0.6, training=True)
return c
fn = bias_gelu_dropout
clear_compile_cache()
# Set inputs
device = "cuda"
dtype = torch.float16
batch_size = 32
seq_len = 196
intermediate_size = 4096
# batch_size = 2
# seq_len = 4
# intermediate_size = 3
input = torch.randn(
batch_size,
seq_len,
intermediate_size,
requires_grad=True,
device=device,
dtype=dtype,
)
bias = torch.randn(intermediate_size, requires_grad=True, device=device, dtype=dtype)
# Get the optimized function
opt_fn = memory_efficient_pointwise_fusion(
aot_fn, compiler_name="torchscript_nvfuser"
)
# Profile cuda kernels
benchmark_helper.profile_cuda_kernels(fn, (input, bias), "Eager")
with torch.jit.fuser("fuser2"):
benchmark_helper.profile_cuda_kernels(opt_fn, (input, bias), "AOTAutograd")
# Time it with Torch Timer
benchmark_helper.time_with_torch_timer(fn, (input, bias), "Eager")
with torch.jit.fuser("fuser2"):
benchmark_helper.time_with_torch_timer(opt_fn, (input, bias), "AOTAutograd")
# Time it with manual Timer
benchmark_helper.time_with_manual_timer(fn, (input, bias), "Eager")
with torch.jit.fuser("fuser2"):
benchmark_helper.time_with_manual_timer(opt_fn, (input, bias), "AOTAutograd")
| pytorch-master | functorch/benchmarks/transformer_fusion_patterns/bias_gelu_dropout.py |
"""
==========================
Per-sample-gradients
==========================
What is it?
--------------------------------------------------------------------
Per-sample-gradient computation is computing the gradient for each and every
sample in a batch of data. It is a useful quantity in differential privacy
and optimization research.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.manual_seed(0)
# Here's a simple CNN
class SimpleCNN(nn.Module):
def __init__(self):
super(SimpleCNN, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
output = x
return output
def loss_fn(predictions, targets):
return F.nll_loss(predictions, targets)
# Let's generate a batch of dummy data. Pretend that we're working with an
# MNIST dataset where the images are 28 by 28 and we have a minibatch of size 64.
device = 'cuda'
num_models = 10
batch_size = 64
data = torch.randn(batch_size, 1, 28, 28, device=device)
targets = torch.randint(10, (64,), device=device)
# In regular model training, one would forward the batch of examples and then
# call .backward() to compute gradients:
model = SimpleCNN().to(device=device)
predictions = model(data)
loss = loss_fn(predictions, targets)
loss.backward()
# Conceptually, per-sample-gradient computation is equivalent to: for each sample
# of the data, perform a forward and a backward pass to get a gradient.
def compute_grad(sample, target):
sample = sample.unsqueeze(0)
target = target.unsqueeze(0)
prediction = model(sample)
loss = loss_fn(prediction, target)
return torch.autograd.grad(loss, list(model.parameters()))
def compute_sample_grads(data, targets):
sample_grads = [compute_grad(data[i], targets[i]) for i in range(batch_size)]
sample_grads = zip(*sample_grads)
sample_grads = [torch.stack(shards) for shards in sample_grads]
return sample_grads
per_sample_grads = compute_sample_grads(data, targets)
# per_sample_grads[0] is the per-sample-grad for model.conv1.weight
# model.conv1.weight.shape is [32, 1, 3, 3]; notice how there is one gradient
# per sample in the batch for a total of 64.
print(per_sample_grads[0].shape)
######################################################################
# Per-sample-grads using functorch
# --------------------------------------------------------------------
# We can compute per-sample-gradients efficiently by using function transforms.
# First, let's create a stateless functional version of ``model`` by using
# ``functorch.make_functional_with_buffers``.
from functorch import make_functional_with_buffers, vmap, grad
fmodel, params, buffers = make_functional_with_buffers(model)
# Next, let's define a function to compute the loss of the model given a single
# input rather than a batch of inputs. It is important that this function accepts the
# parameters, the input, and the target, because we will be transforming over them.
# Because the model was originally written to handle batches, we'll use
# ``torch.unsqueeze`` to add a batch dimension.
def compute_loss(params, buffers, sample, target):
batch = sample.unsqueeze(0)
targets = target.unsqueeze(0)
predictions = fmodel(params, buffers, batch)
loss = loss_fn(predictions, targets)
return loss
# Now, let's use ``grad`` to create a new function that computes the gradient
# with respect to the first argument of compute_loss (i.e. the params).
ft_compute_grad = grad(compute_loss)
# ``ft_compute_grad`` computes the gradient for a single (sample, target) pair.
# We can use ``vmap`` to get it to compute the gradient over an entire batch
# of samples and targets. Note that in_dims=(None, None, 0, 0) because we wish
# to map ``ft_compute_grad`` over the 0th dimension of the data and targets
# and use the same params and buffers for each.
ft_compute_sample_grad = vmap(ft_compute_grad, in_dims=(None, None, 0, 0))
# Finally, let's used our transformed function to compute per-sample-gradients:
ft_per_sample_grads = ft_compute_sample_grad(params, buffers, data, targets)
for per_sample_grad, ft_per_sample_grad in zip(per_sample_grads, ft_per_sample_grads):
assert torch.allclose(per_sample_grad, ft_per_sample_grad, atol=1e-6, rtol=1e-6)
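# Extra sanity check (a small sketch, not part of the original text): because
# ``loss_fn`` averages over the batch, the ordinary batched gradients computed by
# ``loss.backward()`` above should equal the per-sample gradients averaged over
# the batch dimension, up to floating-point error. The printed values should all
# be close to zero.
for p, ft_per_sample_grad in zip(model.parameters(), ft_per_sample_grads):
    print((p.grad - ft_per_sample_grad.mean(dim=0)).abs().max())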
# A quick note: there are limitations around what types of functions can be
# transformed by vmap. The best functions to transform are ones that are
# pure functions: a function where the outputs are only determined by the inputs
# that have no side effects (e.g. mutation). vmap is unable to handle mutation of
# arbitrary Python data structures, but it is able to handle many in-place
# PyTorch operations.
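# For instance (an illustrative note, not part of the original text): appending
# intermediate tensors to a Python list inside the mapped function is a side
# effect that vmap cannot track, whereas an in-place op that mutates a batched
# tensor, such as calling ``relu_()`` on the sample passed into the function,
# is supported.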
| pytorch-master | functorch/notebooks/_src/plot_per_sample_gradients.py |
"""
==========================
Model ensembling
==========================
This example illustrates how to vectorize model ensembling using vmap.
What is model ensembling?
--------------------------------------------------------------------
Model ensembling combines the predictions from multiple models together.
Traditionally this is done by running each model on some inputs separately
and then combining the predictions. However, if you're running models with
the same architecture, then it may be possible to combine them together
using ``vmap``. ``vmap`` is a function transform that maps functions across
dimensions of the input tensors. One of its use cases is eliminating
for-loops and speeding them up through vectorization.
Let's demonstrate how to do this using an ensemble of simple CNNs.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.manual_seed(0)
# Here's a simple CNN
class SimpleCNN(nn.Module):
def __init__(self):
super(SimpleCNN, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
output = x
return output
# Let's generate some dummy data. Pretend that we're working with an MNIST dataset
# where the images are 28 by 28.
# Furthermore, let's say we wish to combine the predictions from 10 different
# models.
device = 'cuda'
num_models = 10
data = torch.randn(100, 64, 1, 28, 28, device=device)
targets = torch.randint(10, (6400,), device=device)
models = [SimpleCNN().to(device) for _ in range(num_models)]
# We have a couple of options for generating predictions. Maybe we want
# to give each model a different randomized minibatch of data, or maybe we
# want to run the same minibatch of data through each model (e.g. if we were
# testing the effect of different model initializations).
# Option 1: different minibatch for each model
minibatches = data[:num_models]
predictions1 = [model(minibatch) for model, minibatch in zip(models, minibatches)]
# Option 2: Same minibatch
minibatch = data[0]
predictions2 = [model(minibatch) for model in models]
######################################################################
# Using vmap to vectorize the ensemble
# --------------------------------------------------------------------
# Let's use ``vmap`` to speed up the for-loop. We must first prepare the models
# for use with ``vmap``.
#
# First, let's combine the states of the model together by stacking each parameter.
# For example, model[i].fc1.weight has shape [128, 9216]; we are going to stack the
# .fc1.weight of each of the 10 models to produce a big weight of shape [10, 128, 9216].
#
# functorch offers the following convenience function to do that. It returns a
# stateless version of the model (fmodel) and stacked parameters and buffers.
from functorch import combine_state_for_ensemble
fmodel, params, buffers = combine_state_for_ensemble(models)
[p.requires_grad_() for p in params]
# Option 1: get predictions using a different minibatch for each model.
# By default, vmap maps a function across the first dimension of all inputs to the
# passed-in function. After `combine_state_for_ensemble`, each of ``params``,
# ``buffers`` have an additional dimension of size ``num_models`` at the front;
# and ``minibatches`` has a dimension of size ``num_models``.
print([p.size(0) for p in params])
assert minibatches.shape == (num_models, 64, 1, 28, 28)
from functorch import vmap
predictions1_vmap = vmap(fmodel)(params, buffers, minibatches)
assert torch.allclose(predictions1_vmap, torch.stack(predictions1), atol=1e-6, rtol=1e-6)
# Option 2: get predictions using the same minibatch of data
# vmap has an in_dims arg that specify which dimensions to map over.
# Using ``None``, we tell vmap we want the same minibatch to apply for all of
# the 10 models.
predictions2_vmap = vmap(fmodel, in_dims=(0, 0, None))(params, buffers, minibatch)
assert torch.allclose(predictions2_vmap, torch.stack(predictions2), atol=1e-6, rtol=1e-6)
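# A short follow-up sketch (not part of the original text): one simple way to
# actually combine the ensemble is to average the per-model predictions, for
# example after converting the logits to probabilities.
ensemble_probs = torch.softmax(predictions2_vmap, dim=-1).mean(dim=0)
print(ensemble_probs.shape)  # torch.Size([64, 10]): one combined prediction per example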
# A quick note: there are limitations around what types of functions can be
# transformed by vmap. The best functions to transform are ones that are
# pure functions: a function where the outputs are only determined by the inputs
# that have no side effects (e.g. mutation). vmap is unable to handle mutation of
# arbitrary Python data structures, but it is able to handle many in-place
# PyTorch operations.
| pytorch-master | functorch/notebooks/_src/plot_ensembling.py |
"""
=============================
Jacobians, hessians, and more
=============================
Computing jacobians or hessians is useful in a number of non-traditional
deep learning models. It is difficult (or annoying) to compute these quantities
efficiently using a standard autodiff system like PyTorch Autograd; functorch
provides ways of computing various higher-order autodiff quantities efficiently.
"""
import torch
import torch.nn.functional as F
from functools import partial
torch.manual_seed(0)
######################################################################
# Setup: Comparing functorch vs the naive approach
# --------------------------------------------------------------------
# Let's start with a function that we'd like to compute the jacobian of.
# This is a simple linear function with non-linear activation.
def predict(weight, bias, x):
return F.linear(x, weight, bias).tanh()
# Here's some dummy data: a weight, a bias, and a feature vector.
D = 16
weight = torch.randn(D, D)
bias = torch.randn(D)
x = torch.randn(D)
# Let's think of ``predict`` as a function that maps the input ``x`` from R^D -> R^D.
# PyTorch Autograd computes vector-Jacobian products. In order to compute the full
# Jacobian of this R^D -> R^D function, we would have to compute it row-by-row
# by using a different unit vector each time.
xp = x.clone().requires_grad_()
unit_vectors = torch.eye(D)
def compute_jac(xp):
jacobian_rows = [torch.autograd.grad(predict(weight, bias, xp), xp, vec)[0]
for vec in unit_vectors]
return torch.stack(jacobian_rows)
jacobian = compute_jac(xp)
# Instead of computing the jacobian row-by-row, we can use ``vmap`` to get rid
# of the for-loop and vectorize the computation. We can't directly apply vmap
# to PyTorch Autograd; instead, functorch provides a ``vjp`` transform:
from functorch import vmap, vjp
_, vjp_fn = vjp(partial(predict, weight, bias), x)
ft_jacobian, = vmap(vjp_fn)(unit_vectors)
assert torch.allclose(ft_jacobian, jacobian)
# In another tutorial a composition of reverse-mode AD and vmap gave us
# per-sample-gradients. In this tutorial, composing reverse-mode AD and vmap
# gives us Jacobian computation! Various compositions of vmap and autodiff
# transforms can give us different interesting quantities.
#
# functorch provides ``jacrev`` as a convenience function that performs
# the vmap-vjp composition to compute jacobians. ``jacrev`` accepts an argnums
# argument that says which argument we would like to compute Jacobians with
# respect to.
from functorch import jacrev
ft_jacobian = jacrev(predict, argnums=2)(weight, bias, x)
assert torch.allclose(ft_jacobian, jacobian)
# Let's compare the performance of the two ways to compute jacobian.
# The functorch version is much faster (and becomes even faster the more outputs
# there are). In general, we expect that vectorization via ``vmap`` can help
# eliminate overhead and give better utilization of your hardware.
from torch.utils.benchmark import Timer
without_vmap = Timer(stmt="compute_jac(xp)", globals=globals())
with_vmap = Timer(stmt="jacrev(predict, argnums=2)(weight, bias, x)", globals=globals())
print(without_vmap.timeit(500))
print(with_vmap.timeit(500))
# It's pretty easy to flip the problem around and say we want to compute
# Jacobians of the parameters to our model (weight, bias) instead of the input.
ft_jac_weight, ft_jac_bias = jacrev(predict, argnums=(0, 1))(weight, bias, x)
######################################################################
# reverse-mode Jacobian (jacrev) vs forward-mode Jacobian (jacfwd)
# --------------------------------------------------------------------
# We offer two APIs to compute jacobians: jacrev and jacfwd:
# - jacrev uses reverse-mode AD. As you saw above it is a composition of our
# vjp and vmap transforms.
# - jacfwd uses forward-mode AD. It is implemented as a composition of our
# jvp and vmap transforms.
# jacfwd and jacrev can be substituted for each other and have different
# performance characteristics.
#
# As a general rule of thumb, if you're computing the jacobian of an R^N -> R^M
# function, if there are many more outputs than inputs (i.e. M > N) then jacfwd is
# preferred, otherwise use jacrev. There are exceptions to this rule, but a
# non-rigorous argument for this follows:
# In reverse-mode AD, we are computing the jacobian row-by-row, while in
# forward-mode AD (which computes Jacobian-vector products), we are computing
# it column-by-column. The Jacobian matrix has M rows and N columns.
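# Concretely (an added note): jacrev needs one vjp per row of the Jacobian
# (M of them, vectorized with vmap), while jacfwd needs one jvp per column
# (N of them). The first benchmark below (Din=32, Dout=2048) therefore favors
# jacfwd, and the second (Din=2048, Dout=32) favors jacrev.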
from functorch import jacrev, jacfwd
# Benchmark with more outputs than inputs
Din = 32
Dout = 2048
weight = torch.randn(Dout, Din)
bias = torch.randn(Dout)
x = torch.randn(Din)
using_fwd = Timer(stmt="jacfwd(predict, argnums=2)(weight, bias, x)", globals=globals())
using_bwd = Timer(stmt="jacrev(predict, argnums=2)(weight, bias, x)", globals=globals())
print(f'jacfwd time: {using_fwd.timeit(500)}')
print(f'jacrev time: {using_bwd.timeit(500)}')
# Benchmark with more inputs than outputs
Din = 2048
Dout = 32
weight = torch.randn(Dout, Din)
bias = torch.randn(Dout)
x = torch.randn(Din)
using_fwd = Timer(stmt="jacfwd(predict, argnums=2)(weight, bias, x)", globals=globals())
using_bwd = Timer(stmt="jacrev(predict, argnums=2)(weight, bias, x)", globals=globals())
print(f'jacfwd time: {using_fwd.timeit(500)}')
print(f'jacrev time: {using_bwd.timeit(500)}')
######################################################################
# Hessian computation with functorch.hessian
# --------------------------------------------------------------------
# We offer a convenience API to compute hessians: functorch.hessian.
# Hessians are the jacobian of the jacobian, which suggests that one can just
# compose functorch's jacobian transforms to compute one.
# Indeed, under the hood, ``hessian(f)`` is simply ``jacfwd(jacrev(f))``
#
# Depending on your model, you may want to use ``jacfwd(jacfwd(f))`` or
# ``jacrev(jacrev(f))`` instead to compute hessians.
from functorch import hessian
# # TODO: make sure PyTorch has tanh_backward implemented for jvp!!
# hess0 = hessian(predict, argnums=2)(weight, bias, x)
# hess1 = jacfwd(jacfwd(predict, argnums=2), argnums=2)(weight, bias, x)
hess2 = jacrev(jacrev(predict, argnums=2), argnums=2)(weight, bias, x)
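# An extra illustrative check (a small sketch, not part of the original text):
# for f(x) = sin(x).sum(), the Hessian is the diagonal matrix diag(-sin(x)).
def scalar_fn(x):
    return x.sin().sum()
x_small = torch.randn(5)
expected_hess = torch.diag(-x_small.sin())
assert torch.allclose(hessian(scalar_fn)(x_small), expected_hess, atol=1e-6)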
######################################################################
# Batch Jacobian (and Batch Hessian)
# --------------------------------------------------------------------
# In the above examples we've been operating with a single feature vector.
# In some cases you might want to take the Jacobian of a batch of outputs
# with respect to a batch of inputs where each input produces an independent
# output. That is, given a batch of inputs of shape (B, N) and a function
# that goes from (B, N) -> (B, M), we would like a Jacobian of shape (B, M, N).
# The easiest way to do this is to sum over the batch dimension and then
# compute the Jacobian of that function:
def predict_with_output_summed(weight, bias, x):
return predict(weight, bias, x).sum(0)
batch_size = 64
Din = 31
Dout = 33
weight = torch.randn(Dout, Din)
bias = torch.randn(Dout)
x = torch.randn(batch_size, Din)
batch_jacobian0 = jacrev(predict_with_output_summed, argnums=2)(weight, bias, x)
# If you instead have a function that goes from R^N -> R^M but inputs that are
# batched, you compose vmap with jacrev to compute batched jacobians:
compute_batch_jacobian = vmap(jacrev(predict, argnums=2), in_dims=(None, None, 0))
batch_jacobian1 = compute_batch_jacobian(weight, bias, x)
assert torch.allclose(batch_jacobian0, batch_jacobian1)
# Finally, batch hessians can be computed similarly. It's easiest to think about
# them by using vmap to batch over hessian computation, but in some cases the sum
# trick also works.
compute_batch_hessian = vmap(hessian(predict, argnums=2), in_dims=(None, None, 0))
batch_hess = compute_batch_hessian(weight, bias, x)
| pytorch-master | functorch/notebooks/_src/plot_jacobians_and_hessians.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import _C
# Monkey patch PyTorch. This is a hack, we should try to upstream
# these pieces.
from ._src import monkey_patching as _monkey_patching
# Top-level APIs. Please think carefully before adding something to the
# top-level namespace:
# - private helper functions should go into functorch._src
# - very experimental things should go into functorch.experimental
# - compilation related things should go into functorch.compile
# functorch transforms
from ._src.vmap import vmap
from ._src.eager_transforms import (
grad, grad_and_value, vjp, jacrev, jvp, jacfwd, hessian,
)
from ._src.python_key import make_fx
# utilities. Maybe these should go in their own namespace in the future?
from ._src.make_functional import (
make_functional_with_buffers,
make_functional,
combine_state_for_ensemble,
FunctionalModule,
FunctionalModuleWithBuffers,
)
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
| pytorch-master | functorch/functorch/__init__.py |
from .batch_norm_replacement import replace_all_batch_norm_modules_
# PyTorch forward-mode is not mature yet
from .._src.eager_transforms import jvp, jacfwd, hessian, functionalize
from .._src.vmap import chunk_vmap
| pytorch-master | functorch/functorch/experimental/__init__.py |
import torch.nn as nn
def batch_norm_without_running_stats(module: nn.Module):
if isinstance(module, nn.modules.batchnorm._BatchNorm) and module.track_running_stats:
module.running_mean = None
module.running_var = None
module.num_batches_tracked = None
module.track_running_stats = False
def replace_all_batch_norm_modules_(root: nn.Module) -> nn.Module:
"""
In place updates :attr:`root` by setting the ``running_mean`` and ``running_var`` to be None and
setting track_running_stats to be False for any nn.BatchNorm module in :attr:`root`
"""
# base case
batch_norm_without_running_stats(root)
for obj in root.modules():
batch_norm_without_running_stats(obj)
return root
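# Example usage (an illustrative sketch; ``torchvision`` is only used here for
# the example and is not a dependency of this module):
#
#   import torchvision
#   net = torchvision.models.resnet18()
#   replace_all_batch_norm_modules_(net)
#   # every BatchNorm layer in ``net`` now runs without tracking running stats,
#   # avoiding the in-place buffer updates that transforms such as vmap cannot handle.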
| pytorch-master | functorch/functorch/experimental/batch_norm_replacement.py |
import torch
import torch.fx as fx
import operator
import math
import torch.utils._pytree as pytree
import copy
import os
from collections import defaultdict
from torch.fx.passes import graph_drawer
from typing import Tuple
from .compile_utils import fx_graph_cse, get_aten_target
from . import config
AOT_PARTITIONER_DEBUG = config.debug_partitioner
class InvalidNodeBase(object):
def __repr__(self):
return "Invalid Node"
InvalidNode = InvalidNodeBase()
def _extract_graph_with_inputs_outputs(joint_graph, inputs, outputs):
"""
Given a graph, extracts out a subgraph that takes the specified nodes as
inputs and returns the specified outputs.
This includes specifying non-placeholder nodes as inputs.
The general strategy is to initialize all inputs with proxies as we
encounter them, and trace through the graph, only keeping values which take
in valid proxies. Then, all dead code is eliminated.
"""
new_graph = fx.Graph()
env = {}
# Add new placeholder nodes in the order specified by the inputs
for node in inputs:
new_node = new_graph.placeholder(node.name)
# Can't use node_copy here as we may be turning previous call_function into placeholders
new_node.meta = node.meta
env[node] = new_node
for node in joint_graph.nodes:
if node in inputs:
continue
elif node.op == 'placeholder':
env[node] = InvalidNode
elif node.op == 'call_function':
all_args = pytree.tree_flatten((node.args, node.kwargs))[0]
all_args = [isinstance(env[x], InvalidNodeBase) for x in all_args if isinstance(x, fx.Node)]
if any(all_args):
env[node] = InvalidNode
continue
env[node] = new_graph.node_copy(node, lambda x: env[x])
elif node.op == 'get_attr':
env[node] = new_graph.node_copy(node, lambda x: env[x])
elif node.op == 'output':
pass
output_values = []
for x in outputs:
if isinstance(x, fx.Node):
if x not in env:
raise RuntimeError(f"Node {x} couldn't be found in env")
output_values.append(env[x])
else:
output_values.append(x)
new_graph.output(output_values)
new_graph.eliminate_dead_code()
new_graph.lint()
return new_graph
def _is_primal(node):
return node.op == "placeholder" and "tangents" not in node.target
def _is_tangent(node):
return node.op == "placeholder" and "tangents" in node.target
def _extract_fwd_bwd_outputs(joint_module: fx.GraphModule):
num_fwd_outputs = joint_module._out_spec.children_specs[0].num_leaves
outputs = pytree.tree_flatten([node.args for node in joint_module.graph.nodes if node.op == 'output'])[0]
fwd_outputs = outputs[:num_fwd_outputs]
bwd_outputs = outputs[num_fwd_outputs:]
return fwd_outputs, bwd_outputs
def _extract_fwd_bwd_modules(joint_module: fx.GraphModule, saved_values):
fwd_outputs, bwd_outputs = _extract_fwd_bwd_outputs(joint_module)
primal_inputs = list(filter(_is_primal, joint_module.graph.nodes))
tangent_inputs = list(filter(_is_tangent, joint_module.graph.nodes))
# Construct the forward module
fwd_graph = _extract_graph_with_inputs_outputs(joint_module.graph, primal_inputs, fwd_outputs + saved_values)
bwd_graph = _extract_graph_with_inputs_outputs(joint_module.graph, saved_values + tangent_inputs, bwd_outputs)
# This is to filter out saved values that don't actually end up being used by the backwards pass
for node in bwd_graph.nodes:
if node.op == 'placeholder' and not node.users:
for saved_value in saved_values:
if saved_value.name == node.name:
saved_values.remove(saved_value)
break
# Now, we re-generate the fwd/bwd graphs.
# NB: This might increase compilation time, but I doubt it matters
fwd_graph = _extract_graph_with_inputs_outputs(joint_module.graph, primal_inputs, fwd_outputs + saved_values)
bwd_graph = _extract_graph_with_inputs_outputs(joint_module.graph, saved_values + tangent_inputs, bwd_outputs)
fwd_module = fx.GraphModule(joint_module, fwd_graph)
bwd_module = fx.GraphModule(joint_module, bwd_graph)
return fwd_module, bwd_module
def default_partition(
joint_module: fx.GraphModule, _joint_inputs
) -> Tuple[fx.GraphModule, fx.GraphModule]:
"""
Partitions the :attr:`joint_module` in a manner that closely resembles the
behavior observed in the original ``.forward()`` and ``.backward()`` of the
callable, i.e., the resulting forward graph contains those operators that
are executed in the original ``.forward()`` callable passed to
:func:`aot_function`.
The default partitioner collects the operators that are between the forward
inputs and the forward outputs. This helps in finding the tensors which have
to be stashed for the backward pass. These stashed tensors become the output
of the generated forward graph. The remaining operators are then placed in
the backward graph.
.. warning::
This API is experimental and likely to change.
Args:
joint_module(fx.GraphModule): The joint forward and backward graph. This
is the result of AOT Autograd tracing.
Returns:
Returns the generated forward and backward Fx graph modules.
"""
primal_inputs = list(filter(_is_primal, joint_module.graph.nodes))
fwd_outputs, bwd_outputs = _extract_fwd_bwd_outputs(joint_module)
forward_only_graph = _extract_graph_with_inputs_outputs(joint_module.graph, primal_inputs, fwd_outputs)
forward_node_names = {node.name for node in forward_only_graph.nodes if node.op != 'output'}
saved_values = []
for node in joint_module.graph.nodes:
if node.name not in forward_node_names:
continue
# Since we can't save tuple of tensor values, we need to flatten out what we're saving
if 'tensor_meta' not in node.meta and node.op == 'call_function':
users = node.users
assert all(user.target == operator.getitem for user in users)
for user in users:
saved_values.append(user)
else:
saved_values.append(node)
saved_values = list(set(saved_values))
return _extract_fwd_bwd_modules(joint_module, saved_values)
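# Usage sketch (illustrative; assumes the ``partition_fn`` keyword of
# ``functorch.compile.aot_function`` in this source tree):
#
#   from functorch.compile import aot_function
#
#   def fn(x):
#       return x.cos().cos()
#
#   # ``my_compiler`` stands for any callable taking (fx.GraphModule, example_inputs)
#   # and returning a callable; it is a placeholder, not a real API.
#   compiled_fn = aot_function(fn, fw_compiler=my_compiler, partition_fn=default_partition)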
def _prod(x):
s = 1
for i in x:
s *= i
return s
def _size_of(metadata):
sizes = {
torch.float: 4,
torch.float16: 2,
torch.bfloat16: 2,
torch.float32: 4,
torch.float64: 8,
torch.int: 4,
torch.int8: 1,
torch.int16: 2,
torch.int32: 4,
torch.int64: 8,
torch.uint8: 1,
torch.bool: 1,
}
numel = _prod(metadata.shape)
dtype = metadata.dtype
if dtype not in sizes:
raise NotImplementedError("Don't know the size of dtype ", dtype)
return numel * sizes[dtype]
# Used for some investigative purposes
def _count_ops(graph):
from collections import defaultdict
cnt = defaultdict(int)
for node in graph.nodes:
if node.op == 'call_function':
cnt[node.target.__name__] += 1
print(sorted(cnt.items(), key=lambda x: x[1], reverse=True))
def min_cut_rematerialization_partition(
joint_module: fx.GraphModule, _joint_inputs, compiler="nvfuser"
) -> Tuple[fx.GraphModule, fx.GraphModule]:
"""
Partitions the joint graph such that the backward recomputes the forward.
Recomputing helps in trading off memory bandwidth with computation.
To create the fwd and bwd graph, we copy the joint graph, manually set the
outputs to just original forward or backward outputs. And then we run the
    resulting graphs through dead code elimination.
.. warning::
This API is experimental and likely to change.
Args:
joint_module(fx.GraphModule): The joint forward and backward graph. This
is the result of AOT Autograd tracing.
Returns:
Returns the generated forward and backward Fx graph modules.
"""
try:
import networkx as nx
except ImportError:
raise RuntimeError("Need networkx installed to perform smart recomputation heuristics")
joint_module.graph.eliminate_dead_code()
joint_module.recompile()
fx_g = joint_module.graph
# add the CSE pass
cse_graph = fx_graph_cse(fx_g)
joint_module.graph = cse_graph
full_bw_graph = joint_module.graph
name_to_node = {}
for node in joint_module.graph.nodes:
name_to_node[node.name] = node
def classify_nodes(joint_module):
required_bw_nodes = set()
for node in joint_module.graph.nodes:
if node.op == 'placeholder' and "tangents" in node.target:
required_bw_nodes.add(node)
if node in required_bw_nodes:
for user in node.users:
required_bw_nodes.add(user)
primal_inputs = list(filter(_is_primal, joint_module.graph.nodes))
fwd_outputs, _ = _extract_fwd_bwd_outputs(joint_module)
forward_only_graph = _extract_graph_with_inputs_outputs(joint_module.graph, primal_inputs, fwd_outputs)
required_fw_nodes = {name_to_node[node.name] for node in forward_only_graph.nodes
if node.op != 'output'}
unclaimed_nodes = {node for node in joint_module.graph.nodes
if node not in required_fw_nodes and node not in required_bw_nodes}
return required_fw_nodes, required_bw_nodes, unclaimed_nodes
required_fw_nodes, required_bw_nodes, unclaimed_nodes = classify_nodes(joint_module)
for node in reversed(joint_module.graph.nodes):
if node not in required_fw_nodes:
node.dist_from_bw = 0
else:
node.dist_from_bw = int(1e9)
for user in node.users:
node.dist_from_bw = min(node.dist_from_bw, user.dist_from_bw + 1)
aten = torch.ops.aten
prims = torch.ops.prims
pointwise_ops = [aten.add, aten.sub, aten.div, aten.atan2, aten.mul, aten.max, aten.min, aten.pow, aten.remainder, aten.fmod, aten.__and__, aten.__or__, aten.__xor__, aten.__lshift__, aten.__rshift__, aten.eq, aten.ne, aten.ge, aten.gt, aten.le, aten.lt, aten.abs, aten.bitwise_not, aten.ceil, aten.floor, aten.frac, aten.neg, aten.relu, aten.round, aten.silu, aten.trunc, aten.log, aten.log10, aten.log1p, aten.log2, aten.lgamma, aten.exp, aten.expm1, aten.erf, aten.erfc, aten.cos, aten.acos, aten.cosh, aten.sin, aten.asin, aten.sinh, aten.tan, aten.atan, aten.tanh, aten.atanh, aten.sqrt, aten.rsqrt, aten.reciprocal, aten.sigmoid, aten.softplus, aten.threshold, aten.threshold_backward, aten.clamp, aten.where, aten.lerp, aten.addcmul, aten.gelu, aten.gelu_backward] # noqa: E501
if compiler == "inductor":
pointwise_ops += [prims.div, prims.convert_element_type, aten.sign, aten.clone, aten._to_copy] # noqa: E501
misc_ops = [aten.to, aten.type_as, operator.getitem]
reduction_ops = [aten.softmax, aten._softmax, aten._softmax_backward_data, aten.sum, aten.mean, aten._grad_sum_to_size, aten.sum_to_size, aten.amax] # noqa: E501
if compiler == "inductor":
reduction_ops += [prims.var, prims.sum, aten.var, aten.std]
# not recomputed by default since these are kinda expensive/hard to fuse into
# norm_ops = [aten.instance_norm, aten._batch_norm_impl_index, aten.native_batch_norm, aten.batch_norm, aten._batch_norm_impl_index_backward, aten.native_layer_norm, aten.layer_norm, aten.native_layer_norm_backward] # noqa: E501
# Not used by default since NVFuser can't fuse view ops
# view_ops = [aten.expand, aten.clone, aten.transpose, aten.t, aten.view, aten._unsafe_view, aten.permute, aten.transpose, aten.t, aten._reshape_alias, aten.squeeze, aten.unsqueeze, aten.reshape, aten.cat, aten.slice, aten.split, aten.select, aten.repeat] # noqa: E501
# These are the view ops that NVFuser can fuse
view_ops = [aten.squeeze, aten.unsqueeze]
if compiler == "inductor":
view_ops += [prims.broadcast_in_dim, aten.select, aten.permute, aten._unsafe_view, aten.view, aten.expand, aten.slice, aten.reshape, aten.broadcast_tensors] # noqa: E501
random_ops = [aten.native_dropout, aten.rand_like, aten.randn_like]
compute_intensive_ops = [aten.mm, aten.convolution, aten.convolution_backward, aten.bmm, aten.addmm, aten.upsample_bilinear2d] # noqa: E501
unrecomputable_ops = random_ops + compute_intensive_ops
recomputable_ops = set(
pointwise_ops
+ misc_ops
+ reduction_ops
+ view_ops
)
fusible_ops = recomputable_ops | set(random_ops)
if AOT_PARTITIONER_DEBUG:
joint_module_ops = set(
str(node.target._overloadpacket)
for node in joint_module.graph.nodes
if node.op == "call_function" and hasattr(node.target, "_overloadpacket")
)
ops_ignored = joint_module_ops - set([str(i) for i in recomputable_ops])
print("Ops banned from rematerialization: ", ops_ignored)
print()
AGGRESSIVE_RECOMPUTATION = False
def _maybe_size_of(node):
if 'tensor_meta' in node.meta:
return _size_of(node.meta['tensor_meta'])
return 0
def ban_recomputation(node):
if AGGRESSIVE_RECOMPUTATION:
return (node.op == 'call_function' and get_aten_target(node) in unrecomputable_ops)
else:
if node.op != 'call_function':
return False
if get_aten_target(node) not in recomputable_ops:
return True
if node.target == operator.getitem:
return False
if compiler == "inductor" and node.dist_from_bw > 4:
return True
# If the output of an op is 4x smaller (arbitrary choice),
# then we don't allow recomputation.
if 'tensor_meta' not in node.meta:
return False
input_tensors_size = sum(_maybe_size_of(i) for i in node.args if isinstance(i, fx.Node))
output_size = _size_of(node.meta['tensor_meta'])
return (output_size * 4 < input_tensors_size)
def is_fusible(a, b):
return get_aten_target(a) in fusible_ops and get_aten_target(b) in fusible_ops
def is_materialized(node):
if node.op == 'placeholder':
return True
return not all(is_fusible(node, user) for user in node.users)
def get_node_weight(node):
mem_sz = _size_of(node.meta['tensor_meta'])
# Heuristic to bias towards nodes closer to the backwards pass
# Complete guess about current value
mem_sz = int(mem_sz * (1.1 ** max(min(node.dist_from_bw, 100), 1)))
# mem_sz = int(mem_sz + node.dist_from_bw)
if is_materialized(node):
return mem_sz
else:
return mem_sz * 2
nx_graph = nx.DiGraph()
for node in full_bw_graph.nodes:
if node.op == 'output':
continue
if node in required_bw_nodes:
nx_graph.add_edge(node.name + "_in", "sink", capacity=math.inf)
continue
if node.op == 'placeholder' and "primals" in node.target:
nx_graph.add_edge("source", node.name + "_in", capacity=math.inf)
# If a node can't be recomputed (too expensive or involves randomness),
        # we prevent it from being recomputed by adding an inf edge to the source.
# We only need to ban nodes in the fw pass, as those are the only ones that would be recomputed.
if ban_recomputation(node) and node in required_fw_nodes:
nx_graph.add_edge("source", node.name + "_in", capacity=math.inf)
if 'tensor_meta' not in node.meta:
weight = math.inf
else:
weight = get_node_weight(node)
# Creates the weights on the "node" edge
nx_graph.add_edge(node.name + "_in", node.name + "_out", capacity=weight)
for user in node.users:
nx_graph.add_edge(node.name + "_out", user.name + "_in", capacity=math.inf)
cut_value, partition = nx.minimum_cut(nx_graph, "source", "sink")
reachable, non_reachable = partition
cutset = set()
for u, nbrs in ((n, nx_graph[n]) for n in reachable):
cutset.update((u, v) for v in nbrs if v in non_reachable)
cut_nodes = set()
for node_in, node_out in cutset:
assert node_in[:-3] == node_out[:-4]
node_name = node_in[:-3]
cut_nodes.add(node_name)
# To make this stuff deterministic
node_idx = {node: idx for idx, node in enumerate(joint_module.graph.nodes)}
saved_values = sorted((name_to_node[node] for node in cut_nodes), key=lambda x: node_idx[x])
fw_module, bw_module = _extract_fwd_bwd_modules(joint_module, saved_values)
if AOT_PARTITIONER_DEBUG:
print("Theoretical Activations Stored: ", sum([_size_of(i.meta['tensor_meta']) for i in saved_values]) / 1e9)
fw_module_nodes = set([node.name for node in fw_module.graph.nodes if node.op == 'call_function'])
bw_module_nodes = set([node.name for node in bw_module.graph.nodes if node.op == 'call_function'])
remat_nodes = fw_module_nodes & bw_module_nodes
counts = defaultdict(int)
for node in fw_module.graph.nodes:
if node.name in remat_nodes and hasattr(node.target, '_overloadpacket'):
counts[str(node.target._overloadpacket)] += 1
print("# nodes rematerialized: ", len(remat_nodes))
print("Count of Ops Rematerialized: ", sorted(counts.items(), key=lambda x: x[1], reverse=True))
return fw_module, bw_module
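# Illustrative usage sketch, not part of the original file: the partitioner above is
# typically passed as `partition_fn` to `aot_function` (defined in aot_autograd.py in
# this package). The toy function, the pass-through compiler, and the shape below are
# hypothetical.
def _example_min_cut_partition_usage():
    from functorch.compile import (
        aot_function,
        make_boxed_func,
        min_cut_rematerialization_partition,
    )
    def print_compiler(fx_g, example_inputs):
        # Print the partitioned graph and run it as-is, using the boxed calling convention.
        print(fx_g.code)
        return make_boxed_func(fx_g)
    def f(x):
        return torch.sin(x).cos().sum()
    compiled_f = aot_function(
        f,
        fw_compiler=print_compiler,
        bw_compiler=print_compiler,
        partition_fn=min_cut_rematerialization_partition,
    )
    x = torch.randn(8, requires_grad=True)
    compiled_f(x).backward()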
def draw_graph(traced: torch.fx.GraphModule, fname: str, figname: str = "fx_graph", clear_meta=True):
if clear_meta:
new_graph = copy.deepcopy(traced.graph)
traced = fx.GraphModule(traced, new_graph)
for node in traced.graph.nodes:
node.meta = {}
base, ext = os.path.splitext(fname)
if not ext:
ext = ".svg"
print(f"Writing FX graph to file: {base}{ext}")
g = graph_drawer.FxGraphDrawer(traced, figname)
x = g.get_main_dot_graph()
getattr(x, "write_" + ext.lstrip("."))(f"{base}{ext}")
def draw_joint_graph(graph, joint_inputs, file_name="full_graph.png"):
draw_graph(graph, file_name)
return default_partition(graph, joint_inputs)
| pytorch-master | functorch/functorch/_src/partitioners.py |
import torch
import torch.fx as fx
from torch.utils._pytree import tree_flatten
aten = torch.ops.aten
def get_aten_target(node):
if hasattr(node.target, 'overloadpacket'):
return node.target.overloadpacket
return node.target
rand_ops = [aten.dropout, aten._fused_dropout, aten._standard_gamma,
aten.bernoulli, aten.multinomial, aten.native_dropout,
aten.normal, aten.poisson, aten.binomial, aten.rrelu,
aten.rand_like, aten.rand, aten.randint, aten.randn, aten.randperm]
# return a new copy of torch.fx.graph.Graph with CSE applied to the input graph
def fx_graph_cse(fx_g: torch.fx.graph.Graph):
new_graph = fx.Graph()
env = {} # map from node in the old graph to node in the new graph
hash_env = {} # map from hash to a node in the new graph
token_map = {} # map from hash to token
for n in fx_g.nodes:
        # The placeholder, output, and get_attr nodes are copied to the new graph without change
# do not CSE away random operations
if n.op == 'placeholder' or n.op == 'output' or n.op == 'get_attr' or get_aten_target(n) in rand_ops:
new_node = new_graph.node_copy(n, lambda x: env[x])
env[n] = new_node
else: # n.op == 'call_function', should never see n.op == 'call_module' or 'call_method'
            # substitute args and kwargs members with their mapping in env, if it exists
# specs can be used to reconstruct nested list/dictionaries
def substitute(arg_list):
arg_list, spec = tree_flatten(arg_list)
for i in range(len(arg_list)):
v = arg_list[i]
if isinstance(v, torch.fx.node.Node) and v in env:
arg_list[i] = env[v]
return tuple(arg_list), spec
args, args_spec = substitute(n.args)
kwargs, kwargs_spec = substitute(n.kwargs)
# each token corresponds to a unique node
# nodes with the same token can be substituted
token = {"target": n.target, "args": args, "args_spec": args_spec,
"kwargs": kwargs, "kwargs_spec": kwargs_spec}
# hash substituted args to a number, do not hash specs because specs are not hashable
hash_arg = hash((args, kwargs))
hash_val = (n.target, hash_arg)
# check if a node has a substitute and can be eliminated
hash_val_in_hash_env = hash_val in hash_env
if hash_val_in_hash_env and token_map[hash_val] == token:
env[n] = hash_env[hash_val]
continue
new_node = new_graph.node_copy(n, lambda x: env[x])
env[n] = new_node
if not hash_val_in_hash_env:
hash_env[hash_val] = new_node
token_map[hash_val] = token
return new_graph
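# Illustrative sketch, not part of the original file: a tiny demonstration of the CSE
# pass above. The traced toy function is hypothetical.
def _example_fx_graph_cse():
    def f(x):
        a = torch.cos(x)
        b = torch.cos(x)  # duplicate of `a`; CSE should collapse the two calls into one
        return a + b
    traced = fx.symbolic_trace(f)
    new_graph = fx_graph_cse(traced.graph)
    assert sum(1 for n in new_graph.nodes if n.target is torch.cos) == 1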
def strip_overloads(gm):
"""
Modifies the target of graph nodes in :attr:`gm` to strip overloads.
Args:
gm(fx.GraphModule): The input Fx graph module to be modified
"""
for node in gm.graph.nodes:
if isinstance(node.target, torch._ops.OpOverload):
node.target = node.target.overloadpacket
gm.recompile()
def get_placeholders(graph):
return list(filter(lambda x: x.op == 'placeholder', graph.nodes))
def get_outputs(graph):
for node in graph.nodes:
if node.op == 'output':
return tree_flatten(node.args[0])[0]
raise AssertionError("No output node found")
| pytorch-master | functorch/functorch/_src/compile_utils.py |
# Polyfilled from pytorch core while we figure out the `remove_duplicate` issues.
def _named_members(mod, get_members_fn, prefix='', recurse=True, remove_duplicate=True):
r"""Helper method for yielding various names + members of modules."""
memo = set()
modules = mod.named_modules(prefix=prefix, remove_duplicate=remove_duplicate) if recurse else [(prefix, mod)]
for module_prefix, module in modules:
members = get_members_fn(module)
for k, v in members:
if v is None or v in memo:
continue
if remove_duplicate:
memo.add(v)
name = module_prefix + ('.' if module_prefix else '') + k
yield name, v
def _named_parameters(mod, prefix: str = '', recurse: bool = True, remove_duplicate: bool = True):
gen = _named_members(
mod,
lambda module: module._parameters.items(),
prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate)
for elem in gen:
yield elem
def _named_buffers(mod, prefix: str = '', recurse: bool = True, remove_duplicate: bool = True):
gen = _named_members(
mod,
lambda module: module._buffers.items(),
prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate)
for elem in gen:
yield elem
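# Illustrative sketch, not part of the original file: the polyfills above mirror
# nn.Module.named_parameters/named_buffers while exposing `remove_duplicate`.
# The tied module below is hypothetical.
def _example_named_parameters_polyfill():
    import torch.nn as nn
    lin = nn.Linear(2, 2)
    tied = nn.Sequential(lin, lin)  # the same submodule registered under two names
    # With remove_duplicate=True each shared parameter is yielded once ...
    assert len(list(_named_parameters(tied, remove_duplicate=True))) == 2
    # ... and with remove_duplicate=False it is yielded once per path.
    assert len(list(_named_parameters(tied, remove_duplicate=False))) == 4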
| pytorch-master | functorch/functorch/_src/named_members_polyfill.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Global flags for aot autograd
"""
import os
use_functionalize = False
# TODO: flip this to true by default
# Waiting on
# https://github.com/pytorch/pytorch/pull/81617
# https://github.com/pytorch/pytorch/pull/81609
# https://github.com/pytorch/pytorch/pull/81604
# fix for test_aot_autograd_exhaustive_sgn_cpu_float32 _efficientzerotensor
# fix for complex numbers
use_fake_tensor = False
debug_partitioner = os.environ.get('AOT_PARTITIONER_DEBUG', False)
# Prints out forward + backwards FX graphs
debug_graphs = os.environ.get('AOT_FX_GRAPHS', False)
# Prints out joint graph traced, before partitioning
debug_joint = os.environ.get('AOT_FX_GRAPHS_JOINT', False)
| pytorch-master | functorch/functorch/_src/config.py |
import torch
from torch import Tensor
import torch._decomp
from typing import Tuple, List, Optional
aten = torch.ops.aten
decomposition_table = torch._decomp.decomposition_table
register_decomposition = torch._decomp.register_decomposition
get_decompositions = torch._decomp.get_decompositions
# Decompositions have been ported to torch._decomp inside of PyTorch core.
# The only decompositions here are temporary or hacks.
# Please submit your contributions to PyTorch core!
def maybe_register_decomposition(op):
def decorator(f):
try:
return register_decomposition(op)(f)
except Exception:
return f
return decorator
# Functions where we need a special decomposition for jvp but there's another version that
# should be used more generally (ex. for jvp we need to recompute the mean and variance for
# the backwards of a normalization function. Without jvp, it should use the saved value)
decomposition_table_for_jvp = {}
def register_decomposition_for_jvp(fn):
return register_decomposition(fn, registry=decomposition_table_for_jvp)
@maybe_register_decomposition(aten.trace.default)
def trace(self: Tensor) -> Tensor:
return torch.sum(torch.diag(self))
@maybe_register_decomposition(aten.log_sigmoid_forward.default)
def log_sigmoid_forward(self: Tensor) -> Tuple[Tensor, Tensor]:
min = torch.minimum(self.new_zeros(()), self)
z = torch.exp(-torch.abs(self))
if self.is_cuda:
buffer = self.new_zeros((0,))
else:
buffer = z
return min - torch.log1p(z), buffer
def recompute_mean_var(input: Tensor, rstd: Tensor, inner_dim_indices: List[int], keepdim: bool):
    # For most norm decompositions, the jvp version is the same as the core version except for this step:
    # we recompute the mean and variance so that they track gradients through input.
mean = torch.mean(input, dim=inner_dim_indices, keepdim=keepdim)
var = torch.var(input, dim=inner_dim_indices, unbiased=False, keepdim=keepdim)
eps = torch.pow(1 / rstd, 2) - var # this makes me so sad inside
eps = eps.detach()
rstd = 1 / torch.sqrt(var + eps)
return mean, rstd
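# Illustrative sketch, not part of the original file: a quick numerical check that
# recompute_mean_var reproduces the forward-pass statistics while letting gradients
# flow through `input`. The shape and eps below are hypothetical.
def _example_recompute_mean_var():
    x = torch.randn(4, 8, requires_grad=True)
    mean = torch.mean(x, dim=[1], keepdim=True)
    var = torch.var(x, dim=[1], unbiased=False, keepdim=True)
    rstd = 1 / torch.sqrt(var + 1e-5)
    mean2, rstd2 = recompute_mean_var(x, rstd, [1], keepdim=True)
    assert torch.allclose(mean, mean2) and torch.allclose(rstd, rstd2)
    assert mean2.requires_grad and rstd2.requires_grad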
@register_decomposition_for_jvp(aten.native_layer_norm_backward)
def native_layer_norm_backward(
grad_out: Tensor,
input: Tensor,
normalized_shape: List[int],
mean: Tensor,
rstd: Tensor,
weight: Optional[Tensor],
bias: Optional[Tensor],
output_mask: List[bool],
) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]:
input_shape = input.shape
input_ndim = input.dim()
axis = input_ndim - len(normalized_shape)
inner_dims = input_shape[axis:]
outer_dims = input_shape[:axis]
inner_dim_indices = list(range(axis, input_ndim))
outer_dim_indices = list(range(0, axis))
N = 1
for i in inner_dims:
N *= i
M = 1
for i in outer_dims:
M *= i
if M <= 0 or N <= 0:
return (
input.new_zeros(input_shape),
input.new_zeros(input_shape[axis:]),
input.new_zeros(input_shape[axis:]),
)
mean_, rstd_ = recompute_mean_var(input, rstd, inner_dim_indices, keepdim=True)
x_hat = (input - mean_) * rstd_
if weight is not None:
grad_x_hat = grad_out * weight
else:
grad_x_hat = grad_out
a = grad_x_hat * N
b = torch.sum(grad_x_hat, inner_dim_indices, True)
c1 = torch.mul(grad_x_hat, x_hat)
c2 = torch.sum(c1, inner_dim_indices, True)
c3 = torch.mul(x_hat, c2)
inner = a - b - c3
if output_mask[0]:
d_input: Optional[Tensor] = (rstd_ / N) * inner
else:
d_input = torch.zeros_like(input) # should be None but doesn't work with vjp
if output_mask[1] and weight is not None:
if len(outer_dim_indices) > 0:
d_weight: Optional[Tensor] = torch.sum(
grad_out * x_hat, outer_dim_indices, False
)
else:
d_weight = grad_out * x_hat
elif weight is not None:
d_weight = torch.zeros_like(weight) # should be None but doesn't work with vjp
else:
d_weight = torch.zeros(()) # should be None but doesn't work with vjp
if output_mask[2] and bias is not None:
if len(outer_dim_indices) > 0:
d_bias: Optional[Tensor] = torch.sum(grad_out, outer_dim_indices, False)
else:
d_bias = grad_out
elif bias is not None:
d_bias = torch.zeros_like(bias) # should be None but doesn't work with vjp
else:
d_bias = torch.zeros(()) # should be None but doesn't work with vjp
return (d_input, d_weight, d_bias)
def prod(x: List[int]):
r = 1
for i in x:
r *= i
return r
@register_decomposition_for_jvp(aten.native_batch_norm_backward) # @register_decomposition_for_jvp after in core
def native_batch_norm_backward(
grad_out: Tensor,
input: Tensor,
weight: Optional[Tensor],
running_mean: Optional[Tensor],
running_var: Optional[Tensor],
save_mean: Optional[Tensor],
save_invstd: Optional[Tensor],
train: bool,
eps: float,
output_mask: List[bool],
) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
input_shape = input.shape
input_rank = input.dim()
assert input_rank >= 2, "rank of the input must be at least 2"
axis = 1
num_features = prod(input_shape) / input_shape[axis]
mean = save_mean
invstd = save_invstd
if train:
assert save_mean is not None and save_invstd is not None, "when train=True, save_mean and save_invstd are required"
        reduction_dims = [0] + list(range(2, input.dim()))
assert invstd is not None # for typing
        mean, invstd = recompute_mean_var(input, invstd, reduction_dims, keepdim=False)
else:
assert running_mean is not None and running_var is not None
mean = running_mean
invstd = torch.rsqrt(running_var + eps)
broadcast_mask = [1] * input_rank
broadcast_mask[axis] = input_shape[axis]
reduction_axes: List[int] = []
for i in range(input_rank):
if i != axis:
reduction_axes.append(i)
mean = torch.reshape(mean, broadcast_mask)
norm = 1.0 / num_features
grad_output_sum = torch.sum(grad_out, reduction_axes)
dot_p = torch.sum(grad_out * (input - mean), reduction_axes)
grad_mean = torch.reshape(grad_output_sum * norm, broadcast_mask)
proj_scale = torch.reshape(torch.mul(dot_p * norm, invstd * invstd), broadcast_mask)
if weight is None:
grad_scale = torch.reshape(invstd, broadcast_mask) * 1.0
else:
grad_scale = torch.reshape(invstd * weight, broadcast_mask)
if train:
proj = (input - mean) * proj_scale
grad_input = ((grad_out - proj) - grad_mean) * grad_scale
else:
grad_input = grad_out * grad_scale
if output_mask[1]:
grad_weight = dot_p * invstd
elif weight is not None:
grad_weight = torch.zeros_like(weight) # should be None but doesn't work with vjp
else:
grad_weight = torch.zeros(()) # should be None but doesn't work with vjp
if output_mask[2]:
grad_bias = grad_output_sum
else:
grad_bias = torch.zeros_like(grad_output_sum) # should be None but doesn't work with vjp
return (grad_input, grad_weight, grad_bias)
| pytorch-master | functorch/functorch/_src/decompositions.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
__all__ = ["make_fx", "ProxyTensor", "dispatch_trace", "PythonKeyTracer", "pythonkey_decompose"]
from torch.fx.experimental.proxy_tensor import make_fx, ProxyTensor, dispatch_trace, PythonKeyTracer, decompose
pythonkey_decompose = decompose
PythonTensor = ProxyTensor
| pytorch-master | functorch/functorch/_src/python_key.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils._pytree import tree_flatten, tree_unflatten
def tree_map_(fn_, pytree):
flat_args, _ = tree_flatten(pytree)
[fn_(arg) for arg in flat_args]
return pytree
class PlaceHolder():
def __repr__(self):
return '*'
def treespec_pprint(spec):
leafs = [PlaceHolder() for _ in range(spec.num_leaves)]
result = tree_unflatten(leafs, spec)
return repr(result)
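# Illustrative sketch, not part of the original file: treespec_pprint renders a
# TreeSpec with '*' placeholders at the leaf positions.
def _example_treespec_pprint():
    _, spec = tree_flatten({"a": 1, "b": (2, 3)})
    print(treespec_pprint(spec))  # prints something like {'a': *, 'b': (*, *)}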
| pytorch-master | functorch/functorch/_src/pytree_hacks.py |
"""
From https://docs.google.com/spreadsheets/d/12R3nCOLskxPYjjiNkdqy4OdQ65eQp_htebXGODsjSeA/edit#gid=0
Try to keep this list in sync with that.
"""
top_torch = [
("t", 6837449),
("tensor", 585786),
("mode", 462182),
("cat", 394818),
("max", 368038),
("zeros", 329495),
("load", 327756),
("no_grad", 294694),
("save", 265130),
("from_numpy", 243063),
("manual_seed", 165044),
("ones", 153696),
("randn", 150796),
("stack", 133358),
("sum", 130772),
("arange", 98087),
("rand", 94715),
("mean", 88546),
("exp", 73883),
("zeros_like", 72831),
("min", 72248),
("sigmoid", 66798),
("log", 62135),
("matmul", 47811),
("clamp", 45304),
("sqrt", 44911),
("abs", 43535),
("tanh", 42793),
("empty", 40311),
("argmax", 38435),
("bmm", 33984),
("pow", 33571),
("norm", 31125),
("mm", 30995),
("is_tensor", 29546),
("ones_like", 29512),
("nonzero", 28681),
("full", 28373),
("unsqueeze", 27911),
("where", 26585),
("randperm", 26450),
("eye", 24342),
("mul", 23236),
("topk", 22537),
("as_tensor", 21967),
("sort", 21412),
("squeeze", 20863),
("randint", 20771),
("linspace", 20041),
("add", 19201),
("transpose", 18663),
("split", 18325),
("gather", 17904),
("set_grad_enabled", 16013),
("sin", 15669),
("cos", 15562),
("div", 15513),
("index_select", 14866),
("multinomial", 14331),
("flatten", 14267),
("isnan", 14170),
("randn_like", 13096),
("eq", 12680),
("einsum", 12480),
("round", 12367),
("floor", 11628),
("allclose", 11000),
("reshape", 10605),
("diag", 10167),
("chunk", 9581),
("std", 9379),
("set_default_tensor_type", 9281),
("triu", 8559),
("meshgrid", 8292),
("set_num_threads", 8126),
("unique", 7964),
("full_like", 7780),
("tril", 7538),
("dot", 7275),
("sign", 6943),
("equal", 6916),
("normal", 6750),
("cumsum", 6556),
("dist", 6058),
("isfinite", 6030),
("gt", 5935),
("set_printoptions", 5888),
("range", 5491),
("empty_like", 5351),
("flip", 5342),
("masked_select", 5341),
("bernoulli", 5262),
("atan", 5253),
("var", 5247),
("prod", 5200),
("erf", 5088),
("inverse", 5072),
("addmm", 4854),
("logsumexp", 4582),
("fft", 4436),
("lt", 4421),
("log2", 4316),
("enable_grad", 4238),
("rand_like", 4187),
("argsort", 3972),
("seed", 3932),
("mv", 3547),
("ger", 3309),
("ge", 3248),
("atan2", 3210),
("ceil", 3202),
("ne", 3075),
("bincount", 3063),
("acos", 3055),
("rsqrt", 3031),
("svd", 3029),
("numel", 3003),
("log1p", 2840),
("unbind", 2808),
("le", 2714),
("isinf", 2707),
("cross", 2646),
("set_default_dtype", 2536),
("argmin", 2535),
("sparse_coo_tensor", 2489),
("log10", 2304),
("kthvalue", 2192),
("set_rng_state", 2158),
("get_rng_state", 1996),
("get_default_dtype", 1879),
("det", 1868),
("qr", 1864),
("histc", 1852),
("symeig", 1832),
("trace", 1801),
("median", 1795),
("addcmul", 1751),
("remainder", 1717),
("baddbmm", 1693),
("lgamma", 1665),
("repeat_interleave", 1598),
("fmod", 1576),
("reciprocal", 1575),
("tan", 1560),
("initial_seed", 1532),
("take", 1529),
("stft", 1487),
("get_num_threads", 1477),
("real", 1459),
("cholesky", 1406),
("quantize_per_tensor", 1392),
("diag_embed", 1364),
("lerp", 1363),
("asin", 1345),
("eig", 1333),
("trunc", 1290),
("diagonal", 1287),
("cosh", 1279),
("rfft", 1269),
("cumprod", 1260),
("addr", 1211),
("roll", 1198),
("narrow", 1188),
("digamma", 1172),
("square", 1163),
("sinh", 1131),
("logspace", 1084),
("broadcast_tensors", 1070),
("irfft", 1013),
("frac", 997),
("hann_window", 994),
("solve", 989),
("logdet", 977),
("expm1", 968),
("cdist", 946),
("addmv", 903),
("randint_like", 888),
("tensordot", 888),
("ifft", 877),
("true_divide", 854),
("erfinv", 830),
("addcdiv", 819),
("addbmm", 813),
("renorm", 781),
("pinverse", 753),
("isclose", 740),
("erfc", 729),
("is_storage", 725),
("triangular_solve", 723),
("rot90", 709),
("logical_not", 686),
("geqrf", 681),
("slogdet", 677),
("lu", 665),
("hamming_window", 659),
("orgqr", 651),
("ormqr", 622),
("is_floating_point", 602),
("diagflat", 562),
("cholesky_solve", 559),
("tril_indices", 552),
("chain_matmul", 551),
("triu_indices", 548),
("angle", 522),
("poisson", 505),
("matrix_power", 485),
("unique_consecutive", 471),
("quantize_per_channel", 465),
("std_mean", 458),
("bartlett_window", 447),
("var_mean", 428),
("lstsq", 421),
("logical_and", 419),
("mvlgamma", 411),
("blackman_window", 400),
("bitwise_not", 395),
("cholesky_inverse", 388),
("as_strided", 384),
("floor_divide", 353),
("cartesian_prod", 321),
("lu_solve", 317),
("set_flush_denormal", 310),
("empty_strided", 283),
("logical_xor", 282),
("polygamma", 282),
("logical_or", 280),
("set_num_interop_threads", 278),
("combinations", 274),
("trapz", 270),
("matrix_rank", 260),
("lu_unpack", 255),
("result_type", 244),
("conj", 231),
("cummax", 230),
("lobpcg", 229),
("bitwise_xor", 217),
("promote_types", 213),
("get_num_interop_threads", 211),
("cummin", 205),
("bitwise_and", 198),
("dequantize", 192),
("bitwise_or", 191),
("imag", 191),
("can_cast", 184),
("istft", 180),
("compiled_with_cxx11_abi", 159),
("is_complex", 151),
("block_diag", 136),
("pca_lowrank", 124),
("absolute", 122),
("svd_lowrank", 108),
("neg", 2),
]
top_nn_functional = [
("nn.functional.softmax", 10522),
("nn.functional.relu", 8572),
("nn.functional.interpolate", 7277),
("nn.functional.pad", 5207),
("nn.functional.log_softmax", 4699),
("nn.functional.normalize", 2338),
("nn.functional.cross_entropy", 2083),
("nn.functional.grid_sample", 1970),
("nn.functional.one_hot", 1967),
("nn.functional.mse_loss", 1920),
("nn.functional.conv2d", 1593),
("nn.functional.dropout", 1516),
("nn.functional.softplus", 1385),
("nn.functional.sigmoid", 1128),
("nn.functional.linear", 1036),
("nn.functional.gelu", 930),
("nn.functional.avg_pool2d", 899),
("nn.functional.max_pool2d", 876),
("nn.functional.nll_loss", 863),
("nn.functional.embedding", 737),
("nn.functional.tanh", 664),
("nn.functional.leaky_relu", 640),
("nn.functional.adaptive_avg_pool2d", 633),
("nn.functional.cosine_similarity", 627),
("nn.functional.unfold", 609),
("nn.functional.conv1d", 596),
("nn.functional.binary_cross_entropy_with_logits", 591),
("nn.functional.l1_loss", 571),
("nn.functional.binary_cross_entropy", 492),
("nn.functional.elu", 416),
("nn.functional.batch_norm", 413),
("nn.functional.upsample", 413),
("nn.functional.fold", 305),
("nn.functional.affine_grid", 298),
("nn.functional.max_pool1d", 297),
("nn.functional.torch", 294),
("nn.functional.threshold", 263),
("nn.functional.smooth_l1_loss", 262),
("nn.functional.pairwise_distance", 253),
("nn.functional.logsigmoid", 243),
("nn.functional.adaptive_max_pool2d", 235),
("nn.functional.relu6", 213),
("nn.functional.pixel_shuffle", 209),
("nn.functional.avg_pool3d", 203),
("nn.functional.bilinear", 203),
("nn.functional.conv_transpose2d", 201),
("nn.functional.gumbel_softmax", 197),
("nn.functional.max_unpool2d", 196),
("nn.functional.kl_div", 191),
("nn.functional.hardtanh", 189),
("nn.functional.ctc_loss", 185),
("nn.functional.layer_norm", 178),
("nn.functional.conv3d", 172),
("nn.functional.max_unpool3d", 167),
("nn.functional.hardshrink", 165),
("nn.functional.hardswish", 156),
("nn.functional.selu", 156),
("nn.functional.glu", 155),
("nn.functional.assert_int_or_pair", 150),
("nn.functional.hardsigmoid", 146),
("nn.functional.upsample_bilinear", 146),
("nn.functional.max_pool3d", 140),
("nn.functional.adaptive_avg_pool3d", 139),
("nn.functional.instance_norm", 124),
("nn.functional.embedding_bag", 122),
("nn.functional.upsample_nearest", 110),
("nn.functional.avg_pool1d", 105),
("nn.functional.prelu", 102),
("nn.functional.celu", 92),
("nn.functional.dropout2d", 86),
("nn.functional.hinge_embedding_loss", 82),
("nn.functional.softsign", 81),
("nn.functional.max_unpool1d", 74),
("nn.functional.silu", 74),
("nn.functional.softshrink", 70),
("nn.functional.leaky_relu_", 68),
("nn.functional.softmin", 67),
("nn.functional.channel_shuffle", 66),
("nn.functional.multilabel_margin_loss", 66),
("nn.functional.dropout3d", 65),
("nn.functional.multi_margin_loss", 65),
("nn.functional.lp_pool2d", 64),
("nn.functional.conv_transpose1d", 62),
("nn.functional.triplet_margin_loss", 62),
("nn.functional.tanhshrink", 61),
("nn.functional.adaptive_max_pool1d", 59),
("nn.functional.cosine_embedding_loss", 58),
("nn.functional.multi_head_attention_forward", 58),
("nn.functional.max_pool1d_with_indices", 53),
("nn.functional.poisson_nll_loss", 53),
("nn.functional.margin_ranking_loss", 52),
("nn.functional.soft_margin_loss", 52),
("nn.functional.adaptive_max_pool3d", 51),
("nn.functional.group_norm", 51),
("nn.functional.local_response_norm", 51),
("nn.functional.multilabel_soft_margin_loss", 51),
("nn.functional.relu_", 50),
("nn.functional.alpha_dropout", 49),
("nn.functional.feature_alpha_dropout", 49),
("nn.functional.lp_pool1d", 49),
("nn.functional.adaptive_max_pool1d_with_indices", 48),
("nn.functional.adaptive_max_pool2d_with_indices", 48),
("nn.functional.adaptive_max_pool3d_with_indices", 48),
("nn.functional.fractional_max_pool2d", 48),
("nn.functional.fractional_max_pool2d_with_indices", 48),
("nn.functional.fractional_max_pool3d", 48),
("nn.functional.fractional_max_pool3d_with_indices", 48),
("nn.functional.max_pool2d_with_indices", 48),
("nn.functional.max_pool3d_with_indices", 48),
("nn.functional.handle_torch_function", 47),
("nn.functional.has_torch_function", 47),
("nn.functional.adaptive_avg_pool1d", 43),
("nn.functional.pdist", 43),
("nn.functional.rrelu_", 37),
("nn.functional.elu_", 34),
("nn.functional.boolean_dispatch", 33),
("nn.functional.hardtanh_", 26),
("nn.functional.triplet_margin_with_distance_loss", 23),
("nn.functional.selu_", 20),
("nn.functional.pixel_unshuffle", 19),
("nn.functional.conv_transpose3d", 18),
("nn.functional.gaussian_nll_loss", 15),
("nn.functional.has_torch_function_unary", 15),
("nn.functional.has_torch_function_variadic", 15),
("nn.functional.celu_", 13),
("nn.functional.huber_loss", 7),
("nn.functional.mish", 4),
("nn.functional.threshold_", 3),
("nn.functional.grad", 2),
("nn.functional.conv_tbc", 1),
("nn.functional.math", 1),
]
top_nn_module = [
("nn.Module", 927129, None),
("nn.Linear", 530688, "nn.functional.linear"),
("nn.Sequential", 384968, None),
("nn.Conv2d", 383320, "nn.functional.conv2d"),
("nn.ReLU", 318877, "nn.functional.relu"),
("nn.BatchNorm2d", 233265, "nn.functional.batch_norm"),
("nn.Dropout", 179268, "nn.functional.dropout"),
("nn.ModuleList", 171225, None),
("nn.Parameter", 153291, None),
("nn.CrossEntropyLoss", 152696, "nn.functional.cross_entropy"),
("nn.MaxPool2d", 138619, "nn.functional.max_pool2d"),
("nn.Embedding", 111844, "nn.functional.embedding"),
("nn.DataParallel", 104238, None),
("nn.MSELoss", 82954, "nn.functional.mse_loss"),
("nn.Sigmoid", 75810, "nn.functional.sigmoid"),
("nn.LeakyReLU", 65632, "nn.functional.leaky_relu"),
("nn.BatchNorm1d", 65374, "nn.functional.batch_norm"),
("nn.Softmax", 65114, "nn.functional.softmax"),
("nn.Tanh", 59445, "nn.functional.tanh"),
("nn.AdaptiveAvgPool2d", 59071, "nn.functional.adaptive_avg_pool2d"),
("nn.AvgPool2d", 58377, "nn.functional.avg_pool2d"),
("nn.ConvTranspose2d", 57524, "nn.functional.conv_transpose2d"),
("nn.LSTM", 57411, None),
("nn.Conv1d", 41108, "nn.functional.conv1d"),
("nn.LayerNorm", 36089, "nn.functional.layer_norm"),
("nn.BCELoss", 34005, "nn.functional.binary_cross_entropy"),
("nn.Upsample", 32527, "nn.functional.interpolate"),
("nn.BCEWithLogitsLoss", 29944, "nn.functional.binary_cross_entropy_with_logits"),
("nn.GRU", 25421, None),
("nn.Dropout2d", 23512, "nn.functional.dropout2d"),
("nn.LogSoftmax", 22897, "nn.functional.log_softmax"),
("nn.L1Loss", 22778, "nn.functional.l1_loss"),
("nn.GroupNorm", 22183, "nn.functional.group_norm"),
("nn.NLLLoss", 21751, "nn.functional.nll_loss"),
("nn.Conv3d", 20874, "nn.functional.conv3d"),
("nn.Identity", 17911, None),
("nn.InstanceNorm2d", 16426, "nn.functional.instance_norm"),
("nn.BatchNorm3d", 16378, "nn.functional.batch_norm"),
("nn.PReLU", 13472, "nn.functional.prelu"),
("nn.ReLU6", 12622, "nn.functional.relu6"),
("nn.ELU", 12508, "nn.functional.elu"),
("nn.LSTMCell", 10885, None),
("nn.Flatten", 10384, "torch.flatten"),
("nn.ModuleDict", 10255, None),
("nn.ReflectionPad2d", 9954, "nn.functional.pad"),
("nn.MaxPool3d", 9526, "nn.functional.max_pool3d"),
("nn.MaxPool1d", 9154, "nn.functional.max_pool1d"),
("nn.RNN", 9154, None),
("nn.ZeroPad2d", 8847, "nn.functional.pad"),
("nn.ParameterList", 7702, None),
("nn.SyncBatchNorm", 6814, None),
("nn.PixelShuffle", 6571, "nn.functional.pixel_shuffle"),
("nn.SmoothL1Loss", 6517, "nn.functional.smooth_l1_loss"),
("nn.Hardswish", 6458, "nn.functional.hardswish"),
("nn.AdaptiveMaxPool2d", 6071, "nn.functional.adaptive_max_pool2d"),
("nn.SELU", 6043, "nn.functional.selu"),
("nn.ConvTranspose3d", 6039, "nn.functional.conv_transpose3d"),
("nn.GRUCell", 5840, None),
("nn.ReplicationPad2d", 5600, "nn.functional.pad"),
("nn.KLDivLoss", 5541, "nn.functional.kl_div"),
("nn.ConvTranspose1d", 5183, "nn.functional.conv_transpose1d"),
("nn.Softplus", 5120, "nn.functional.softplus"),
("nn.SiLU", 4895, "nn.functional.silu"),
("nn.AvgPool3d", 4523, "nn.functional.avg_pool3d"),
("nn.CosineSimilarity", 4058, "nn.functional.cosine_similarity"),
("nn.GELU", 3932, "nn.functional.gelu"),
("nn.UpsamplingBilinear2d", 3673, "nn.functional.interpolate"),
("nn.InstanceNorm1d", 3658, "nn.functional.instance_norm"),
("nn.Transformer", 3604, None),
("nn.MultiheadAttention", 3435, "nn.functional.multi_head_attention_forward"),
("nn.AvgPool1d", 3195, "nn.functional.avg_pool1d"),
("nn.Dropout3d", 2964, "nn.functional.dropout3d"),
("nn.AdaptiveAvgPool3d", 2915, "nn.functional.adaptive_avg_pool3d"),
("nn.InstanceNorm3d", 2893, "nn.functional.instance_norm"),
("nn.Hardtanh", 2613, "nn.functional.hardtanh"),
("nn.MarginRankingLoss", 2568, "nn.functional.margin_ranking_loss"),
("nn.GLU", 2526, "nn.functional.glu"),
("nn.AdaptiveAvgPool1d", 2481, "nn.functional.adaptive_avg_pool1d"),
("nn.EmbeddingBag", 2344, "nn.functional.embedding_bag"),
("nn.TransformerEncoderLayer", 2292, None),
("nn.TransformerEncoder", 2091, None),
("nn.MaxUnpool2d", 2031, "nn.functional.max_unpool2d"),
("nn.UpsamplingNearest2d", 2004, "nn.functional.interpolate"),
("nn.ConstantPad1d", 1904, "nn.functional.pad"),
("nn.ConstantPad2d", 1791, "nn.functional.pad"),
("nn.CTCLoss", 1789, "nn.functional.ctc_loss"),
("nn.AdaptiveMaxPool1d", 1713, "nn.functional.adaptive_max_pool1d"),
("nn.AdaptiveLogSoftmaxWithLoss", 1665, None),
("nn.Bilinear", 1664, "nn.functional.bilinear"),
("nn.RNNCell", 1653, None),
("nn.MultiLabelSoftMarginLoss", 1624, "nn.functional.multilabel_soft_margin_loss"),
("nn.Unfold", 1452, "nn.functional.unfold"),
("nn.RReLU", 1431, "nn.functional.rrelu"),
("nn.CosineEmbeddingLoss", 1357, "nn.functional.cosine_embedding_loss"),
("nn.LocalResponseNorm", 1331, "nn.functional.local_response_norm"),
("nn.Softmax2d", 1300, "nn.functional.softmax"),
("nn.PairwiseDistance", 1241, "nn.functional.pairwise_distance"),
("nn.LogSigmoid", 1235, "nn.functional.logsigmoid"),
("nn.TripletMarginLoss", 1230, "nn.functional.triplet_margin_loss"),
("nn.RNNBase", 1133, None),
("nn.Threshold", 1043, "nn.functional.threshold"),
("nn.AdaptiveMaxPool3d", 1025, "nn.functional.adaptive_max_pool3d"),
("nn.CELU", 1018, "nn.functional.celu"),
("nn.NLLLoss2d", 966, "nn.functional.nll_loss"),
("nn.Softsign", 877, "nn.functional.softsign"),
("nn.ReplicationPad1d", 862, "nn.functional.pad"),
("nn.SoftMarginLoss", 856, "nn.functional.soft_margin_loss"),
("nn.ParameterDict", 742, None),
("nn.ReflectionPad1d", 731, "nn.functional.pad"),
("nn.Softshrink", 713, "nn.functional.softshrink"),
("nn.AlphaDropout", 710, "nn.functional.alpha_dropout"),
("nn.Tanhshrink", 681, "nn.functional.tanhshrink"),
("nn.PoissonNLLLoss", 676, "nn.functional.poisson_nll_loss"),
("nn.MaxUnpool3d", 660, "nn.functional.max_unpool3d"),
("nn.Fold", 630, "nn.functional.fold"),
("nn.MultiMarginLoss", 622, "nn.functional.multi_margin_loss"),
("nn.TransformerDecoderLayer", 614, None),
("nn.TransformerDecoder", 607, None),
("nn.Hardshrink", 592, "nn.functional.hardshrink"),
("nn.ConstantPad3d", 582, "nn.functional.pad"),
("nn.MultiLabelMarginLoss", 580, "nn.functional.multilabel_margin_loss"),
("nn.LPPool2d", 550, "nn.functional.lp_pool2d"),
("nn.Softmin", 537, "nn.functional.softmin"),
("nn.MaxUnpool1d", 518, "nn.functional.max_unpool1d"),
("nn.FractionalMaxPool2d", 484, "nn.functional.fractional_max_pool2d"),
("nn.Hardsigmoid", 477, "nn.functional.hardsigmoid"),
("nn.ReplicationPad3d", 470, "nn.functional.pad"),
("nn.HingeEmbeddingLoss", 442, "nn.functional.hinge_embedding_loss"),
("nn.LPPool1d", 386, "nn.functional.lp_pool1d"),
("nn.FractionalMaxPool3d", 252, "nn.functional.fractional_max_pool3d"),
("nn.Container", 217, None),
("nn.Unflatten", 206, "nn.functional.unflatten"),
("nn.FeatureAlphaDropout", 136, "nn.functional.feature_alpha_dropout"),
("nn.TripletMarginWithDistanceLoss", 107, "nn.functional.triplet_margin_with_distance_loss"),
("nn.ChannelShuffle", 90, "nn.functional.channel_shuffle"),
("nn.RNNCellBase", 88, None),
("nn.LazyLinear", 81, "nn.functional.linear"),
("nn.UninitializedParameter", 60, None),
("nn.CrossMapLRN2d", 59, None),
("nn.GaussianNLLLoss", 55, "nn.functional.gaussian_nll_loss"),
("nn.PixelUnshuffle", 45, "nn.functional.pixel_unshuffle"),
("nn.Mish", 31, "nn.functional.mish"),
("nn.ReflectionPad3d", 22, "nn.functional.pad"),
("nn.HuberLoss", 18, "nn.functional.huber_loss"),
("nn.LazyConv2d", 15, None),
("nn.LazyConv1d", 9, None),
("nn.LazyConv3d", 8, None),
("nn.LazyConvTranspose1d", 8, None),
("nn.LazyConvTranspose2d", 8, None),
("nn.LazyConvTranspose3d", 8, None),
("nn.LazyBatchNorm1d", 3, None),
("nn.LazyBatchNorm2d", 3, None),
("nn.LazyBatchNorm3d", 3, None),
("nn.UninitializedBuffer", 3, None),
]
# No rankings because these are a little hard to get rankings for
method_only_ops = [
'bfloat16',
'bool',
'byte',
'char',
'contiguous',
'cpu',
'cuda',
'detach',
'double',
'expand',
'expand_as',
'float',
'get_device',
'half',
'hardshrink',
'index_add',
'index_copy',
'index_fill',
'index_put',
'int',
'is_contiguous',
'is_pinned',
'is_set_to',
'is_shared',
'is_signed',
'item',
'long',
'masked_scatter',
'masked_fill',
'narrow_copy',
'numpy',
'pin_memory',
'repeat',
'reshape_as',
'select',
'short',
'storage_offset',
'sum_to_size',
'to',
'to_mkldnn',
'tolist',
'type',
'type_as',
'unfold',
'view',
'view_as',
]
def get_nn_functional_top_list():
top_nn_functional_ = {k: v for k, v in top_nn_functional}
for _, count, functional_name in top_nn_module:
if functional_name is None:
continue
if functional_name == 'torch.flatten':
continue
if functional_name not in top_nn_functional_:
top_nn_functional_[functional_name] = count
else:
top_nn_functional_[functional_name] += count
top_nn_functional_ = [(k, v) for k, v in top_nn_functional_.items()]
top_nn_functional_.sort(key=lambda x: x[1], reverse=True)
return top_nn_functional_
usage_count = {}
for k, v in get_nn_functional_top_list():
usage_count[k] = v
for k, v in top_torch:
usage_count[k] = v
| pytorch-master | functorch/functorch/_src/top_operators_github_usage.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| pytorch-master | functorch/functorch/_src/__init__.py |
import torch
import functorch._C
m = functorch._C._dispatch_library("FRAGMENT", "aten", "")
def custom_vjp(name, filter_fn, fwd_fn, bwd_fn):
m.def_(f"{name}(Tensor[] args) -> Tensor[]")
m.impl(f"{name}", "CompositeImplicitAutograd", fwd_fn)
m.def_(f"{name}_vjp(Tensor[] args) -> Tensor[]")
m.impl(f"{name}_vjp", "CompositeImplicitAutograd", bwd_fn)
# TODO: it looks like the autograd alias key doesn't work
m.gen_backward_binding(f"{name}", "AutogradCPU")
m.gen_backward_binding(f"{name}", "AutogradCUDA")
def wrapped(*args):
return filter_fn(getattr(torch.ops.aten, name)(args))
return wrapped
| pytorch-master | functorch/functorch/_src/custom_function.py |
import torch.fx as fx
import copy
import torch
import math
from typing import Callable, List
from functools import wraps, partial
from dataclasses import dataclass
from .compile_utils import get_placeholders, get_outputs
class ConcreteProp(torch.fx.Interpreter):
def run_node(self, n):
result = super().run_node(n)
found_tensor = False
def extract_tensor_meta(obj):
if isinstance(obj, torch.Tensor):
nonlocal found_tensor
found_tensor = True
return obj
else:
return obj
from torch.fx.node import map_aggregate
concrete_value = map_aggregate(result, extract_tensor_meta)
if found_tensor:
n.meta['concrete_value'] = concrete_value
return result
def propagate(self, *args):
return super().run(*args)
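# Illustrative sketch, not part of the original file: ConcreteProp runs a graph with
# real inputs and stores each node's concrete value in node.meta, which
# _convert_node_to_placeholder below relies on. The toy graph is hypothetical.
def _example_concrete_prop():
    def f(x):
        return torch.sin(x) + 1
    gm = fx.symbolic_trace(f)
    ConcreteProp(gm).propagate(torch.randn(3))
    assert all('concrete_value' in n.meta for n in gm.graph.nodes if n.op != 'output')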
# inplace modifies node/inps
def _convert_node_to_placeholder(node, inps):
if node.op == 'output':
return
node.op = 'placeholder'
node.args = ()
node.target = node.name
concrete_val = node.meta['concrete_value']
if isinstance(concrete_val, torch.Tensor):
inps.append(concrete_val)
else:
inps.append(torch.zeros(()))
for tuple_user in list(node.users):
_convert_node_to_placeholder(tuple_user, inps)
def dump_state(fx_g, inps):
print(f"""
# Working Repro with {len(fx_g.graph.nodes)} nodes
inps = {[(i.shape, i.dtype, i.device.type) for i in inps]}
inps = [torch.zeros(())] + [torch.ones(shape, dtype=dtype, device=device) for (shape, dtype, device) in inps]
{fx_g.code}
""")
@dataclass
class ReproState:
graph: fx.Graph
inps: List[torch.Tensor]
def minifier(fail_f: fx.GraphModule, inps, module_fails, dump_state: Callable = dump_state):
"""
    Minimizes an FX graph with the given inputs such that the resulting FX graph still returns True for module_fails.
    Uses 2 main strategies:
    1. Truncates suffix: Removes some suffix from the graph and sets a new output.
    2. Delta Debugging: Tries replacing half of the graph with inputs. If that fails,
        tries replacing a quarter of the graph, etc.
>>> failing_function = fx.symbolic_trace(f)
    >>> minifier(failing_function, [torch.randn(5)], lambda fx_g, inps: fx_g(*inps))
note: module_fails returns True if it fails.
"""
failing_graph = fail_f.graph
cur_size = len(failing_graph.nodes)
num_queries = 0
def graph_fails(graph, inps):
nonlocal num_queries
num_queries += 1
mod = fx.GraphModule(fail_f, graph)
mod.graph.lint()
return module_fails(mod, inps)
ConcreteProp(fail_f).propagate(*inps)
if not graph_fails(failing_graph, inps):
raise RuntimeError("Input graph did not fail the tester")
print(f"Started off with {cur_size} nodes")
def _register_strategy(strategy: Callable, name: str):
@wraps(strategy)
def new_func(old_state: ReproState, granularity=1):
print()
print(f"Strategy: {name} (G: {granularity}) ({len(old_state.graph.nodes)} nodes, {len(old_state.inps)} inputs)")
new_state = strategy(copy.deepcopy(old_state.graph), list(old_state.inps), granularity)
if new_state is not None:
new_nodes = len(new_state.graph.nodes)
old_nodes = len(old_state.graph.nodes)
new_inps = len(new_state.inps)
old_inps = len(old_state.inps)
new_outs = len(get_outputs(new_state.graph))
old_outs = len(get_outputs(old_state.graph))
progress_made = False
if new_nodes < old_nodes:
progress_made = True
print(f"SUCCESS: Went from {old_nodes} to {new_nodes} nodes")
if new_inps > old_inps:
progress_made = True
print(f"SUCCESS: Went from {old_inps} to {new_inps} inputs")
if new_outs < old_outs:
progress_made = True
print(f"SUCCESS: Went from {old_outs} to {new_outs} outputs")
if not progress_made:
raise RuntimeError("Success raised but no progress made?")
if not graph_fails(new_state.graph, new_state.inps):
print("WARNING: Something went wrong, not applying this minification")
return None
return new_state
else:
print(f"FAIL: {name}")
return None
return new_func
def register_strategy(name: str):
return partial(_register_strategy, name=name)
@register_strategy("Truncate suffix")
def remove_suffix(cur_graph, cur_inps, granularity):
tested = set()
new_graph = fx.Graph()
env = {}
for idx, node in enumerate(cur_graph.nodes):
new_node = new_graph.node_copy(node, lambda x: env[x])
if node.op not in ['placeholder', 'output']:
if idx % granularity == 0 and idx not in tested:
output_node = new_graph.output((new_node,))
if len(new_graph.nodes) < len(cur_graph.nodes) and graph_fails(new_graph, cur_inps):
return ReproState(new_graph, cur_inps)
else:
tested.add(idx)
new_graph.erase_node(output_node)
env[node] = new_node
return None
@register_strategy("Remove outputs")
def remove_outputs(cur_graph, cur_inps, granularity):
for idx, node in enumerate(cur_graph.nodes):
node.idx = idx
if node.op == 'output':
output = node
break
output_args = sorted(output.args[0], key=lambda x: x.idx if isinstance(x, fx.Node) else int(1e9))
if len(output_args) == 1:
return None
for idx in range(1, len(output_args)):
output.args = (output_args[:idx],)
if graph_fails(cur_graph, cur_inps):
return ReproState(cur_graph, cur_inps)
return None
def remove_unused_inputs_unchecked(cur_state: ReproState):
cur_graph = cur_state.graph
cur_inps = cur_state.inps
ph_nodes = get_placeholders(cur_graph)
assert len(ph_nodes) == len(cur_inps)
new_inps = []
for idx in range(len(ph_nodes)):
if len(ph_nodes[idx].users) == 0:
cur_graph.erase_node(ph_nodes[idx])
else:
new_inps.append(cur_inps[idx])
if len(new_inps) < len(cur_inps):
return ReproState(cur_graph, new_inps)
return None
def remove_unused_inputs_checked(cur_state: ReproState):
new_state = remove_unused_inputs_unchecked(cur_state)
if new_state is not None and graph_fails(new_state.graph, new_state.inps):
return new_state
return None
def _remove_unused_wrapper(cur_graph, cur_inps, granularity):
return remove_unused_inputs_checked(ReproState(cur_graph, cur_inps))
remove_unused_inputs = register_strategy("Remove unused inputs")(_remove_unused_wrapper)
@register_strategy("Eliminate dead code")
def eliminate_dead_code(cur_graph, cur_inps, granularity):
if cur_graph.eliminate_dead_code() and graph_fails(cur_graph, cur_inps):
return ReproState(cur_graph, cur_inps)
return None
def _consolidate_placeholders(cur_graph):
new_graph = fx.Graph()
env = {}
for node in cur_graph.nodes:
if node.op == 'placeholder':
new_node = new_graph.node_copy(node, lambda x: env[x])
env[node] = new_node
for node in cur_graph.nodes:
if node.op != 'placeholder':
new_node = new_graph.node_copy(node, lambda x: env[x])
env[node] = new_node
return new_graph
@register_strategy("Delta Debugging")
def delta_debugging(cur_graph: fx.Graph, cur_inps, granularity):
num_nodes = len(cur_graph.nodes)
for start_range in range(0, num_nodes, granularity):
is_removing = False
new_graph = copy.deepcopy(cur_graph)
new_inps = cur_inps[:]
end_range = min(num_nodes, start_range + granularity)
for idx in range(start_range, end_range):
new_node = list(new_graph.nodes)[idx]
if new_node.op not in ['placeholder', 'output']:
is_removing = True
_convert_node_to_placeholder(new_node, new_inps)
if not is_removing:
continue
new_graph = _consolidate_placeholders(new_graph)
new_state = remove_unused_inputs_unchecked(ReproState(new_graph, new_inps))
if new_state is None:
new_state = ReproState(new_graph, new_inps)
if graph_fails(new_state.graph, new_state.inps):
return ReproState(new_state.graph, new_state.inps)
return None
failing_state = ReproState(failing_graph, inps)
def try_granularity(failing_state, granularity):
print(f"Trying granularity {granularity}")
for strategy in [eliminate_dead_code, remove_unused_inputs, remove_suffix, delta_debugging]:
new_state = strategy(failing_state, granularity)
if new_state is not None:
return new_state
return None
while True:
granularity = int(2**(math.floor(math.log2(len(failing_state.graph.nodes)))))
has_progress = False
while granularity >= 1:
new_state = try_granularity(failing_state, granularity)
if new_state is not None:
dump_state(fx.GraphModule(fail_f, new_state.graph), new_state.inps)
failing_state = new_state
has_progress = True
break
granularity //= 2
if not has_progress:
new_state = remove_outputs(failing_state, 1)
if new_state is not None:
has_progress = True
failing_state = new_state
if not has_progress:
break
if not graph_fails(failing_state.graph, failing_state.inps):
raise RuntimeError("Uh oh, something went wrong :( Final graph is not failing")
print(f"Made {num_queries} queries")
failing_fx = fx.GraphModule(fail_f, failing_state.graph)
dump_state(failing_fx, failing_state.inps)
return failing_fx, failing_state.inps
| pytorch-master | functorch/functorch/_src/fx_minifier.py |
import torch
import functorch._C as _C
import functools
# Monkeypatch tensor printing in pytorch
_old_str = torch._tensor_str._str
def prep_value(text, indent=4):
first_line_txt = ''
lines = text.split('\n')
lines[0] = lines[0]
lines[0] = ' ' * indent + first_line_txt + lines[0]
for i in range(1, len(lines)):
lines[i] = ' ' * (indent + len(first_line_txt)) + lines[i]
return '\n'.join(lines)
@functools.wraps(_old_str)
def _functorch_str(tensor, *, tensor_contents=None):
level = _C.maybe_get_level(tensor)
if level == -1:
return _old_str(tensor)
if _C.is_functionaltensor(tensor):
# Since we're unwrapping the FunctionalTensorWrapper, we need to make sure
# that it's up to date first
torch._sync(tensor)
value = _C.get_unwrapped(tensor)
dl_enabled = _C.tls_set_is_included()
try:
        # Temporarily disable kDynamicLayerFrontModeKey/kDynamicLayerBackModeKey as included dispatch keys
if (dl_enabled):
_C._set_dynamic_layer_keys_included(False)
value_repr = repr(value)
finally:
# Reenable kDynamicLayerFrontModeKey/kDynamicLayerBackModeKey as included dispatch keys
if (dl_enabled):
_C._set_dynamic_layer_keys_included(True)
if _C.is_batchedtensor(tensor):
bdim = _C.maybe_get_bdim(tensor)
assert bdim != -1
return (
f'BatchedTensor(lvl={level}, bdim={bdim}, value=\n'
f'{prep_value(value_repr)}\n'
f')'
)
if _C.is_gradtrackingtensor(tensor):
return (
f'GradTrackingTensor(lvl={level}, value=\n'
f'{prep_value(value_repr)}\n'
f')'
)
if _C.is_functionaltensor(tensor):
return f'FunctionalTensor(lvl={level}, value=\\\n{value_repr})'
raise ValueError("We don't know how to print this, please file us an issue")
torch._tensor_str._str = _functorch_str
# Monkeypatch .backward() to error out if any transforms are active.
# TODO: remove the monkeypatching and add an extension point into PyTorch core
_old_backward = torch.Tensor.backward
@functools.wraps(_old_backward)
def _backward(*args, **kwargs):
if _C.are_transforms_active():
raise RuntimeError(
"backward() called inside a functorch transform. This is not "
"supported, please use functorch.grad or functorch.vjp instead "
"or call backward() outside of functorch transforms.")
return _old_backward(*args, **kwargs)
torch.Tensor.backward = _backward
| pytorch-master | functorch/functorch/_src/monkey_patching.py |
import dataclasses
import warnings
from contextlib import contextmanager, nullcontext
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
import torch.fx.traceback as fx_traceback
import torch.nn as nn
import torch.utils._pytree as pytree
import torch.utils.dlpack
from torch import Tensor
from torch._subclasses import FakeTensorMode
from torch.fx import immutable_collections, Interpreter
from torch.nn.utils import stateless
from functorch import make_fx
from functorch._C import CompileCache
from functorch.experimental import functionalize
from . import config
from .decompositions import register_decomposition
from .named_members_polyfill import _named_buffers, _named_parameters
from .partitioners import default_partition
try:
from torchdynamo import disable as disable_torchdynamo
except ImportError:
def disable_torchdynamo(x):
return x
pytree._register_pytree_node(
immutable_collections.immutable_list,
lambda x: (list(x), None),
lambda x, c: immutable_collections.immutable_list(x),
)
pytree._register_pytree_node(
immutable_collections.immutable_dict,
lambda x: (list(x.values()), list(x.keys())),
lambda x, c: immutable_collections.immutable_dict(
{key: value for key, value in zip(c, x)}
),
)
aten = torch.ops.aten
@contextmanager
def preserve_rng_state():
rng_state = torch.clone(torch.random.get_rng_state())
if torch.cuda.is_available():
cuda_rng_state = torch.clone(torch.cuda.get_rng_state())
try:
yield
finally:
torch.random.set_rng_state(rng_state)
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
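# Illustrative sketch, not part of the original file: random ops executed inside the
# context manager above do not perturb the caller's RNG stream.
def _example_preserve_rng_state():
    before = torch.random.get_rng_state()
    with preserve_rng_state():
        torch.randn(4)  # consumes RNG inside the context
    assert torch.equal(before, torch.random.get_rng_state())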
def create_joint_forward_backward(fn):
def joint_forward_backward(
primals: List[Any], tangents: List[Any]
) -> Tuple[List[Any], List[Any]]:
# Call the forward pass
outs = fn(*primals)
# Get the inputs that need gradients
grad_primals = []
inputs_needs_grads = []
for p in primals:
is_grad_tensor = isinstance(p, Tensor) and p.requires_grad
inputs_needs_grads.append(is_grad_tensor)
if is_grad_tensor:
grad_primals.append(p)
# Get the outputs that need gradients
assert len(tangents) == len(outs)
needed_outs = []
needed_tangents = []
for out, tangent in zip(outs, tangents):
if isinstance(out, Tensor) and out.requires_grad:
needed_outs.append(out)
needed_tangents.append(tangent)
backward_out = []
# Call the backwards pass
if grad_primals:
backward_out = torch.autograd.grad(
needed_outs,
grad_primals,
grad_outputs=needed_tangents,
allow_unused=True,
)
backward_out_iter = iter(backward_out)
return outs, [
next(backward_out_iter) if i else None for i in inputs_needs_grads
]
return joint_forward_backward
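# Illustrative sketch, not part of the original file: tracing the joint callable
# produced above with make_fx yields the joint graph that aot_dispatch_autograd
# below partitions into forward and backward halves. The toy function, shape, and
# tangent are hypothetical.
def _example_joint_forward_backward():
    def f(x):
        return [torch.sin(x).sum()]
    joint_fn = create_joint_forward_backward(f)
    primal = torch.randn(4, requires_grad=True)
    tangent = torch.ones(())  # one tangent per (scalar) output
    with torch.set_grad_enabled(True):
        joint_graph = make_fx(joint_fn)([primal], [tangent])
    print(joint_graph.code)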
def normalize_as_list(x):
if isinstance(x, tuple):
return list(x)
elif isinstance(x, list):
return x
return [x]
aot_autograd_decompositions = {}
# TODO: Remove these stupid decompositions
@register_decomposition(aten._reshape_alias, aot_autograd_decompositions)
def _reshape_alias(x, shape, strides):
return aten.view(x, shape)
graph_being_compiled: str = None
nth_graph: int = 0
model_name: str = "model"
def set_model_name(name):
global model_name
model_name = name
def get_graph_being_compiled() -> str:
"""
Returns the name of the graph being compiled.
"""
global model_name, graph_being_compiled, nth_graph
return f"{model_name}_{graph_being_compiled}_{nth_graph}"
@contextmanager
def track_graph_compiling(graph_name, increment_index=False):
global graph_being_compiled
graph_being_compiled = graph_name
yield
if increment_index:
global nth_graph
nth_graph += 1
graph_being_compiled = None
def make_boxed_func(f):
def g(args):
return f(*args)
g._boxed_call = True
return g
def make_boxed_compiler(compiler):
@wraps(compiler)
def f(fx_g, inps):
out_f = compiler(fx_g, inps)
fx_g = make_boxed_func(out_f)
return fx_g
return f
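# Illustrative sketch, not part of the original file: adapting a compiler that returns
# a plain callable to the boxed calling convention expected by call_func_with_args
# below. The pass-through compiler is hypothetical.
@make_boxed_compiler
def _example_nop_compiler(fx_g, example_inputs):
    # Return the graph module unchanged; make_boxed_compiler wraps the result with
    # make_boxed_func so it takes a single list of arguments and sets `_boxed_call`.
    return fx_g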
def call_func_with_args(f, args, steal_args=False):
if not steal_args:
args = list(args)
assert isinstance(args, list)
if hasattr(f, "_boxed_call"):
out = normalize_as_list(f(args))
else:
# TODO: Please remove soon
# https://github.com/pytorch/pytorch/pull/83137#issuecomment-1211320670
warnings.warn(
"Your compiler for AOTAutograd is returning a a function that doesn't take boxed arguments. "
"Please wrap it with functorch.compile.make_boxed_func or handle the boxed arguments yourself. "
"See https://github.com/pytorch/pytorch/pull/83137#issuecomment-1211320670 for rationale."
)
out = normalize_as_list(f(*args))
return out
@dataclasses.dataclass
class AOTConfig:
"""
Configuration for AOTDispatcher
"""
fw_compiler: Callable
bw_compiler: Callable
partition_fn: Callable
decompositions: Dict[Callable, Callable]
def aot_dispatch_base(flat_fn, flat_args: List[Tensor], aot_config: AOTConfig):
fw_module = make_fx(flat_fn, aot_config.decompositions)(*flat_args)
with track_graph_compiling("forward"):
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
@wraps(compiled_fw)
def new_fn(args):
fw_outs = call_func_with_args(compiled_fw, args)
return fw_outs
return new_fn
@contextmanager
def _disable_jit_autocast():
old_jit_autocast_flag = torch._C._jit_set_autocast_mode(False)
try:
yield
finally:
torch._C._jit_set_autocast_mode(old_jit_autocast_flag)
def aot_dispatch_autograd(flat_fn, flat_args: List[Tensor], aot_config: AOTConfig):
with _disable_jit_autocast():
joint_forward_backward = create_joint_forward_backward(flat_fn)
# Set input tensors that require grad to leaves
with torch.set_grad_enabled(True):
out = flat_fn(*flat_args)
out = pytree.tree_map(
lambda x: x.detach().contiguous() if isinstance(x, Tensor) else x,
out,
)
if isinstance(out, (list, tuple)):
num_outs = len(out)
else:
num_outs = 1
joint_inputs = (flat_args, out)
with torch.set_grad_enabled(True):
fx_g = make_fx(joint_forward_backward, aot_config.decompositions)(
*joint_inputs
)
if config.use_functionalize:
# Functionalize the foward backward graph. First create a
# fake fn to make functionalize happy
def fake_fn(primals, tangents):
return fx_g(primals, tangents)
fx_g = make_fx(functionalize(fake_fn))(*joint_inputs)
if config.debug_joint:
print(fx_g.code)
with track_graph_compiling("joint"):
fw_module, bw_module = aot_config.partition_fn(fx_g, joint_inputs)
if config.debug_graphs:
print(fw_module.code, bw_module.code)
with torch.no_grad():
with track_graph_compiling("forward"):
compiled_fw = aot_config.fw_compiler(fw_module, flat_args)
fw_outs = call_func_with_args(compiled_fw, flat_args)
if config.debug_partitioner:
activation_sizes = 0
for out in fw_outs[num_outs:]:
if isinstance(out, torch.Tensor):
activation_sizes += out.storage().nbytes()
print(f"Real Activations Stored(GB): {activation_sizes/1e9}")
bw_args = fw_outs[num_outs:] + fw_outs[0:num_outs]
with track_graph_compiling("backward", True):
compiled_bw = aot_config.bw_compiler(bw_module, bw_args)
class CompiledFunction(torch.autograd.Function):
@staticmethod
@disable_torchdynamo
def forward(ctx, *flat_tensor_args):
fw_outs = call_func_with_args(compiled_fw, flat_tensor_args)
ctx.save_for_backward(*fw_outs[num_outs:])
return tuple(fw_outs[0:num_outs])
@staticmethod
@disable_torchdynamo
def backward(ctx, *flat_args):
contiguous_args = [t.contiguous() for t in flat_args]
all_args = list(ctx.saved_tensors) + list(contiguous_args)
ctx.maybe_clear_saved_tensors()
out = call_func_with_args(compiled_bw, all_args, steal_args=True)
return tuple(out)
return CompiledFunction.apply
def create_aot_dispatcher_function(
flat_fn, flat_args: List[Tensor], aot_config: AOTConfig
):
"""
    Traces the forward and backward graphs of :attr:`flat_fn` to generate a
    joint graph. The joint graph is an Fx graph with Aten ops. Please refer to
    the tracing mechanism to understand the graph capturing details.
    The joint graph is then passed through :attr:`partition_fn` to isolate the
    forward and backward portions, which are then respectively compiled via the
    provided :attr:`fw_compiler` and :attr:`bw_compiler`.
The resulting compiled forward and backward graphs are then wrapped up in a
``torch.autograd.Function`` object.
"""
if aot_config.decompositions is None:
aot_config.decompositions = {}
aot_config.decompositions = {
**aot_autograd_decompositions,
**aot_config.decompositions,
}
fake_mode = FakeTensorMode.push() if config.use_fake_tensor else nullcontext()
with preserve_rng_state(), fake_mode as mode:
def process_inputs(flat_args):
flat_args = pytree.tree_map(
lambda x: x.detach().requires_grad_(x.requires_grad)
if isinstance(x, Tensor)
else x,
flat_args,
)
fake_flat_tensor_args = pytree.tree_map(
lambda x: mode.from_tensor(x)
if mode
else x
if isinstance(x, Tensor)
else x,
flat_args,
)
return fake_flat_tensor_args
fake_flat_tensor_args = process_inputs(flat_args)
needs_autograd = (
any(
[
x.requires_grad
for x in fake_flat_tensor_args
if isinstance(x, Tensor)
]
)
and torch.is_grad_enabled()
)
# crappy version of dispatcher
# TODO: Do this properly
if needs_autograd:
return make_boxed_func(
aot_dispatch_autograd(flat_fn, fake_flat_tensor_args, aot_config)
)
else:
return aot_dispatch_base(flat_fn, fake_flat_tensor_args, aot_config)
class _CompileCache(CompileCache):
pass
# using a C++-based pytree reduces the overhead by about 50%
compile_cache = None
# Inspired by autodidax (thanks!)
class PytreeThunk:
spec = None
# These are some minor micro-optimizations that save about 3-4 us of overhead.
is_simple = (
None # if the output spec is a tuple/list, we won't bother unflattening it.
)
is_really_simple = None # if the output spec is a LeafSpec
def set(self, spec):
assert self.spec is None or self.spec == spec
self.spec = spec
if type(self.spec) in [tuple, list] and all(
isinstance(i, pytree.LeafSpec) for i in spec.children_specs
):
self.is_simple = True
if isinstance(self.spec, pytree.LeafSpec):
self.is_really_simple = True
def unflatten(self, x):
if self.is_really_simple:
return x[0]
if self.is_simple:
return x
return pytree.tree_unflatten(x, self.spec)
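# A minimal usage sketch for PytreeThunk (illustrative and not part of the
# original module): the output spec captured while tracing is later used to
# unflatten the flat outputs returned by the compiled function. It relies on
# the module-level torch and pytree imports.
def _pytree_thunk_demo():
    thunk = PytreeThunk()
    tree_out = {"loss": torch.tensor(1.0), "logits": torch.zeros(2, 3)}
    flat_out, spec = pytree.tree_flatten(tree_out)
    thunk.set(spec)  # remember the output spec (and whether it is "simple")
    restored = thunk.unflatten(flat_out)  # round-trip back to the original structure
    assert restored.keys() == tree_out.keys()
    return restored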
def filter_tensor_and_static_args(args, static_argnums):
"""
Separate out the tensor and static args. Also, for the static args, store
the hash.
"""
tensor_args = []
static_args = []
static_args_hashed = []
for idx, arg in enumerate(args):
if idx not in static_argnums:
tensor_args.append(arg)
else:
static_args.append(arg)
static_args_hashed.append(arg.__hash__())
return tensor_args, static_args, static_args_hashed
def rearrange(tensor_args, static_args, static_argnums):
"""
Generate the args as per the original spec. static_argnums is sorted.
"""
tensor_index = 0
static_index = 0
index = 0
args = []
assert len(static_args) == len(static_argnums)
while tensor_index < len(tensor_args) and static_index < len(static_args):
if index == static_argnums[static_index]:
args.append(static_args[static_index])
static_index += 1
else:
args.append(tensor_args[tensor_index])
tensor_index += 1
index += 1
while tensor_index < len(tensor_args):
args.append(tensor_args[tensor_index])
tensor_index += 1
while static_index < len(static_args):
args.append(static_args[static_index])
static_index += 1
return args
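# A small worked example (illustrative, not part of the original module) of how
# filter_tensor_and_static_args and rearrange round-trip an argument list when
# index 1 is marked static.
def _static_args_roundtrip_demo():
    args = (torch.randn(2), 0.5, torch.randn(2))
    static_argnums = [1]
    tensor_args, static_args, static_args_hashed = filter_tensor_and_static_args(
        args, static_argnums
    )
    # tensor_args now holds the two tensors and static_args holds [0.5]
    restored = rearrange(tensor_args, static_args, static_argnums)
    assert all(a is b for a, b in zip(restored, args))
    return restored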
KNOWN_TYPES = [torch.Tensor, int, str, float, bool]
def aot_function(
fn: Callable,
fw_compiler: Callable,
bw_compiler: Optional[Callable] = None,
partition_fn: Callable = default_partition,
decompositions: Optional[Dict] = None,
hasher_type: str = "StaticShapeHasher",
static_argnums: Optional[Tuple[int]] = None,
) -> Callable:
"""
Traces the forward and backward graph of :attr:`fn` using torch dispatch
mechanism, and then compiles the generated forward and backward graphs
through :attr:`fw_compiler` and :attr:`bw_compiler`.
:func:`aot_function` traces the forward and backward graph ahead of time,
and generates a joint forward and backward graph. :attr:`partition_fn` is
then used to separate out forward and backward graphs. The partitioner
function can be used to perform optimizations such as recomputation. One can
set the :attr:`decompositions` dictionary to decompose larger operators into a
sequence of core or simpler operators supported by the backend compilers.
:func:`aot_function` uses a compilation cache, based on input tensor
properties, to detect when recompilation is needed. By default, its
behavior is static, i.e., it recompiles if the shape of any input tensor
changes.
:attr:`static_argnums` allows the user to mark arguments of the original
:attr:`fn` as static. This is useful when an argument is a non-tensor, e.g.,
``int`` or ``bool``. A change in the actual value of a static arg causes
recompilation.
.. warning::
This API is experimental and likely to change.
Args:
fn (Callable): A Python function that takes one or more arguments. Must
return one or more Tensors.
fw_compiler (Callable): A Python function that accepts an Fx graph with
Aten ops and input args, and returns a Callable that semantically is
equivalent to the input Fx graph.
bw_compiler (Optional[Callable]): A Python function that accepts an
Fx graph with Aten ops and input args, and returns a Callable that
semantically is equivalent to the input Fx graph. Default: None
(when None, it defaults to the :attr:`fw_compiler`)
partition_fn (Callable): A Python function that takes a joint forward
and backward graph, and partitions it into separate forward and
backward graphs.
decompositions (Dict): A dictionary to define the decomposition of
larger Aten ops into simpler or core Aten ops.
static_argnums (Optional[Tuple[int]]): An optional tuple of ints marking
which arguments of the function are static.
Returns:
Returns a ``Callable`` that retains the eager behavior of the original
:attr:`fn`, but with the forward and backward graphs compiled via
:attr:`fw_compiler` and :attr:`bw_compiler`.
A simple example usage of :func:`aot_function` is as follows. This example
will print the forward and backward graphs of the function ``fn``
>>> fn = lambda x : x.sin().cos()
>>> def print_compile_fn(fx_module, args):
>>> print(fx_module)
>>> return fx_module
>>> aot_fn = aot_function(fn, print_compile_fn)
>>> x = torch.randn(4, 5, requires_grad=True)
>>> aot_fn(x)
The static argnums are used to mark non-tensor arguments as static. An
example follows, where the dropout probability is an argument to the
original function.
>>> def fn(input, bias, residual, p: float):
>>> a = torch.add(input, bias)
>>> b = torch.nn.functional.dropout(a, p, training=True)
>>> c = b + residual
>>> return c
>>> aot_fn = aot_function(fn, print_compile_fn, static_argnums=(3,))
"""
global compile_cache
if compile_cache is None:
compile_cache = CompileCache()
if bw_compiler is None:
bw_compiler = fw_compiler
aot_config = AOTConfig(
fw_compiler=fw_compiler,
bw_compiler=bw_compiler,
partition_fn=partition_fn,
decompositions=decompositions,
)
cached_res = None
fn_id = id(fn)
fw_compiler_id = id(fw_compiler)
bw_compiler_id = id(bw_compiler)
if isinstance(static_argnums, int):
static_argnums = [static_argnums]
elif static_argnums is not None and len(static_argnums) == 0:
static_argnums = None
elif static_argnums is not None:
static_argnums = list(static_argnums)
static_argnums.sort()
@wraps(fn)
def returned_function(*args, **kwargs):
global compile_cache
nonlocal cached_res
# Separate out static args if static_argnums is present
tensor_args = args
static_args = []
# TODO - move the hashing part of static_args to C++.
static_args_hashed = []
if static_argnums is not None:
(
tensor_args,
static_args,
static_args_hashed,
) = filter_tensor_and_static_args(args, static_argnums)
# Now flatten the tensor args
flat_tensor_args, _ = pytree.tree_flatten((tensor_args, kwargs))
# Check if the fn is already compiled
num_tensor_args = len(flat_tensor_args)
flat_args_for_cache = flat_tensor_args + static_args_hashed
cached_res = compile_cache.at(
fn_id,
fw_compiler_id,
bw_compiler_id,
num_tensor_args,
hasher_type,
*flat_args_for_cache,
)
# Compile the function and save it in the cache
if cached_res is None:
# Save the args_spec for flat_tensor_args to unflatten while tracing
_, tensor_args_spec = pytree.tree_flatten((tensor_args, kwargs))
out_spec = PytreeThunk()
def flat_fn(*flat_tensor_args):
# The inputs are flattened tensor args. Prepare the args in the
# order that the original function expects. Add static args as well.
# They will appear as tensor constants in the traced graph.
nonlocal out_spec, static_args
tensor_args, kwargs = pytree.tree_unflatten(
flat_tensor_args, tensor_args_spec
)
if static_argnums is None:
args = tensor_args
else:
args = rearrange(tensor_args, static_args, static_argnums)
tree_out = fn(*args, **kwargs)
flat_out, spec = pytree.tree_flatten(tree_out)
for i in flat_out:
is_known_type = False
for j in KNOWN_TYPES:
if isinstance(i, j):
is_known_type = True
break
if not is_known_type:
raise RuntimeError(
f"Found {type(i)} in output, which is not a known type. "
"If this type holds tensors, you need to register a pytree for it. "
"See https://github.com/pytorch/functorch/issues/475 for a brief "
"explanation why. If you don't need to register a pytree, please "
"leave a comment explaining your use case and we'll make this more "
"ergonomic to deal with"
)
out_spec.set(spec)
return flat_out
compiled_fn = create_aot_dispatcher_function(
flat_fn,
flat_tensor_args,
aot_config,
)
cached_res = (compiled_fn, out_spec)
# Save the compiled_fn in the cache
compile_cache.insert(
fn_id,
fw_compiler_id,
bw_compiler_id,
num_tensor_args,
hasher_type,
cached_res,
*flat_args_for_cache,
)
cached_fn, out_spec = cached_res
out = cached_fn(flat_tensor_args)
return out_spec.unflatten(out)
return returned_function
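# A hedged, self-contained sketch (the helper names below are illustrative and
# not part of the original API) that exercises the static_argnums path from the
# docstring above end to end.
def _aot_function_static_argnums_demo():
    def fn(input, bias, residual, p: float):
        a = torch.add(input, bias)
        b = torch.nn.functional.dropout(a, p, training=True)
        return b + residual
    def print_compile_fn(fx_module, args):
        print(fx_module.code)
        return fx_module
    aot_fn = aot_function(fn, print_compile_fn, static_argnums=(3,))
    inp = torch.randn(4, requires_grad=True)
    bias = torch.randn(4, requires_grad=True)
    residual = torch.randn(4, requires_grad=True)
    out = aot_fn(inp, bias, residual, 0.5)
    out.sum().backward()
    return out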
def num_of_recompilations():
"""
Returns the number of recompilations since the last time the cache was cleared.
This is equivalent to the number of entries in the compilation cache.
"""
global compile_cache
if compile_cache is None:
return 0
return compile_cache.size()
def clear_compile_cache():
"""
Clears the compilation cache.
"""
global compile_cache
if compile_cache is not None:
compile_cache.clear()
compile_cache = None
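# An illustrative sketch (the helper names are not part of the original API) of
# how the cache helpers above behave with the default static-shape hasher: a new
# input shape is expected to trigger a recompilation.
def _compile_cache_demo():
    def fn(x):
        return x.sin()
    def identity_compiler(fx_module, args):
        return fx_module
    aot_fn = aot_function(fn, identity_compiler)
    aot_fn(torch.randn(3))
    aot_fn(torch.randn(3))  # same shape: expected to hit the cache
    aot_fn(torch.randn(5))  # new shape: expected to recompile
    print(num_of_recompilations())  # expected to print 2 for this sequence
    clear_compile_cache()
    assert num_of_recompilations() == 0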
def aot_module(mod: nn.Module, *args, **kwargs) -> nn.Module:
"""
Traces the forward and backward graph of :attr:`mod` using torch dispatch
tracing mechanism. It is a wrapper function that uses
:func:`aot_function` underneath to perform tracing and compilation.
:func:`aot_module` lifts the parameters and buffers of ``nn.Module`` as inputs
to a new callable which is then compiled through :func:`aot_function`.
.. warning::
This API is experimental and likely to change.
Args:
mod (nn.Module): The ``nn.Module`` to be traced and compiled.
args : args to be passed to :func:`aot_function`
kwargs : kwargs to be passed to :func:`aot_function`
Returns:
Returns a ``nn.Module`` that retains the eager behavior of the original
:attr:`mod`, but with forward and backward graph compiled.
"""
def functional_call(named_params, named_buffers, *args, **kwargs):
params_and_buffers = {**named_params, **named_buffers}
return stateless.functional_call(mod, params_and_buffers, args, kwargs)
compiled_f = aot_function(functional_call, *args, **kwargs)
class AOTModule(nn.Module):
def __init__(self):
super(AOTModule, self).__init__()
self.orig_module = mod
def forward(self, *args, **kwargs):
return compiled_f(
dict(_named_parameters(mod, remove_duplicate=False)),
dict(_named_buffers(mod, remove_duplicate=False)),
*args,
**kwargs,
)
return AOTModule()
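# A minimal usage sketch for aot_module (illustrative module and compiler, not
# part of the original API surface): parameters and buffers are lifted to inputs
# and the captured graphs are handed to a pass-through compiler.
def _aot_module_demo():
    mod = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
    def print_compile_fn(fx_module, args):
        print(fx_module.code)
        return fx_module
    compiled_mod = aot_module(mod, print_compile_fn)
    x = torch.randn(2, 4, requires_grad=True)
    out = compiled_mod(x)
    out.sum().backward()
    return out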
def aot_module_simplified(mod: nn.Module, *top_args, **top_kwargs) -> nn.Module:
"""
This is the simplified or low overhead version of aot_module. For frontends
like TorchDynamo, the input functions/modules to AOT are static and have
unpacked inputs/outputs. This gives us an opportunity to remove:
(1) the pytree overhead of parsing inputs/outputs,
(2) the AOT Autograd cache,
(3) the reading of params/buffers on every forward call.
:func:`aot_module_simplified` removes these overheads.
"""
#########################################################
params = {
**dict(_named_parameters(mod, remove_duplicate=False)),
**dict(_named_buffers(mod, remove_duplicate=False)),
}
params_flat, params_spec = pytree.tree_flatten(params)
params_flat = tuple(params_flat)
params_len = len(params_flat)
def functional_call(*args, **kwargs):
with stateless._reparametrize_module(
mod, pytree.tree_unflatten(args[:params_len], params_spec)
):
if isinstance(mod, torch.fx.GraphModule):
with fx_traceback.override_stack_trace():
out = Interpreter(mod).run(*args[params_len:], **kwargs)
else:
out = mod(*args[params_len:], **kwargs)
if not isinstance(out, (tuple, list)):
raise RuntimeError(
"Graph output must be a tuple(). This is so that we can avoid "
"pytree processing of the ouputs. Please change the module to "
"have tuple outputs or use aot_module instead."
)
return out
def aot_function_simplified(
fn: Callable,
fw_compiler: Callable,
bw_compiler: Optional[Callable] = None,
partition_fn: Callable = default_partition,
decompositions: Optional[Dict] = None,
hasher_type: str = "StaticShapeHasher",
static_argnums: Optional[Tuple[int]] = None,
) -> Callable:
assert static_argnums is None
if bw_compiler is None:
bw_compiler = fw_compiler
aot_config = AOTConfig(
fw_compiler=fw_compiler,
bw_compiler=bw_compiler,
partition_fn=partition_fn,
decompositions=decompositions,
)
compiled_fn = None
@wraps(fn)
def new_func(*args):
nonlocal compiled_fn
if compiled_fn is None:
compiled_fn = create_aot_dispatcher_function(
fn,
args,
aot_config,
)
return compiled_fn(args)
return new_func
compiled_f = aot_function_simplified(functional_call, *top_args, **top_kwargs)
if top_kwargs:
def forward(*args, **kwargs):
return compiled_f(
*params_flat,
*args,
**kwargs,
)
else:
def forward(*args):
return compiled_f(
*params_flat,
*args,
)
forward.zero_grad = mod.zero_grad
return forward
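# A hedged sketch for aot_module_simplified (illustrative module and compiler):
# the wrapped module must already return a tuple, so a small fx-traced module
# with a tuple output is used here.
def _aot_module_simplified_demo():
    class TupleOut(nn.Module):
        def __init__(self):
            super().__init__()
            self.lin = nn.Linear(4, 4)
        def forward(self, x):
            return (self.lin(x).relu(),)
    gm = torch.fx.symbolic_trace(TupleOut())
    def identity_compiler(fx_module, args):
        return fx_module
    compiled_forward = aot_module_simplified(gm, fw_compiler=identity_compiler)
    (out,) = compiled_forward(torch.randn(2, 4, requires_grad=True))
    out.sum().backward()
    return out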
compiled_function = aot_function
compiled_module = aot_module
| pytorch-master | functorch/functorch/_src/aot_autograd.py |
import copy
import logging
import os
import pickle
import random
from functools import partial
from typing import Callable, Optional, Tuple, Union
import torch
import torch.fx as fx
import torch.nn as nn
from .aot_autograd import aot_function, aot_module, make_boxed_compiler
from .compile_utils import strip_overloads
from .decompositions import get_decompositions
from .partitioners import (
default_partition,
draw_graph,
min_cut_rematerialization_partition,
)
# These canonicalizations are needed here (and not decompositions), as the ops
# we're trying to canonicalize to are CompositeImplicitAutograd.
def _canonicalize(fx_g):
for node in fx_g.graph.nodes:
if node.target == torch.ops.aten._to_copy:
node.target = torch.ops.aten.to
fx_g.recompile()
return fx_g
@make_boxed_compiler
def ts_compile(fx_g: fx.GraphModule, _) -> Callable:
"""
Compiles :attr:`fx_g` with the TorchScript compiler.
.. warning::
This API is experimental and likely to change.
Args:
fx_g(fx.GraphModule): The input Fx graph module to be compiled.
Returns:
A scripted, frozen, and inference-optimized TorchScript callable.
"""
strip_overloads(fx_g)
for node in fx_g.graph.nodes:
if (
node.target == torch.ops.aten._to_copy
and len(node.args) == 1
and len(node.kwargs) == 1
and "dtype" in node.kwargs
):
node.target = torch.ops.aten.to
for node in fx_g.graph.nodes:
new_kwargs = {}
for k, v in node.kwargs.items():
if isinstance(v, torch.device):
v = v.type
new_kwargs[k] = v
node.kwargs = new_kwargs
fx_g.graph.lint()
fx_g.recompile()
f = torch.jit.script(fx_g)
torch._C._jit_pass_remove_mutation(f.graph)
f = torch.jit.freeze(f.eval())
f = torch.jit.optimize_for_inference(f)
return f
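# An illustrative sketch (the helper names are not part of the original API) of
# the intended use of ts_compile: it is passed to aot_function as the forward and
# backward compiler rather than being invoked by hand.
def _ts_compile_demo():
    def f(x):
        return torch.sin(x) + torch.cos(x)
    aot_f = aot_function(f, fw_compiler=ts_compile, bw_compiler=ts_compile)
    x = torch.randn(3, requires_grad=True)
    out = aot_f(x)
    out.sum().backward()
    return out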
@make_boxed_compiler
def _draw_graph_compile(fx_g, _, name, clear_meta=True):
print(fx_g.code)
draw_graph(fx_g, name, clear_meta=clear_meta)
return fx_g
def draw_graph_compile(name):
return partial(_draw_graph_compile, name=name)
@make_boxed_compiler
def nop(fx_g: fx.GraphModule, _) -> Callable:
"""
Returns the :attr:`fx_g` Fx graph module as it is. This is a no-op compiler
and can be used to check accuracy.
.. warning::
This API is experimental and likely to change.
"""
return fx_g
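# A short accuracy-check sketch using the no-op compiler above (the helper name
# is illustrative): the AOT-compiled function is expected to match eager mode
# numerically because the captured graphs are executed unchanged.
def _nop_accuracy_check_demo():
    def f(x):
        return x * 2 + 1
    aot_f = aot_function(f, fw_compiler=nop)
    x = torch.randn(3, requires_grad=True)
    assert torch.allclose(aot_f(x), f(x))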
@make_boxed_compiler
def simple_ts_compile(fx_g, _):
strip_overloads(fx_g)
f = torch.jit.script(fx_g)
f = torch.jit.freeze(f.eval())
return f
def nnc_jit(f, static_argnums=None):
return aot_function(f, simple_ts_compile, static_argnums=static_argnums)
aten = torch.ops.aten
default_decompositions = {
aten.detach,
aten.gelu_backward,
aten.leaky_relu_backward,
aten.sigmoid_backward,
aten.threshold_backward,
aten.hardtanh_backward,
aten.hardsigmoid_backward,
aten.hardswish_backward,
aten.tanh_backward,
aten.silu_backward,
aten.elu_backward,
aten.cudnn_batch_norm,
aten.cudnn_batch_norm_backward,
aten.masked_fill.Scalar,
aten.masked_fill.Tensor,
aten.elu,
aten.leaky_relu,
aten.hardtanh,
aten.hardswish,
aten.hardsigmoid,
aten.conj_physical,
aten.is_same_size,
}
default_decompositions = get_decompositions(default_decompositions)
@make_boxed_compiler
def print_compile(fx_g, _):
print(fx_g.code)
return fx_g
def memory_efficient_fusion(
fn: Union[Callable, nn.Module],
static_argnums: Optional[Tuple[int]] = None,
**kwargs,
):
"""
Wrapper function over :func:`aot_function` and :func:`aot_module` to perform
memory-efficient fusion. It uses the
:func:`min_cut_rematerialization_partition` partitioner to perform efficient
recomputation. It uses NVFuser to compile the generated forward and backward
graphs.
.. warning::
This API is experimental and likely to change.
Args:
fn (Union[Callable, nn.Module]): A Python function or a ``nn.Module``
that takes one or more arguments. Must return one or more Tensors.
static_argnums (Optional[Tuple[int]]): An optional tuple of ints marking
which arguments of the function are static.
**kwargs: Any other overrides you want to make to the settings
Returns:
Returns a ``Callable`` or ``nn.Module`` that retains the eager behavior
of the original :attr:`fn`, but whose forward and backward graphs have
gone through recomputation optimizations, and the graphs have been
compiled with nvfuser.
"""
config = {
"fw_compiler": ts_compile,
"bw_compiler": ts_compile,
"partition_fn": min_cut_rematerialization_partition,
"hasher_type": "StaticShapeHasher",
"decompositions": default_decompositions,
"static_argnums": static_argnums,
}
config.update(kwargs)
if isinstance(fn, torch.nn.Module):
return aot_module(fn, **config)
else:
return aot_function(fn, **config)
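# A brief usage sketch for memory_efficient_fusion (illustrative module and
# shapes; a CUDA device and the TorchScript/NVFuser stack are assumed, in line
# with the docstring above).
def _memory_efficient_fusion_demo():
    class MLP(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc1 = nn.Linear(16, 16)
            self.fc2 = nn.Linear(16, 16)
        def forward(self, x):
            return self.fc2(torch.nn.functional.gelu(self.fc1(x)))
    fused = memory_efficient_fusion(MLP().cuda())
    x = torch.randn(8, 16, device="cuda", requires_grad=True)
    out = fused(x)
    out.sum().backward()
    return out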
def debug_compile(fx_g, inps):
fx_g.to_folder("foo")
print(
f"""
##############################################################
# To minimize FX graph, copy and paste the below and run it #
##############################################################
import torch
import torch.fx as fx
from functorch.compile import minifier, check_nvfuser_subprocess, check_nvfuser_correctness_subprocess
inps = {[(i.shape, i.dtype) for i in inps]}
inps = [torch.ones(shape, dtype=dtype, device='cuda') for (shape, dtype) in inps]
from foo import FxModule
mod = FxModule().cuda()
with torch.jit.fuser("fuser2"):
# check_nvfuser_subprocess can be replaced with check_nvfuser_correctness_subprocess
minifier(fx.symbolic_trace(mod), inps, check_nvfuser_subprocess)
"""
)
from foo import FxModule
FxModule().cuda()(*inps)
return ts_compile(fx_g, inps)
graph_index = 0
def get_inputs(input_data_path):
"""
Return randomly generated inputs matching the inputs meta saved by _save_fx_default.
"""
inputs = []
with open(input_data_path, "rb") as f:
inputs_meta = pickle.load(f)
for meta in inputs_meta:
if len(meta) == 1:
type = meta[0]  # scalar args are stored as a 1-tuple, e.g. (int,)
input = type(random.random())
else:
type, shape, stride, dtype, device = meta
if dtype in {
torch.int,
torch.int32,
torch.int64,
torch.bool,
torch.uint8,
int,
float,
}:
input = torch.randint(0, 1, shape, dtype=dtype, device=device)
else:
input = torch.rand(shape, dtype=dtype, device=device)
inputs.append(input)
return inputs
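# A self-contained sketch of the .input format consumed by get_inputs above
# (the file name is arbitrary; the meta tuples follow the format documented in
# _save_fx_default below).
def _get_inputs_demo(path="example_graph.input"):
    inputs_meta = [
        (torch.Tensor, (2, 3), (3, 1), torch.float32, torch.device("cpu")),
        (torch.Tensor, (4,), (1,), torch.int64, torch.device("cpu")),
    ]
    with open(path, "wb") as f:
        pickle.dump(inputs_meta, f)
    return get_inputs(path)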
def _save_fx_default(current_name, folder_name, dump_example_input, gm, example_inputs):
"""
The forward, backward, and joint computation graph will be stored in
{folder_name}/{current_name}/{current_name}_forward_{graph_index},
{folder_name}/{current_name}/{current_name}_backward_{graph_index}, and
{folder_name}/{current_name}/{current_name}_joint_{graph_index} respectively.
The input shape of the graphs will be stored in the .input files.
These files can be loaded with pickle; each contains
a list of entries of the form (type, shape, stride, dtype, device).
In the case of type = int or float, it is just (type,).
For joint graph input, it is a nested list [[],[]]
where the two inner lists have the same format.
If dump_example_input is True, example_inputs will be stored in .pt file.
Since each function might produce multiple graphs,
the graph_index is used to distinguish different graphs.
"""
from functorch.compile import aot_module_simplified
def get_input_meta(args):
input_meta = []
if len(args) > 0 and isinstance(args[0], tuple): # joint input
input_meta += get_input_meta(args[0])
input_meta += get_input_meta(args[1])
return input_meta
for arg in args:
if type(arg) == int or type(arg) == float:
input_meta.append((type(arg),))
else:
input_meta.append(
(type(arg), arg.shape, arg.stride(), arg.dtype, arg.device)
)
return input_meta
def graph_saver_helper(gm_to_save, args, type_name):
global graph_index
if len(gm_to_save.graph.nodes) == 0:
logging.log(
logging.WARNING,
f"No nodes in graph {current_name}_{type_name}_{graph_index}.",
)
return
gm = copy.deepcopy(gm_to_save)
gm.graph.set_codegen(torch.fx.graph.CodeGen()) # remove codegen
gm.recompile()
input_meta = get_input_meta(args)
os.makedirs(f"{folder_name}/{current_name}", exist_ok=True)
gm.to_folder(
f"{folder_name}/{current_name}/{current_name}_{type_name}_{graph_index}"
)
pickle.dump(
input_meta,
open(
f"{folder_name}/{current_name}/{current_name}_{type_name}_{graph_index}/{current_name}_{type_name}_{graph_index}.input", # noqa: B950
"wb",
),
) # noqa: E501
if dump_example_input:
torch.save(
args,
f"{folder_name}/{current_name}/{current_name}_{type_name}_{graph_index}/{current_name}_{type_name}_{graph_index}.pt", # noqa: B950
) # noqa: E501
def graph_saver_forward(gm, fw_args):
graph_saver_helper(gm, fw_args, "forward")
return gm
def graph_saver_backward(gm, bw_args):
graph_saver_helper(gm, bw_args, "backward")
global graph_index
graph_index += 1
return gm
def graph_saver_joint(gm, joint_args):
graph_saver_helper(gm, joint_args, "joint")
return default_partition(gm, joint_args)
return aot_module_simplified(
gm,
fw_compiler=graph_saver_forward,
bw_compiler=graph_saver_backward,
partition_fn=graph_saver_joint,
decompositions=default_decompositions,
)
def graph_dumper_aot(current_name, folder_name, dump_example_input=False):
"""
Dump the forward, backward, and joint computation graph.
Example Usage:
save_fx_func = graph_dumper_aot(current_name, folder_name, dump_example_input = False)
optimize_ctx = torchdynamo.optimize(
save_fx_func
)
with torch.enable_grad():
with optimize_ctx:
result = forward_and_backward_pass(model, example_inputs)
"""
global graph_index
graph_index = 0
return partial(_save_fx_default, current_name, folder_name, dump_example_input)
| pytorch-master | functorch/functorch/_src/compilers.py |