python_code | repo_name | file_path
---|---|---|
from .prepare import prepare
from .convert import convert
from .fuse import fuse
| pytorch-master | torch/ao/quantization/fx/__init__.py |
import copy
import re
import torch
import torch.nn as nn
from torch.ao.quantization import QuantType
from torch.ao.quantization.utils import is_per_tensor, is_per_channel
from torch.ao.quantization.quantize import is_activation_post_process
from torch.fx import GraphModule, map_arg
from torch.fx.graph import (
Graph,
Node,
)
from .custom_config import PrepareCustomConfig
from typing import Callable, Optional, List, Dict, Any, Set, Tuple, Union, Type
from collections import namedtuple
import operator
import warnings
# TODO: revisit this list. Many helper methods shouldn't be public
__all__ = [
"all_node_args_except_first",
"all_node_args_have_no_tensors",
"assert_and_get_unique_device",
"BIAS_INDEX_DICT",
"collect_producer_nodes",
"create_getattr_from_value",
"create_node_from_old_node_preserve_meta",
"create_qparam_nodes",
"EMPTY_ARG_DICT",
"get_custom_module_class_keys",
"get_linear_prepack_op_for_dtype",
"get_new_attr_name_with_prefix",
"get_non_observable_arg_indexes_and_types",
"get_per_tensor_qparams",
"get_qconv_op",
"get_qconv_prepack_op",
"get_quantize_node_info",
"graph_module_from_producer_nodes",
"graph_pretty_str",
"is_get_tensor_info_node",
"maybe_get_next_module",
"NodeInfo",
"node_return_type_is_int",
"NON_OBSERVABLE_ARG_DICT",
"NON_QUANTIZABLE_WEIGHT_OPS",
"quantize_node",
"return_arg_list",
"WEIGHT_INDEX_DICT",
"get_skipped_module_name_and_classes",
]
# A dictionary for querying the weight index for a given op
WEIGHT_INDEX_DICT = {
torch.nn.functional.conv1d : [1],
torch.nn.functional.conv2d : [1],
torch.nn.functional.conv3d : [1],
torch.nn.functional.linear : [1],
torch.nn.functional.layer_norm : [2],
torch.nn.functional.group_norm : [2],
torch.nn.functional.instance_norm : [3],
}
NON_QUANTIZABLE_WEIGHT_OPS = {torch.nn.functional.layer_norm, torch.nn.functional.group_norm, torch.nn.functional.instance_norm}
BIAS_INDEX_DICT = {
torch.nn.functional.conv1d : [2],
torch.nn.functional.conv2d : [2],
torch.nn.functional.conv3d : [2],
torch.nn.functional.linear : [2],
torch.nn.functional.layer_norm : [3],
torch.nn.functional.group_norm : [3],
torch.nn.functional.instance_norm : [4],
}
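# Illustrative note (not part of the original file): these dicts map a functional op to the
# positional indices of its weight/bias arguments. For F.linear(input, weight, bias):
#   WEIGHT_INDEX_DICT[torch.nn.functional.linear]  # -> [1], the weight is positional arg 1
#   BIAS_INDEX_DICT[torch.nn.functional.linear]    # -> [2], the bias is positional arg 2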
def graph_pretty_str(g, shorten=True) -> str:
"""Returns a printable representation of the ops in the graph of g.
If shorten is True, tries to abbreviate fields.
"""
built_in_func_re = re.compile('<built-in function (.*)>')
built_in_meth_re = re.compile('<built-in method (.*) of type.*>')
op_dict = {
'placeholder': 'plchdr',
'get_attr': 'gt_prm',
'call_function': 'cl_fun',
'call_module': 'cl_mod',
'call_method': 'cl_meth',
}
max_lens = {}
col_names = ("name", "op", "target", "args", "kwargs")
for s in col_names:
max_lens[s] = len(s)
results = []
for n in g.nodes:
# activation_post_process_0 -> obs_0
name = str(n.name)
if shorten:
name = name.replace("activation_post_process", "obs")
op = str(n.op)
# placeholder -> plchdr, and so on
if shorten and op in op_dict:
op = op_dict[op]
target = str(n.target)
# <built-in function foo> -> <bi_fun foo>, and so on
if shorten:
built_in_func = built_in_func_re.search(target)
if built_in_func:
target = f"<bi_fun {built_in_func.group(1)}>"
built_in_meth = built_in_meth_re.search(target)
if built_in_meth:
target = f"<bi_meth {built_in_meth.group(1)}>"
target = target.replace("activation_post_process", "obs")
args = str(n.args)
if shorten:
args = args.replace("activation_post_process", "obs")
kwargs = str(n.kwargs)
# calculate maximum length of each column, so we can tabulate properly
for k, v in zip(col_names, (name, op, target, args, kwargs)):
max_lens[k] = max(max_lens[k], len(v))
results.append([name, op, target, args, kwargs])
res_str = ""
format_str = "{:<{name}} {:<{op}} {:<{target}} {:<{args}} {:<{kwargs}}\n"
res_str += format_str.format(*col_names, **max_lens)
for result in results:
res_str += format_str.format(*result, **max_lens)
# print an extra note on abbreviations which change attribute names,
# since users will have to un-abbreviate for further debugging
if shorten:
res_str += "*obs_{n} = activation_post_process_{n}\n"
return res_str
def get_per_tensor_qparams(activation_post_process):
assert is_per_tensor(activation_post_process.qscheme), 'Only per tensor quantization is supported'
scale, zero_point = activation_post_process.calculate_qparams()
scale = float(scale)
zero_point = int(zero_point)
dtype = activation_post_process.dtype
return scale, zero_point, dtype
def get_quantize_node_info(activation_post_process: Callable) -> Optional[Tuple[str, Union[Callable, str], Dict[str, Any]]]:
''' Given an activation_post_process module,
return the node_type (e.g. call_function), the quantize op (e.g. quantize_per_tensor) and a dictionary
of qparams extracted from the module
'''
dtype = activation_post_process.dtype # type: ignore[attr-defined]
compute_dtype = None
if hasattr(activation_post_process, "compute_dtype"):
compute_dtype = activation_post_process.compute_dtype # type: ignore[attr-defined]
quantize_op : Optional[Union[Callable, str]] = None
if dtype in [torch.quint8, torch.qint8]:
node_type = "call_function"
scale, zero_point = activation_post_process.calculate_qparams() # type: ignore[attr-defined]
if is_per_channel(activation_post_process.qscheme): # type: ignore[attr-defined]
ch_axis = int(activation_post_process.ch_axis) # type: ignore[attr-defined]
qparams = {"_scale_": scale, "_zero_point_": zero_point, "_axis_": ch_axis, "_dtype_": dtype}
quantize_op = torch.quantize_per_channel
else:
scale = float(scale)
zero_point = int(zero_point)
qparams = {"_scale_": scale, "_zero_point_": zero_point, "_dtype_": dtype}
quantize_op = torch.quantize_per_tensor
elif dtype == torch.float16:
node_type = "call_method"
quantize_op = "to"
qparams = {"_dtype_": dtype}
elif dtype == torch.float32 and compute_dtype in [torch.quint8, torch.qint8, torch.float16]:
# dynamic quantization
node_type = "call_function"
quantize_op = torch.quantize_per_tensor_dynamic
# TODO: get reduce range from observer
# reduce_range = activation_post_process.reduce_range
reduce_range = torch.backends.quantized.engine == "fbgemm"
qparams = {"_dtype_": compute_dtype, "_reduce_range_": reduce_range}
else:
warnings.warn(f"Unsupported activation_post_process in get_quantize_node_info: {activation_post_process}")
return None
return node_type, quantize_op, qparams
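# Illustrative example (not part of the original file): for a typical quint8 per-tensor
# observer, get_quantize_node_info returns something like
#   ("call_function", torch.quantize_per_tensor,
#    {"_scale_": 0.1, "_zero_point_": 0, "_dtype_": torch.quint8})
# where the scale/zero_point values (0.1 and 0 here are placeholders) come from
# observer.calculate_qparams().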
def quantize_node(
in_node: Node,
obs_module: torch.nn.Module,
obs_node: Node,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]],
is_input: bool,
output_prefix: str = "_output") -> Node:
''' Add quantization nodes (e.g. quantize_per_tensor/per_channel) for the given node to the graph,
with the qparams calculated from activation_post_process (obs_module).
The observer node (obs_node) is used to find the FQN of the user of act_post_process.
e.g. Given input `node` in `node = self.conv(x)`, insert node:
`quantized_node = torch.quantize_per_tensor(x, self._scale_0, self._zero_point_0, self._dtype_0)`
where self._scale_0, self._zero_point_0 and self._dtype_0 are
calculated from `obs_module`
'''
# Find the first use of the observer node, we use this to get the scope of the module.
if is_input:
# if the quantize function is at the input of op, then we find the first user of the observer_node
# to get the path. If a linear call_function is in the user list, we return the first instance
# of linear node to get the FQN.
users = list(obs_node.users)
first_linear_use_or_first_use = users[0] if users else None
linear_node = None
for n in users:
if n.op == "call_function" and n.target == torch.nn.functional.linear:
linear_node = n
break
if linear_node:
first_linear_use_or_first_use = linear_node
prefix = "_input"
else:
# if the quantize function is at the output of the op, we use the observer input node to get the path
first_linear_use_or_first_use = in_node
prefix = output_prefix
if first_linear_use_or_first_use and first_linear_use_or_first_use.name in node_name_to_scope:
module_path, _ = node_name_to_scope[first_linear_use_or_first_use.name]
else:
# TODO: it's not used, so actually we can skip quantization
# but this requires changing return type of quantize_node
# we can fix it later if needed
module_path = ""
root_module = modules['']
graph = quantized_graph
maybe_quantize_node_info = get_quantize_node_info(obs_module)
assert maybe_quantize_node_info is not None, \
f"Expecting quantize node info not to be None, observer: {obs_module}"
node_type, quantize_op, qparams = maybe_quantize_node_info
inputs = [in_node]
for key, value in qparams.items():
if key in ['_scale_', '_zero_point_']:
# For scale and zero_point values we register them as buffers in the root module.
qparam_node = create_getattr_from_value(root_module, graph, module_path + prefix + key, value)
inputs.append(qparam_node)
else:
# for qparams that are not scale/zero_point (like axis, dtype) we store them as literals in the graph.
inputs.append(value)
return graph.create_node(node_type, quantize_op, tuple(inputs), {})
def get_custom_module_class_keys(custom_module_mapping: Dict[QuantType, Dict[Type, Type]]) -> List[Any]:
r""" Get all the unique custom module keys in the custom config dict
e.g.
Input:
{
QuantType.STATIC: {
CustomModule1: ObservedCustomModule
},
QuantType.DYNAMIC: {
CustomModule2: DynamicObservedCustomModule
},
QuantType.WEIGHT_ONLY: {
CustomModule3: WeightOnlyObservedCustomModule
},
}
Output:
# extract the keys across all inner STATIC, DYNAMIC, and WEIGHT_ONLY dicts
[CustomModule1, CustomModule2, CustomModule3]
"""
# using set to dedup
float_custom_module_classes : Set[Any] = set()
for quant_mode in [QuantType.STATIC, QuantType.DYNAMIC, QuantType.WEIGHT_ONLY]:
quant_mode_custom_module_config = custom_module_mapping.get(quant_mode, {})
quant_mode_custom_module_classes = set(quant_mode_custom_module_config.keys())
float_custom_module_classes |= quant_mode_custom_module_classes
return list(float_custom_module_classes)
def get_linear_prepack_op_for_dtype(dtype):
if dtype == torch.float16:
return torch.ops.quantized.linear_prepack_fp16
elif dtype == torch.qint8:
return torch.ops.quantized.linear_prepack
else:
raise Exception("can't get linear prepack op for dtype:", dtype)
def get_qconv_prepack_op(conv_op: Callable) -> Callable:
prepack_ops = {
torch.nn.functional.conv1d: torch.ops.quantized.conv1d_prepack,
torch.nn.functional.conv2d: torch.ops.quantized.conv2d_prepack,
torch.nn.functional.conv3d: torch.ops.quantized.conv3d_prepack
}
prepack_op = prepack_ops.get(conv_op, None)
assert prepack_op, "Didn't find prepack op for {}".format(conv_op)
return prepack_op
def get_qconv_op(conv_op: Callable, has_relu: bool) -> Callable:
qconv_op = {
# has relu
True: {
torch.nn.functional.conv1d: torch.ops.quantized.conv1d_relu,
torch.nn.functional.conv2d: torch.ops.quantized.conv2d_relu,
torch.nn.functional.conv3d: torch.ops.quantized.conv3d_relu
},
False: {
torch.nn.functional.conv1d: torch.ops.quantized.conv1d,
torch.nn.functional.conv2d: torch.ops.quantized.conv2d,
torch.nn.functional.conv3d: torch.ops.quantized.conv3d
}
}
qconv = qconv_op[has_relu].get(conv_op)
assert qconv, "Can't find corresponding quantized conv op for {} {}".format(conv_op, has_relu)
return qconv
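# Illustrative example (not part of the original file):
#   get_qconv_op(torch.nn.functional.conv2d, has_relu=True)
#   # -> torch.ops.quantized.conv2d_relu
#   get_qconv_op(torch.nn.functional.conv2d, has_relu=False)
#   # -> torch.ops.quantized.conv2d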
# Returns a function that can get a new attribute name for module with given
# prefix, for example,
# >> get_new_observer_name = get_new_attr_name_with_prefix('_observer_')
# >> new_name = get_new_observer_name(module)
# new_name will be an unused attribute name on module, e.g. `_observer_1`
def get_new_attr_name_with_prefix(prefix: str) -> Callable:
prefix = prefix.replace(".", "_")
def get_new_attr_name(module: torch.nn.Module):
def get_attr_name(i: int):
return prefix + str(i)
i = 0
attr_name = get_attr_name(i)
while hasattr(module, attr_name):
i += 1
attr_name = get_attr_name(i)
return attr_name
return get_new_attr_name
def collect_producer_nodes(node: Node) -> Optional[List[Node]]:
r''' Starting from a target node, trace back until we hit an input or
getattr node. This is used to extract the chain of operators
starting from getattr to the target node, for example
def forward(self, x):
observed = self.observer(self.weight)
return F.linear(x, observed)
collect_producer_nodes(observed) will either return a list of nodes that
produce the observed node or None if we can't extract a self-contained
graph without free variables (inputs of the forward function).
'''
nodes = [node]
frontier = [node]
while frontier:
node = frontier.pop()
all_args = list(node.args) + list(node.kwargs.values())
for arg in all_args:
if not isinstance(arg, Node):
continue
if arg.op == 'placeholder':
# hit input, can't fold in this case
return None
nodes.append(arg)
if not (arg.op == 'call_function' and arg.target == getattr):
frontier.append(arg)
return nodes
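# Illustrative example (not part of the original file): for the forward shown in the
# docstring above, calling collect_producer_nodes on the `observed` node returns
# [observed, self.weight], i.e. the observer call_module node plus the weight get_attr
# node that produces it. If a forward input (placeholder) were reached instead,
# None would be returned.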
def graph_module_from_producer_nodes(
root: GraphModule, producer_nodes: List[Node]) -> GraphModule:
r''' Construct a graph module from extracted producer nodes
from `collect_producer_nodes` function
Args:
root: the root module for the original graph
producer_nodes: a list of nodes we use to construct the graph
Return:
A graph module constructed from the producer nodes
'''
assert len(producer_nodes) > 0, 'list of producer nodes can not be empty'
# since we traced back from node to getattr
producer_nodes.reverse()
graph = Graph()
env: Dict[Any, Any] = {}
def load_arg(a):
return map_arg(a, lambda node: env[node])
for producer_node in producer_nodes:
env[producer_node] = graph.node_copy(producer_node, load_arg)
graph.output(load_arg(producer_nodes[-1]))
graph_module = GraphModule(root, graph)
return graph_module
def assert_and_get_unique_device(module: torch.nn.Module) -> Any:
"""
Returns the unique device for a module, or None if no device is found.
Throws an error if multiple devices are detected.
"""
devices = {p.device for p in module.parameters()} | \
{p.device for p in module.buffers()}
assert len(devices) <= 1, (
"prepare only works with cpu or single-device CUDA modules, "
"but got devices {}".format(devices)
)
device = next(iter(devices)) if len(devices) > 0 else None
return device
def create_getattr_from_value(module: torch.nn.Module, graph: Graph, prefix: str, value: Any) -> Node:
"""
Given a value of any type, creates a getattr node corresponding to the value and
registers the value as a buffer to the module.
"""
get_new_attr_name = get_new_attr_name_with_prefix(prefix)
attr_name = get_new_attr_name(module)
device = assert_and_get_unique_device(module)
new_value = value.clone().detach() if isinstance(value, torch.Tensor) \
else torch.tensor(value, device=device)
module.register_buffer(attr_name, new_value)
# Create get_attr with value
attr_node = graph.create_node("get_attr", attr_name)
return attr_node
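# Illustrative example (not part of the original file, names are hypothetical):
#   scale_node = create_getattr_from_value(root_module, graph, "_scale_", 0.1)
# registers a buffer named "_scale_0" (picked via get_new_attr_name_with_prefix)
# holding torch.tensor(0.1) on root_module and returns the corresponding get_attr node.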
def create_qparam_nodes(
node_name: str,
scale: Any,
zero_point: Any,
modules: Dict[str, torch.nn.Module],
quantized_graph: Graph,
node_name_to_scope: Dict[str, Tuple[str, type]]
) -> Tuple[Node, Node]:
"""
Create getattr nodes in the quantized graph for scale and zero point values.
The nodes are registered with the root_module of the model.
"""
root_module = modules['']
module_path, _ = node_name_to_scope[node_name]
scale_node = create_getattr_from_value(root_module, quantized_graph, (module_path + "_scale_"), scale)
zero_point_node = create_getattr_from_value(root_module, quantized_graph, (module_path + "_zero_point_"), zero_point)
return (scale_node, zero_point_node)
def all_node_args_have_no_tensors(node: Node, modules: Dict[str, torch.nn.Module], cache: Dict[Node, bool]) -> bool:
"""
If we know for sure that all of this node's args have no
tensors (are primitives), return True. If we either
find a tensor or are not sure, return False. Note: this
function is not exact.
"""
if cache and node in cache:
return cache[node]
result = False # will be overwritten
if not isinstance(node, Node):
result = True
elif node.op == 'placeholder':
result = False
elif node.op == 'call_module':
assert isinstance(node.target, str)
if is_activation_post_process(modules[node.target]):
result = all_node_args_have_no_tensors(node.args[0], modules, cache) # type: ignore[arg-type]
elif node.op == 'call_module':
result = False
elif node.op == 'call_function' and node.target is operator.getitem:
result = all_node_args_have_no_tensors(node.args[0], modules, cache) # type: ignore[arg-type]
elif node.op == 'get_attr':
result = False
elif node.target is getattr and node.args[1] in ['ndim', 'shape']:
# x1 = x0.ndim
result = True
elif node.op == 'call_method' and node.target == 'size':
# x1 = x0.size(0)
result = True
else:
found_one_tensor = False
for arg in node.args:
if isinstance(arg, list):
for list_el in arg:
if isinstance(list_el, Node):
this_list_el_args_have_no_tensors = \
all_node_args_have_no_tensors(list_el, modules, cache)
found_one_tensor = found_one_tensor or \
(not this_list_el_args_have_no_tensors)
# If found_one_tensor is True, there is no point in
# recursing further as the end result will always
# be True.
# TODO(future PR): remove this entire function and
# change to dtype inference without recursion.
if found_one_tensor:
result = not found_one_tensor
if cache:
cache[node] = result
return result
elif isinstance(arg, int):
pass
else:
if isinstance(arg, Node):
this_arg_args_have_no_tensors = all_node_args_have_no_tensors(arg, modules, cache)
found_one_tensor = found_one_tensor or \
(not this_arg_args_have_no_tensors)
# If found_one_tensor is True, there is no point in
# recursing further as the end result will always
# be True.
# TODO(future PR): remove this entire function and
# change to dtype inference without recursion.
if found_one_tensor:
result = not found_one_tensor
if cache:
cache[node] = result
return result
else:
found_one_tensor = True
result = not found_one_tensor
if cache:
cache[node] = result
return result
def all_node_args_except_first(node: Node) -> List[int]:
"""
Returns all node arg indices after first
"""
return list(range(1, len(node.args)))
def return_arg_list(arg_indices: List[int]) -> Callable[[Node], List[int]]:
"""
Constructs a function that takes a node as arg and returns the arg_indices
that are valid for node.args
"""
def arg_indices_func(node: Node) -> List[int]:
return [i for i in arg_indices if i < len(node.args)]
return arg_indices_func
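# Illustrative example (not part of the original file):
#   second_arg_only = return_arg_list([1])
#   second_arg_only(node)  # -> [1] if node has at least two args, else []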
NodeInfo = namedtuple("NodeInfo", "op target")
# this dict identifies which indices of a node are non tensors
# so that they can be propagated correctly since inserting observers
# for them would cause errors
NON_OBSERVABLE_ARG_DICT: Dict[NodeInfo, Dict[Union[type, torch.dtype], Callable[[Node], List[int]]]] = {
NodeInfo("call_method", "masked_fill") : {
torch.bool: return_arg_list([1]),
float: return_arg_list([2])
},
NodeInfo("call_method", "permute") : {
int: all_node_args_except_first
},
NodeInfo("call_method", "repeat") : {
int: all_node_args_except_first
},
NodeInfo("call_method", "reshape") : {
int: all_node_args_except_first
},
NodeInfo("call_method", "size") : {
int: return_arg_list([1])
},
NodeInfo("call_method", "transpose") : {
int: all_node_args_except_first
},
NodeInfo("call_method", torch.transpose) : {
int: all_node_args_except_first
},
NodeInfo("call_method", "unsqueeze") : {
int: return_arg_list([1])
},
NodeInfo("call_method", "unsqueeze_") : {
int: return_arg_list([1])
},
NodeInfo("call_method", torch.unsqueeze) : {
int: return_arg_list([1])
},
NodeInfo("call_method", "view") : {
int: all_node_args_except_first
},
}
EMPTY_ARG_DICT: Dict[Union[type, torch.dtype], Callable[[Node], List[int]]] = {}
def get_non_observable_arg_indexes_and_types(node: Node) -> Dict[Union[type, torch.dtype], Callable[[Node], List[int]]]:
"""
Returns a dict with non-float tensor types as keys; each value is a
function (which takes the node as an argument) that retrieves the corresponding list of arg indices
"""
info = NodeInfo(node.op, node.target)
return NON_OBSERVABLE_ARG_DICT.get(info, EMPTY_ARG_DICT)
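# Illustrative example (not part of the original file): for a call_method node with
# target "view", this returns {int: all_node_args_except_first}, i.e. for int-typed args
# every index after the first (the view sizes) should not get an observer.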
def node_return_type_is_int(node: Node) -> bool:
"""
Returns true if this node results in an integer, even if some of the args
are Tensors.
"""
return node.op == 'call_method' and node.target == 'size'
def is_get_tensor_info_node(node: Node) -> bool:
""" Returns True if this node is a node that takes a Tensor as input and output some
meta information about the Tensor, e.g. shape, size etc.
"""
result: bool = \
node.op == "call_function" and node.target == getattr and node.args[1] == "shape" # type: ignore[assignment]
return result
def maybe_get_next_module(
node: Node,
modules: Dict[str, nn.Module],
target_module_type: Optional[Type[nn.Module]] = None,
target_functional_type: Any = None,
) -> Optional[Node]:
""" Gets the next module that matches what is needed in
is_target_module_type if it exists
Args:
node: The node whose users we want to look at
target_module_type: Module type that we want to check
target_functional_type: Functional type that we want to check
"""
for user, _ in node.users.items():
if user.op == 'call_module' and target_module_type is not None and \
isinstance(modules[str(user.target)], target_module_type):
return user
elif (user.op == 'call_function' and target_functional_type is not None and
user.target == target_functional_type):
return user
return None
def create_node_from_old_node_preserve_meta(
quantized_graph: Graph,
create_node_args: Tuple[Any, ...],
old_node: Node,
) -> Node:
"""
Creates `new_node` and copies the necessary metadata to it from `old_node`.
"""
new_node = quantized_graph.create_node(*create_node_args)
new_node.stack_trace = old_node.stack_trace
return new_node
def get_skipped_module_name_and_classes(
prepare_custom_config: PrepareCustomConfig,
is_standalone_module: bool) -> Tuple[List[str], List[Type[Any]]]:
skipped_module_names = copy.copy(prepare_custom_config.non_traceable_module_names)
skipped_module_classes = copy.copy(prepare_custom_config.non_traceable_module_classes)
if not is_standalone_module:
# standalone module and custom module config are applied in top level module
skipped_module_names += list(prepare_custom_config.standalone_module_names.keys())
skipped_module_classes += list(prepare_custom_config.standalone_module_classes.keys())
skipped_module_classes += get_custom_module_class_keys(prepare_custom_config.float_to_observed_mapping)
return skipped_module_names, skipped_module_classes
| pytorch-master | torch/ao/quantization/fx/utils.py |
from .quantization_patterns import (
QuantizeHandler,
)
# TODO: remove
class CommonQuantizeHandler(QuantizeHandler):
""" Common quantized op, first input and first output will be quantized
"""
pass
| pytorch-master | torch/ao/quantization/fx/common_quantization_patterns.py |
import torch
from torch.ao.quantization.fx.pattern_utils import get_default_quant_patterns, sorted_patterns_dict
from torch.ao.quantization.backend_config import get_native_backend_config
from torch.ao.quantization.backend_config.observation_type import ObservationType
from torch.ao.quantization.quantization_types import (
Pattern,
NodePattern,
QuantizerCls,
)
from torch.ao.quantization.utils import (
activation_dtype,
get_combined_dict,
)
from ..backend_config import BackendConfig
from .quantization_patterns import QuantizeHandler
from .fusion_patterns import DefaultFuseHandler
from typing import Dict, Any, Callable, Optional
def get_quantize_handler_cls(
observation_type,
dtype_configs,
num_tensor_args_to_observation_type,
overwrite_output_fake_quantizer,
overwrite_output_observer,
input_output_observed):
class ConfigurableQuantizeHandler(QuantizeHandler):
def __init__(
self,
node_pattern: NodePattern,
modules: Dict[str, torch.nn.Module],
root_node_getter: Callable = None):
super().__init__(node_pattern, modules, root_node_getter)
if num_tensor_args_to_observation_type:
assert self.num_tensor_args in num_tensor_args_to_observation_type, \
f"Must provide observation_type config for tensor number {self.num_tensor_args}" \
f" in num_tensor_args_to_observation_type for {node_pattern}"
self.observation_type = num_tensor_args_to_observation_type[self.num_tensor_args]
else:
self.observation_type = observation_type
self.dtype_configs = dtype_configs
self.overwrite_output_fake_quantizer = overwrite_output_fake_quantizer
self.overwrite_output_observer = overwrite_output_observer
self.input_output_observed_ = input_output_observed
def is_general_tensor_value_op(self) -> bool:
return self.observation_type == ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
# TODO: change this to output activation
def get_activation_ctr(
self,
qconfig: Any,
pattern: Pattern,
is_training: bool,
) -> Optional[Callable]:
"""
Returns the constructor for the activation observer which should be
used for the pattern matched to this handler. Some handlers override
this to a different value than what is specified in the qconfig.
"""
act_dtype = activation_dtype(qconfig)
# TODO: change to is_qat
if is_training:
if act_dtype == torch.quint8 and self.overwrite_output_fake_quantizer is not None:
return self.overwrite_output_fake_quantizer
else:
if act_dtype == torch.quint8 and self.overwrite_output_observer is not None:
return self.overwrite_output_observer
return qconfig.activation
# This is temporary, and will be removed soon
def input_output_observed(self):
return self.input_output_observed_
return ConfigurableQuantizeHandler
def get_pattern_to_quantize_handlers(backend_config: BackendConfig) -> Dict[Pattern, QuantizerCls]:
"""
Note: QuantizeHandler is just a holder for some check methods like
(should_insert_observer_for_output); maybe this can be an enum as well.
We can refactor this after we convert the path for fbgemm/qnnpack fully to the
new path; this is not exposed to backend developers
"""
pattern_to_quantize_handlers = dict()
for pattern, config in backend_config.configs.items():
observation_type = config.observation_type
dtype_configs = config.dtype_configs
num_tensor_args_to_observation_type = config._num_tensor_args_to_observation_type
overwrite_fake_quantizer = config._overwrite_output_fake_quantize
overwrite_observer = config._overwrite_output_observer
input_output_observed = config._input_output_observed
if input_output_observed is None:
input_output_observed = True
pattern_to_quantize_handlers[pattern] = \
get_quantize_handler_cls(
observation_type,
dtype_configs,
num_tensor_args_to_observation_type,
overwrite_fake_quantizer,
overwrite_observer,
input_output_observed)
return pattern_to_quantize_handlers
# TODO: move this to torch/ao/quantization/backend_config/utils.py
def get_fusion_pattern_to_fuse_handler_cls(
backend_config: BackendConfig) -> Dict[Pattern, Callable]:
fusion_pattern_to_fuse_handlers: Dict[Pattern, Callable] = dict()
for pattern, config in backend_config.configs.items():
if config.fuser_method is not None:
# TODO: is this logic right?
fusion_pattern_to_fuse_handlers[pattern] = DefaultFuseHandler
return fusion_pattern_to_fuse_handlers
# TODO: remove when all uses are changed to backend_config
def get_native_quant_patterns(additional_quant_patterns: Dict[Pattern, QuantizerCls] = None) -> Dict[Pattern, QuantizerCls]:
"""
Return a map from pattern to quantize handlers based on the default patterns and the native backend_config.
The returned map is sorted such that longer patterns will be encountered first when iterating through it.
"""
patterns = get_default_quant_patterns()
if additional_quant_patterns is not None:
patterns = get_combined_dict(patterns, additional_quant_patterns)
# TODO: currently we just extend the quantize handlers generated from
# `get_native_backend_config`
# in the future we can just assign backend_config when everything is defined
for pattern, quantize_handler in get_pattern_to_quantize_handlers(get_native_backend_config()).items():
patterns[pattern] = quantize_handler
return sorted_patterns_dict(patterns)
get_fusion_pattern_to_fuse_handler_cls.__module__ = "torch.ao.quantization.fx.backend_config_utils"
get_native_quant_patterns.__module__ = "torch.ao.quantization.fx.backend_config_utils"
get_pattern_to_quantize_handlers.__module__ = "torch.ao.quantization.fx.backend_config_utils"
__all__ = [
"get_fusion_pattern_to_fuse_handler_cls",
"get_native_quant_patterns",
"get_pattern_to_quantize_handlers",
]
| pytorch-master | torch/ao/quantization/fx/backend_config_utils.py |
from collections import OrderedDict
from typing import Dict, Any
from torch.ao.quantization.quantization_types import Pattern
from ..fake_quantize import FixedQParamsFakeQuantize
# from .quantization_patterns import BinaryOpQuantizeHandler
from ..observer import ObserverBase
import copy
# TODO(future PR): fix the typing on QuantizeHandler (currently a circular dependency)
QuantizeHandler = Any
# pattern for conv bn fusion
DEFAULT_FUSION_PATTERNS = OrderedDict()
def register_fusion_pattern(pattern):
def insert(fn):
DEFAULT_FUSION_PATTERNS[pattern] = fn
return fn
return insert
def get_default_fusion_patterns() -> Dict[Pattern, QuantizeHandler]:
return copy.copy(DEFAULT_FUSION_PATTERNS)
DEFAULT_QUANTIZATION_PATTERNS = OrderedDict()
# Mapping from pattern to activation_post_process(observer/fake_quant) constructor for output activation
# e.g. pattern: torch.sigmoid,
# output_activation_post_process: default_fixed_qparams_range_0to1_fake_quant
DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP = dict()
DEFAULT_OUTPUT_OBSERVER_MAP = dict()
# Register pattern for both static quantization and qat
def register_quant_pattern(pattern, fixed_qparams_observer=None):
def insert(fn):
DEFAULT_QUANTIZATION_PATTERNS[pattern] = fn
if fixed_qparams_observer is not None:
DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP[pattern] = FixedQParamsFakeQuantize.with_args(observer=fixed_qparams_observer)
DEFAULT_OUTPUT_OBSERVER_MAP[pattern] = fixed_qparams_observer
return fn
return insert
# Get patterns for both static quantization and qat
def get_default_quant_patterns() -> Dict[Pattern, QuantizeHandler]:
return copy.copy(DEFAULT_QUANTIZATION_PATTERNS)
# a map from pattern to output activation post process constructor
# e.g. torch.sigmoid -> default_affine_fixed_qparam_fake_quant
def get_default_output_activation_post_process_map(is_training) -> Dict[Pattern, ObserverBase]:
if is_training:
return copy.copy(DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP)
else:
return copy.copy(DEFAULT_OUTPUT_OBSERVER_MAP)
# Example use of register pattern function:
# @register_fusion_pattern(torch.nn.ReLU, (torch.nn.BatchNorm2d, torch.nn.Conv2d)))
# class ConvOrLinearBNReLUFusion():
# def __init__(...):
# ...
#
def sorted_patterns_dict(patterns_dict: Dict[Pattern, QuantizeHandler]) -> Dict[Pattern, QuantizeHandler]:
"""
Return a sorted version of the patterns dictionary such that longer patterns are matched first,
e.g. match (F.relu, F.linear) before F.relu.
This works for current use cases, but we may need to have a more clever way to sort
things to address more complex patterns
"""
def get_len(pattern):
""" this will calculate the length of the pattern by counting all the entries
in the pattern.
this will make sure (nn.ReLU, (nn.BatchNorm, nn.Conv2d)) comes before
(nn.BatchNorm, nn.Conv2d) so that we can match the former first
"""
len = 0
if isinstance(pattern, tuple):
for item in pattern:
len += get_len(item)
else:
len += 1
return len
return OrderedDict(sorted(patterns_dict.items(), key=lambda kv: -get_len(kv[0]) if isinstance(kv[0], tuple) else 1))
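# Illustrative example (not part of the original file): with a patterns dict
# {F.relu: handler1, (F.relu, F.linear): handler2}, sorted_patterns_dict returns an
# OrderedDict that yields (F.relu, F.linear) before F.relu, so the longer fused
# pattern is matched first.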
| pytorch-master | torch/ao/quantization/fx/pattern_utils.py |
import torch
from torch.fx import map_arg, Node
from torch.fx.graph import Graph
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.quantized.dynamic as nniqd
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
import torch.nn.quantized._reference as nnqr
from torch.nn.quantized.modules.utils import WeightedQuantizedModule
from .graph_module import QuantizedGraphModule
from .utils import (
collect_producer_nodes,
get_linear_prepack_op_for_dtype,
get_new_attr_name_with_prefix,
get_qconv_prepack_op,
graph_module_from_producer_nodes,
)
from ..utils import _parent_name
from ..qconfig import QConfigAny
from ..quantization_mappings import get_quantized_operator
from .utils import create_node_from_old_node_preserve_meta
from typing import Dict, Tuple, Type, List, Callable, Any, Union, Set, Optional
import operator
QOP_TO_ARG_NAMES_TO_SKIP = {
torch._ops.ops.quantized.hardswish: ['inplace'],
torch._ops.ops.quantized.elu: ['inplace'],
torch._ops.ops.quantized.dropout: ['inplace'],
torch._ops.ops.quantized.instance_norm:
['running_mean', 'running_var', 'use_input_stats', 'momentum'],
}
def _is_node_in_list(node, modules, func_list, method_list, module_type_list):
is_call_function = node.op == "call_function" and node.target in func_list
is_call_method = node.op == "call_method" and node.target in method_list
is_call_module = node.op == "call_module" and type(modules[str(node.target)]) in module_type_list
return is_call_function, is_call_method, is_call_module
def is_fixed_qparams_node(node, modules):
func_list = [
torch.nn.functional.hardsigmoid,
torch.nn.functional.sigmoid,
torch.sigmoid,
torch.tanh,
]
method_list = [
"hardsigmoid",
"hardsigmoid_",
"sigmoid",
"sigmoid_",
"tanh",
"tanh_",
]
module_type_list = [
torch.nn.Hardsigmoid,
torch.nn.Sigmoid,
torch.nn.Tanh,
torch.nn.Softmax,
]
return _is_node_in_list(node, modules, func_list, method_list, module_type_list)
def is_default_node(node, modules):
func_list = [
torch.nn.functional.elu,
torch.nn.functional.hardswish,
torch.nn.functional.instance_norm,
torch.nn.functional.layer_norm,
torch.nn.functional.leaky_relu,
torch.nn.functional.dropout,
]
method_list: List[Any] = []
module_type_list = [
nnqr.ConvTranspose1d,
nnqr.ConvTranspose2d,
torch.nn.ELU,
torch.nn.LeakyReLU,
torch.nn.Hardswish,
torch.nn.InstanceNorm1d,
torch.nn.InstanceNorm2d,
torch.nn.InstanceNorm3d,
torch.nn.LayerNorm,
torch.nn.Dropout,
torch.nn.PReLU,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.intrinsic.BNReLU2d,
torch.nn.intrinsic.BNReLU3d,
]
return _is_node_in_list(node, modules, func_list, method_list, module_type_list)
def is_copy_node(node, modules):
func_list = [
torch.adaptive_avg_pool1d,
torch.nn.functional.adaptive_avg_pool2d,
torch.nn.functional.adaptive_avg_pool3d,
torch.nn.functional.hardtanh,
torch.nn.functional.hardtanh_,
torch.nn.functional.interpolate,
torch.nn.functional.max_pool1d,
torch.nn.functional.max_pool2d,
torch.nn.functional.max_pool3d,
torch.nn.functional.relu,
torch.nn.functional.relu6,
torch.avg_pool1d,
torch._C._nn.avg_pool2d,
torch._C._nn.avg_pool3d,
torch.clamp,
torch.flatten,
torch.mean,
operator.floordiv,
]
method_list = [
"clamp",
"mean",
"relu",
"relu_",
]
module_type_list = [
torch.nn.AdaptiveAvgPool1d,
torch.nn.AdaptiveAvgPool2d,
torch.nn.AdaptiveAvgPool3d,
torch.nn.AvgPool1d,
torch.nn.AvgPool2d,
torch.nn.AvgPool3d,
torch.nn.Hardtanh,
torch.nn.MaxPool1d,
torch.nn.MaxPool2d,
torch.nn.MaxPool3d,
torch.nn.ReLU,
torch.nn.ReLU6,
]
return _is_node_in_list(node, modules, func_list, method_list, module_type_list)
def is_general_tensor_shape_node(node, modules):
func_list = [
torch.transpose,
torch.repeat_interleave,
torch.squeeze,
torch.stack,
torch.unsqueeze,
]
method_list = [
"contiguous",
"detach",
"detach_",
"permute",
"repeat",
"repeat_interleave",
"reshape",
"resize_",
"shape",
"size",
"squeeze",
"squeeze_",
"transpose",
"unsqueeze",
"unsqueeze_",
"view",
]
module_type_list = [
torch.nn.Identity,
]
return _is_node_in_list(node, modules, func_list, method_list, module_type_list)
def is_other_node(node, modules):
func_list = [
torch.cat,
]
method_list: List[Any] = []
module_type_list: List[Any] = []
return _is_node_in_list(node, modules, func_list, method_list, module_type_list)
def is_special_pattern_node(node, modules):
res_function, res_method, res_module = False, False, False
for checker in [is_fixed_qparams_node, is_default_node, is_copy_node, is_general_tensor_shape_node, is_other_node]:
is_call_function, is_call_method, is_call_module = checker(node, modules)
res_function = res_function or is_call_function
res_method = res_method or is_call_method
res_module = res_module or is_call_module
return res_function, res_method, res_module
def is_dequantize_node(node):
return isinstance(node, Node) and node.op == "call_method" and node.target == "dequantize"
def is_getattr_tensor_metadata_node(node):
return node.op == "call_function" and \
node.target == getattr and \
node.args[1] in ["shape"]
def should_skip_lowering(op: torch.fx.node.Node, qconfig_map: Dict[str, QConfigAny]):
"""
Return True if the op is configured with a None qconfig, False otherwise.
Note: maybe need to generalize this to also check for the dtype, and we
only lower when dtype matches, but right now fbgemm/qnnpack only support
a single dtype, so it is OK for now.
"""
return op.name in qconfig_map and qconfig_map[op.name] is None
# Mapping from reference module class to the replacement static quantized module class for lowering
STATIC_LOWER_MODULE_MAP: Dict[Type[nn.Module], Type[WeightedQuantizedModule]] = {
nnqr.Linear: nnq.Linear,
nnqr.Conv1d: nnq.Conv1d,
nnqr.Conv2d: nnq.Conv2d,
nnqr.Conv3d: nnq.Conv3d,
}
# Mapping from reference module class to the replacement dynamic quantized module class for lowering
DYNAMIC_LOWER_MODULE_MAP: Dict[Type[nn.Module], Type[nn.Module]] = {
nnqr.Linear: nnqd.Linear,
nnqr.GRUCell: nnqd.GRUCell,
nnqr.LSTMCell: nnqd.LSTMCell,
nnqr.RNNCell: nnqd.RNNCell,
nnqr.LSTM: nnqd.LSTM,
}
# Mapping from reference module class to the replacement weight only quantized module class for lowering
# TODO: correct the namespace for these modules
WEIGHT_ONLY_LOWER_MODULE_MAP: Dict[Type[nn.Module], Type[nn.Module]] = {
nnqr.Embedding: nnq.Embedding,
nnqr.EmbeddingBag: nnq.EmbeddingBag,
}
# TODO: merge with STATIC_LOWER_MODULE_MAP after we merge
# _lower_static_weighted_ref_module and special_pattern_replacement
SPECIAL_PATTERN_LOWER_MODULE_MAP = {
nn.BatchNorm2d: nnq.BatchNorm2d,
nn.BatchNorm3d: nnq.BatchNorm3d,
nnqr.ConvTranspose1d: nnq.ConvTranspose1d,
nnqr.ConvTranspose2d: nnq.ConvTranspose2d,
nn.ELU: nnq.ELU,
nn.LeakyReLU: nnq.LeakyReLU,
nn.Hardswish: nnq.Hardswish,
nn.InstanceNorm1d: nnq.InstanceNorm1d,
nn.InstanceNorm2d: nnq.InstanceNorm2d,
nn.InstanceNorm3d: nnq.InstanceNorm3d,
nn.LayerNorm: nnq.LayerNorm,
nn.Dropout: nnq.Dropout,
nn.Softmax: nnq.Softmax,
nn.PReLU: nnq.PReLU,
nni.BNReLU2d: nniq.BNReLU2d,
nni.BNReLU3d: nniq.BNReLU3d,
}
# Mapping from fused module class to a 2-tuple of:
# 1) The inner reference module class
# 2) The replacement static quantized module class for lowering
STATIC_LOWER_FUSED_MODULE_MAP: Dict[Type[nn.Module], Tuple[Type[nn.Module], Type[WeightedQuantizedModule]]] = {
nni.LinearReLU: (nnqr.Linear, nniq.LinearReLU),
nni.ConvReLU1d: (nnqr.Conv1d, nniq.ConvReLU1d),
nni.ConvReLU2d: (nnqr.Conv2d, nniq.ConvReLU2d),
nni.ConvReLU3d: (nnqr.Conv3d, nniq.ConvReLU3d),
}
# Mapping from fused module class to a 2-tuple of:
# 1) The inner reference module class
# 2) The replacement dynamic quantized module class for lowering
DYNAMIC_LOWER_FUSED_MODULE_MAP: Dict[Type[nn.Module], Tuple[Type[nn.Module], Type[nn.Module]]] = {
nni.LinearReLU: (nnqr.Linear, nniqd.LinearReLU),
}
# Mapping from a functional to lower to a 2-tuple of
# 1) The quantized version of the op
# 2) The quantized version of the op fused with relu, if it exists, else None
STATIC_LOWER_FUNCTIONAL_MAP: Dict[Callable, Tuple[Callable, Callable]] = {
F.linear: (torch.ops.quantized.linear, torch.ops.quantized.linear_relu),
F.conv1d: (torch.ops.quantized.conv1d, torch.ops.quantized.conv1d_relu),
F.conv2d: (torch.ops.quantized.conv2d, torch.ops.quantized.conv2d_relu),
F.conv3d: (torch.ops.quantized.conv3d, torch.ops.quantized.conv3d_relu),
}
WEIGHT_PREPACK_OPS: Set[Callable] = {
torch._ops.ops.quantized.linear_prepack,
torch._ops.ops.quantized.linear_prepack_fp16,
torch._ops.ops.quantized.conv1d_prepack,
torch._ops.ops.quantized.conv2d_prepack,
torch._ops.ops.quantized.conv3d_prepack,
}
# Mapping from a functional to a dictionary, where the key is a 2-tuple of
# (activation_compute_dtype, weight_dtype) and the value is a 2-tuple of
# 1) The dynamically quantized version of the op
# 2) The dynamically quantized version of the op fused with relu, if it exists, else None
DYNAMIC_LOWER_FUNCTIONAL_MAP: Dict[Callable, Dict[Tuple[torch.dtype, torch.dtype], Tuple[Callable, Optional[Callable]]]] = {
F.linear: {
(torch.quint8, torch.qint8): (torch.ops.quantized.linear_dynamic,
torch.ops.quantized.linear_relu_dynamic),
(torch.float16, torch.float16): (torch.ops.quantized.linear_dynamic_fp16,
torch.ops.quantized.linear_relu_dynamic_fp16)
},
# dynamic conv + relu is not available yet
F.conv1d: {
(torch.quint8, torch.qint8): (torch.ops.quantized.conv1d_dynamic, None),
},
F.conv2d: {
(torch.quint8, torch.qint8): (torch.ops.quantized.conv2d_dynamic, None),
},
F.conv3d: {
(torch.quint8, torch.qint8): (torch.ops.quantized.conv3d_dynamic, None),
},
}
CONV_FUNCTIONAL_OPS: Set[Callable] = {
F.conv1d,
F.conv2d,
F.conv3d,
}
QBIN_OP_MAPPING: Dict[Union[Callable, str], Callable] = {
operator.add: torch.ops.quantized.add,
torch.add: torch.ops.quantized.add,
operator.mul: torch.ops.quantized.mul,
torch.mul: torch.ops.quantized.mul,
torch.matmul: torch.ops.quantized.matmul,
}
QBIN_RELU_OP_MAPPING: Dict[Union[Callable, str], Callable] = {
operator.add: torch.ops.quantized.add_relu,
torch.add: torch.ops.quantized.add_relu,
operator.mul: torch.ops.quantized.mul_relu,
torch.mul: torch.ops.quantized.mul_relu,
}
def fold_weight(
quantized: QuantizedGraphModule,
node_name_to_scope: Dict[str, Tuple[str, type]]
) -> QuantizedGraphModule:
"""
Trace back from the weight node until we hit getattr, reconstruct the
graph module with the traced nodes and run the graph module to pack the
weight. Then replace the original chain of ops with the packed weight.
"""
packed_weights = dict()
# map from folded node name to the prepacked weight name
folded_nodes = dict()
# get packed weights
for node in quantized.graph.nodes:
if node.op == 'call_function' and node.target in WEIGHT_PREPACK_OPS:
nodes_to_fold = collect_producer_nodes(node)
if nodes_to_fold is not None:
for node_to_fold in nodes_to_fold:
folded_nodes[node_to_fold.name] = node
prepacking_module = graph_module_from_producer_nodes(
quantized, nodes_to_fold)
packed_weight = prepacking_module()
packed_weights[node.name] = packed_weight
# remove folded nodes and replace the prepacking node with getattr
folded_graph = Graph()
env: Dict[Any, Any] = {}
def load_arg(a):
return map_arg(a, lambda node: env[node.name])
quantized_root = quantized
quantized_graph = quantized.graph
for node in quantized_graph.nodes:
prepack_node = folded_nodes.get(node.name, None)
if prepack_node is node:
packed_weight = packed_weights[node.name]
# add a prepacked attribute to root
op_node = list(prepack_node.users)[0]
module_path, _ = node_name_to_scope[op_node.name]
get_new_packed_weight_name = \
get_new_attr_name_with_prefix(module_path + '_packed_weight_')
packed_weight_name = get_new_packed_weight_name(quantized_root)
setattr(quantized_root, packed_weight_name, packed_weight)
# replace prepack node with a getattr node
env[node.name] = folded_graph.create_node(
'get_attr', packed_weight_name, (), {})
elif prepack_node is not None:
# remove the folded node
continue
else:
# copy other nodes
env[node.name] = folded_graph.node_copy(node, load_arg)
return QuantizedGraphModule(quantized_root, folded_graph, quantized_root.preserved_attr_names)
def _get_module(node: Node, modules: Dict[str, nn.Module]) -> Optional[nn.Module]:
"""
Return the `torch.nn.Module` that corresponds to the specified node's target.
If no such module exists, return None.
"""
if node.op == "call_module" and str(node.target) in modules:
return modules[str(node.target)]
else:
return None
def _match_static_pattern(
node: Node,
modules: Dict[str, nn.Module],
qconfig_map: Dict[str, QConfigAny],
matching_modules_or_ops: List[Callable],
dequantize_node_arg_indices: List[int]
) -> Union[Tuple[Node, Node, Node], Tuple[None, None, None]]:
"""
Match the pattern (dequantize - ref node - quantize) against the node provided.
If there is a match, return a 3-tuple of:
1) q_node: the quantize node,
2) relu_node: a relu node wrapping the ref_node, and
3) ref_node: a reference module or functional node to replace with its quantized counterpart
Otherwise, if there is no match, return a 3-tuple of (None, None, None).
Parameters:
node: The `torch.fx.Node` to match against.
modules: A mapping from node names to modules in the model graph, used for module lookup.
qconfig_map: A mapping from node names to the qconfigs associated with the nodes.
If the corresponding qconfig for the reference node is None, then return no match.
matching_modules_or_ops: Either a list of functions or a list of `torch.nn.Module`s.
If the reference node is not in this list, then return no match.
dequantize_node_arg_indices: A list of indices in the reference node args where dequantize
nodes may be present. An empty list means skipping the check for dequantize nodes.
"""
SKIP_LOWERING_VALUE = (None, None, None)
# Match quantize node
if node.op != "call_function" or node.target != torch.quantize_per_tensor:
return SKIP_LOWERING_VALUE
q_node = node
ref_node = q_node.args[0]
assert(isinstance(ref_node, Node))
# Handle cases where the node is wrapped in a ReLU
if (ref_node.op == "call_function" and ref_node.target in (F.relu, torch.relu)) or\
(ref_node.op == "call_module" and type(_get_module(ref_node, modules)) == nn.ReLU):
relu_node = ref_node
ref_node = relu_node.args[0]
assert(isinstance(ref_node, Node))
else:
relu_node = None
if should_skip_lowering(ref_node, qconfig_map):
return SKIP_LOWERING_VALUE
# Match reference module or functional
if isinstance(matching_modules_or_ops[0], type) and issubclass(matching_modules_or_ops[0], nn.Module):
expected_op = "call_module"
match_key = type(_get_module(ref_node, modules))
else:
expected_op = "call_function"
match_key = ref_node.target
if ref_node.op != expected_op or match_key not in matching_modules_or_ops:
return SKIP_LOWERING_VALUE
# Match dequantize node(s). Both of the following conditions must pass:
# (1) All `torch.fx.Node`s at the matching indices must be a dequantize node
# (2) There must be at least one dequantize node
matched_dequantize = False
for i in dequantize_node_arg_indices:
assert i < len(ref_node.args),\
"Dequantize index %s exceeded reference node's arg length %s" % (i, len(ref_node.args))
arg = ref_node.args[i]
if is_dequantize_node(arg):
matched_dequantize = True
elif isinstance(arg, Node):
return SKIP_LOWERING_VALUE
if not matched_dequantize:
return SKIP_LOWERING_VALUE
return (q_node, relu_node, ref_node)
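# Illustrative example (not part of the original file): for a reference linear module,
# the matched chain looks like
#   x -> dequantize -> ref_linear -> [relu] -> quantize_per_tensor
# and calling _match_static_pattern on the quantize node returns
#   (quantize_node, relu_node_or_None, ref_linear_node).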
def _lower_static_weighted_ref_module(
model: QuantizedGraphModule,
qconfig_map: Dict[str, QConfigAny]):
"""
Traverse the graph and find dequantize - ref module - quantize patterns
and replace them with the quantized version of the ref module.
"""
modules = dict(model.named_modules(remove_duplicate=False))
nodes = list(model.graph.nodes)
for n in model.graph.nodes:
# Step 0: Find nodes that match this pattern (dequantize - ref module - quantize)
matching_modules = list(STATIC_LOWER_MODULE_MAP.keys()) + list(STATIC_LOWER_FUSED_MODULE_MAP.keys())
(q_node, relu_node, ref_node) = _match_static_pattern(
n, modules, qconfig_map, matching_modules, dequantize_node_arg_indices=[0]) # type: ignore[arg-type]
if q_node is None:
continue
assert(ref_node is not None)
(_, scale_node, zero_point_node, _) = q_node.args
ref_module = _get_module(ref_node, modules)
ref_class = type(ref_module)
assert(isinstance(scale_node, Node))
assert(isinstance(zero_point_node, Node))
assert(issubclass(ref_class, nn.Module))
# Step 1: Change this pattern to use the corresponding quantized module
# For fused modules, we also check whether the inner module is a reference module
# If so, we replace the entire fused module with the corresponding quantized module
if ref_class in STATIC_LOWER_FUSED_MODULE_MAP:
inner_ref_class, q_class = STATIC_LOWER_FUSED_MODULE_MAP[ref_class]
if type(ref_module[0]) != inner_ref_class: # type: ignore[index]
continue
else:
q_class = STATIC_LOWER_MODULE_MAP[ref_class]
output_scale = getattr(model, scale_node.target)
output_zero_point = getattr(model, zero_point_node.target)
q_module = q_class.from_reference(ref_module, output_scale, output_zero_point)
# replace reference module with quantized module
parent_name, module_name = _parent_name(ref_node.target)
setattr(modules[parent_name], module_name, q_module)
# Step 2: Remove dq_node, q_node and its args
dq_node = ref_node.args[0]
assert(isinstance(dq_node, Node))
dq_node.replace_all_uses_with(dq_node.args[0])
model.graph.erase_node(dq_node)
q_node.replace_all_uses_with(ref_node)
model.graph.erase_node(q_node)
model.graph.erase_node(scale_node)
model.graph.erase_node(zero_point_node)
def _lower_dynamic_weighted_ref_module(model: QuantizedGraphModule):
"""
Traverse the graph and find quantize_per_tensor_dynamic - dequantize - ref_module patterns
and replace them with the dynamically quantized version of the ref module.
"""
named_modules = dict(model.named_modules(remove_duplicate=False))
for n in model.graph.nodes:
if n.op != "call_module" or \
type(named_modules[str(n.target)]) not in \
set(DYNAMIC_LOWER_MODULE_MAP.keys()).union(
set(DYNAMIC_LOWER_FUSED_MODULE_MAP.keys())):
continue
ref_node = n
dq_node = ref_node.args[0]
if dq_node.op != "call_method" or dq_node.target != "dequantize":
continue
# don't support lowering the pattern when the result of dequantize is used by
# multiple nodes
if len(dq_node.users) > 1:
continue
input_dynamic_q_node = dq_node.args[0]
# don't support lowering the pattern when the result of quantize is used by
# multiple nodes
if len(input_dynamic_q_node.users) > 1:
continue
if input_dynamic_q_node.op != "call_function" or \
input_dynamic_q_node.target != torch.quantize_per_tensor_dynamic:
continue
activation_compute_dtype = input_dynamic_q_node.args[1]
is_fp16 = activation_compute_dtype == torch.float16
is_int8 = activation_compute_dtype in [torch.quint8, torch.qint8]
if not is_int8 and not is_fp16:
continue
ref_module = named_modules[str(ref_node.target)]
ref_class = type(ref_module)
if ref_class in DYNAMIC_LOWER_FUSED_MODULE_MAP:
inner_ref_class, q_class = DYNAMIC_LOWER_FUSED_MODULE_MAP[ref_class]
if type(ref_module[0]) != inner_ref_class:
continue
else:
q_class = DYNAMIC_LOWER_MODULE_MAP.get(ref_class) # type: ignore[assignment]
# TODO: maybe define a WeightedDynamicallyQuantizedModule
q_module = q_class.from_reference(ref_module) # type: ignore[attr-defined]
# replace reference module with dynamically quantized module
parent_name, module_name = _parent_name(ref_node.target)
setattr(named_modules[parent_name], module_name, q_module)
# remove q - dq node
dq_node.replace_all_uses_with(input_dynamic_q_node)
model.graph.erase_node(dq_node)
input_dynamic_q_node.replace_all_uses_with(input_dynamic_q_node.args[0])
model.graph.erase_node(input_dynamic_q_node)
def _lower_weight_only_weighted_ref_module(model: QuantizedGraphModule):
"""
Traverse the graph and find ref_module patterns
and replace them with the weight only quantized version of the ref module.
"""
named_modules = dict(model.named_modules(remove_duplicate=False))
for n in model.graph.nodes:
if n.op != "call_module" or \
type(named_modules[str(n.target)]) not in \
set(WEIGHT_ONLY_LOWER_MODULE_MAP.keys()):
continue
ref_node = n
ref_module = named_modules[str(ref_node.target)]
ref_class = type(ref_module)
q_class = WEIGHT_ONLY_LOWER_MODULE_MAP.get(ref_class)
# TODO: WeightedQuantizedModule is currently assuming static quant apis
# with output_scale, output_zero_point in from_reference, we may want to
# relax that, or rename this
# TODO: maybe define a WeightedWeightOnlyQuantizedModule
q_module = q_class.from_reference(ref_module) # type: ignore[union-attr]
# replace reference module with weight only quantized module
parent_name, module_name = _parent_name(ref_node.target)
setattr(named_modules[parent_name], module_name, q_module)
def _lower_static_weighted_ref_functional(
model: QuantizedGraphModule,
qconfig_map: Dict[str, QConfigAny]):
"""
Traverse the graph and replace functional reference patterns with their quantized versions.
"""
modules = dict(model.named_modules(remove_duplicate=False))
nodes = list(model.graph.nodes)
for n in model.graph.nodes:
# Step 0: Find nodes that match this pattern (dequantize - functional op - quantize)
matching_ops = list(STATIC_LOWER_FUNCTIONAL_MAP.keys())
(q_node, relu_node, func_node) = _match_static_pattern(
n, modules, qconfig_map, matching_ops, dequantize_node_arg_indices=[0, 1])
if q_node is None:
continue
assert(func_node is not None)
(_, output_scale_node, output_zp_node, _) = q_node.args
(input_dq_node, weight_dq_node, *remaining_func_args) = func_node.args
assert(isinstance(output_zp_node, Node))
assert(isinstance(input_dq_node, Node))
assert(isinstance(weight_dq_node, Node))
quantized_weight = weight_dq_node.args[0]
assert(isinstance(quantized_weight, Node))
if quantized_weight.op != "call_function" or\
quantized_weight.target not in (torch.quantize_per_tensor, torch.quantize_per_channel):
continue
# Step 1: Replace quantized weights with packed weights, which will be folded later
# Use the right prepack op and prepare the corresponding args
# Linear prepack args: (quantized weights[, bias])
# Conv prepack args: (quantized weights[, bias, stride, padding, dilation, groups])
prepack_args = [quantized_weight] + remaining_func_args
if func_node.target == F.linear:
weight_dtype = quantized_weight.args[-1]
prepack_op = get_linear_prepack_op_for_dtype(weight_dtype)
elif func_node.target in CONV_FUNCTIONAL_OPS:
prepack_op = get_qconv_prepack_op(func_node.target) # type: ignore[arg-type]
# For conv1d, the stride, padding, and dilation args may be ints,
# in which case we need to convert them to tuples
if func_node.target == F.conv1d:
for i in [2, 3, 4]:
if len(prepack_args) > i and isinstance(prepack_args[i], int):
prepack_args[i] = (prepack_args[i],)
else:
raise ValueError("Lowering is not supported for op '%s'" % func_node.target)
with model.graph.inserting_before(output_scale_node):
packed_weight = model.graph.create_node("call_function", prepack_op, tuple(prepack_args), {})
# Step 2: Replace reference pattern with the corresponding quantized op
(q_func, q_relu_func) = STATIC_LOWER_FUNCTIONAL_MAP[func_node.target] # type: ignore[index]
func_node.target = q_relu_func if relu_node is not None else q_func
func_node.args = (input_dq_node.args[0], packed_weight, output_scale_node, output_zp_node)
q_node.replace_all_uses_with(func_node)
# Move func_node after output_zp_node in the graph
output_zp_node.append(func_node)
# Clean up: Remove dequantize and quantize nodes, and the relu node if it exists
for dqn in [input_dq_node, weight_dq_node]:
dqn_input = dqn.args[0]
dqn.replace_all_uses_with(dqn_input)
model.graph.erase_node(dqn)
model.graph.erase_node(q_node)
if relu_node is not None:
model.graph.erase_node(relu_node)
def _lower_dynamic_weighted_ref_functional(
model: QuantizedGraphModule,
qconfig_map: Dict[str, QConfigAny]):
"""
Traverse the graph and replace functional reference patterns with their dynamically
quantized versions.
Examples:
quantize_per_tensor_dynamic - dequantize - functional linear --> linear_dynamic
to(torch.float16) - dequantize - functional linear --> linear_dynamic_fp16
"""
modules = dict(model.named_modules(remove_duplicate=False))
nodes = list(model.graph.nodes)
# we want to search in reversed order so that we can match the larger patterns first
# e.g. we want to match linear - relu before linear.
for n in reversed(model.graph.nodes):
# Step 0: Find nodes that match this pattern
# (quantize_per_tensor_dynamic - dequantize - dynamically quantized op)
# We search for the pattern backwards, starting with the quantize node
# Quantize node args: (func, scale, zp, dtype)
func_node = n
# Handle cases where the functional op is wrapped in a ReLU
if func_node.op == "call_function" and func_node.target == F.relu or \
func_node.op == "call_module" and \
type(modules[str(func_node.target)]) == torch.nn.ReLU:
relu_node = func_node
func_node = relu_node.args[0]
else:
relu_node = None
if should_skip_lowering(func_node, qconfig_map):
continue
# Linear args: (dequantized inputs, dequantized weights[, bias])
# Conv args: (dequantized inputs, dequantized weights[, bias, stride, padding, dilation, groups])
if func_node.op != "call_function" or func_node.target not in DYNAMIC_LOWER_FUNCTIONAL_MAP:
continue
(input_dq_node, weight_dq_node, *remaining_func_args) = func_node.args
if input_dq_node.op != "call_method" or input_dq_node.target != "dequantize" or \
weight_dq_node.op != "call_method" or weight_dq_node.target != "dequantize":
continue
input_dynamic_q_node = input_dq_node.args[0]
# don't support lowering the pattern when the result of quantize is used by
# multiple nodes
if len(input_dynamic_q_node.users) > 1:
continue
if input_dynamic_q_node.op != "call_function" or \
input_dynamic_q_node.target != torch.quantize_per_tensor_dynamic:
continue
reduce_range_node = None
(pattern_input, activation_compute_dtype, reduce_range_node) = input_dynamic_q_node.args
is_fp16 = activation_compute_dtype == torch.float16
is_int8 = activation_compute_dtype in [torch.quint8, torch.qint8]
if not is_int8 and not is_fp16:
continue
quantized_weight = weight_dq_node.args[0]
weight_dtype = quantized_weight.args[-1]
# Step 1: Try to select reference pattern with the corresponding quantized op
dynamic_quant_dtype_key = (activation_compute_dtype, weight_dtype)
if dynamic_quant_dtype_key not in DYNAMIC_LOWER_FUNCTIONAL_MAP[func_node.target]:
print(f"Didn't find dtype combination {dynamic_quant_dtype_key} during "
f"dynamic quantized op lowering for {func_node.target}")
continue
(q_func, q_relu_func) = DYNAMIC_LOWER_FUNCTIONAL_MAP[func_node.target][dynamic_quant_dtype_key]
if q_func is None or q_relu_func is None:
print("Didn't find corresponding quantized function or quantized relu function "
f"for {func_node.target}, {dynamic_quant_dtype_key}")
continue
# Step 2: Replace quantized weights with packed weights, which will be folded later
# Use the right prepack op and prepare the corresponding args
# Linear prepack args: (quantized weights[, bias])
# Conv prepack args: (quantized weights[, bias, stride, padding, dilation, groups])
prepack_args = [quantized_weight] + remaining_func_args
if func_node.target == F.linear:
prepack_op = get_linear_prepack_op_for_dtype(weight_dtype)
elif func_node.target in CONV_FUNCTIONAL_OPS:
prepack_op = get_qconv_prepack_op(func_node.target)
# For conv1d, the stride, padding, and dilation args may be ints,
# in which case we need to convert them to tuples
if func_node.target == F.conv1d:
for i in [2, 3, 4]:
if len(prepack_args) > i and isinstance(prepack_args[i], int):
prepack_args[i] = (prepack_args[i],)
else:
raise ValueError("Lowering is not supported for op '%s'" % func_node.target)
with model.graph.inserting_before(func_node):
packed_weight = model.graph.create_node("call_function", prepack_op, tuple(prepack_args), {})
# Step 3: Replace reference pattern with the corresponding quantized op
func_node.target = q_relu_func if relu_node is not None else q_func
if is_int8:
func_node.args = (pattern_input, packed_weight, reduce_range_node)
else:
func_node.args = (pattern_input, packed_weight)
if relu_node is not None:
relu_node.replace_all_uses_with(func_node)
# Step 4: Remove dequantize and quantize nodes, and the relu node if it exists
for dqn in [input_dq_node, weight_dq_node]:
dqn_input = dqn.args[0]
dqn.replace_all_uses_with(dqn_input)
model.graph.erase_node(dqn)
model.graph.erase_node(input_dynamic_q_node)
if relu_node is not None:
model.graph.erase_node(relu_node)
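# Sketch of the binary op lowering performed below (node names are illustrative,
# not part of the original graph). A reference pattern such as
#
#     x_dq = x.dequantize()
#     y_dq = y.dequantize()
#     out = torch.add(x_dq, y_dq)
#     out_q = torch.quantize_per_tensor(out, scale, zero_point, torch.quint8)
#
# is rewritten so that the quantized binary op from QBIN_OP_MAPPING (or
# QBIN_RELU_OP_MAPPING when a ReLU follows) consumes the quantized inputs
# directly, with the output scale/zero_point appended as extra args in the
# Tensor - Tensor case.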
def _lower_quantized_binary_op(
model: QuantizedGraphModule,
qconfig_map: Dict[str, QConfigAny]):
binary_ops_to_lower: List[Callable] = [operator.add, torch.add, operator.mul, torch.mul, torch.matmul]
modules = dict(model.named_modules(remove_duplicate=False))
for n in model.graph.nodes:
# Step 0: Find nodes that match this pattern (dequantize - ref module - quantize)
(q_node, relu_node, bop_node) = _match_static_pattern(
n, modules, qconfig_map, binary_ops_to_lower, dequantize_node_arg_indices=[0, 1])
if q_node is None:
continue
assert(bop_node is not None)
(_, scale_node, zero_point_node, _) = q_node.args
# Step 1: Remove dequant nodes
num_dq_nodes = 0
for arg in bop_node.args:
if not is_dequantize_node(arg):
continue
dq_node = arg
assert(isinstance(dq_node, Node))
dn_input = dq_node.args[0]
dq_node.replace_all_uses_with(dn_input)
model.graph.erase_node(dq_node)
num_dq_nodes += 1
assert(num_dq_nodes > 0)
# Step 2: Swap binary op to quantized binary op
assert bop_node.target in QBIN_OP_MAPPING
binop_to_qbinop = QBIN_OP_MAPPING if relu_node is None else QBIN_RELU_OP_MAPPING
qbin_op = binop_to_qbinop[bop_node.target]
        # prepare the args for the quantized binary op
# (x, y)
qop_node_args = list(bop_node.args)
# (x, y, scale, zero_point)
# add scale and zero_point arguments for Tensor - Tensor operation
if num_dq_nodes == 2:
qop_node_args.extend([scale_node, zero_point_node])
# insert a call to quantized binary op and remove the original binary op
with model.graph.inserting_after(q_node):
qop_node = create_node_from_old_node_preserve_meta(
model.graph,
("call_function", qbin_op, tuple(qop_node_args), {}),
bop_node)
q_node.replace_all_uses_with(qop_node)
# Step 3: Remove quantize node, binary op node, and relu node if any
model.graph.erase_node(q_node)
if relu_node is not None:
model.graph.erase_node(relu_node)
model.graph.erase_node(bop_node)
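# special_pattern_replacement lowers the remaining "special" reference patterns:
# reference modules are swapped for quantized modules via
# SPECIAL_PATTERN_LOWER_MODULE_MAP, functional/method ops are mapped through
# get_quantized_operator with the output scale/zero_point passed as kwargs, and
# the surrounding dequantize and quantize (or to(torch.float16)) nodes are removed.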
def special_pattern_replacement(model: QuantizedGraphModule):
modules = dict(model.named_modules(remove_duplicate=False))
for n in model.graph.nodes:
q_node = n
is_quantize = q_node.target == torch.quantize_per_tensor
is_to_fp16 = q_node.op == "call_method" and q_node.target == "to" and \
len(q_node.args) == 2 and q_node.args[1] == torch.float16
if not (is_quantize or is_to_fp16):
continue
ref_node = q_node.args[0]
# get output scale/zero_point/dtype from the quantize node
# ref_node, scale_node, zero_point_node, dtype = q_node.args
        # TODO: add safety checks that the ref_node and dq_node each have exactly one user
is_call_function, is_call_method, is_call_module = is_fixed_qparams_node(ref_node, modules)
if is_to_fp16 and (is_call_function or is_call_method or is_call_module):
# TODO: add a warning or error out here? (bc-breaking if error out)
# warnings.warn(
# "Only reference patterns are currently supported for {dtype} dtype with {op} op"
# "".format(dtype=dtypes, op=ref_node))
continue
is_call_function, is_call_method, is_call_module = is_default_node(ref_node, modules)
if is_to_fp16 and (is_call_function or is_call_method or is_call_module):
# TODO: add a warning or error out here? (bc-breaking if error out)
continue
# This check includes all supported ops
is_call_function, is_call_method, is_call_module = is_special_pattern_node(ref_node, modules)
if not (is_call_module or is_call_function or is_call_method):
continue
dq_node_or_nodes = ref_node.args[0]
assert isinstance(dq_node_or_nodes, Node) or isinstance(dq_node_or_nodes, (tuple, list))
is_dequantize = False
if isinstance(dq_node_or_nodes, Node):
is_dequantize = dq_node_or_nodes.op == 'call_method' and \
dq_node_or_nodes.target == 'dequantize'
elif isinstance(dq_node_or_nodes, (tuple, list)):
is_dequantize = all(
x.op == 'call_method' and x.target == 'dequantize'
for x in dq_node_or_nodes)
if not is_dequantize:
continue
        # TODO: enable when we have patterns that need to swap the modules
if is_call_module:
ref_module = modules[ref_node.target]
if type(ref_module) in SPECIAL_PATTERN_LOWER_MODULE_MAP and is_quantize:
qmodule_cls = SPECIAL_PATTERN_LOWER_MODULE_MAP.get(type(ref_module))
scale_node = q_node.args[1]
zero_point_node = q_node.args[2]
output_scale = getattr(model, scale_node.target)
output_zero_point = getattr(model, zero_point_node.target)
qmodule = qmodule_cls.from_reference(ref_module, output_scale, output_zero_point) # type:ignore[union-attr]
# replace reference module with quantized module
parent_name, module_name = _parent_name(ref_node.target)
setattr(modules[parent_name], module_name, qmodule)
# remove dq node:
dq_nodes: List[Node] = []
if isinstance(dq_node_or_nodes, Node):
dq_nodes = [dq_node_or_nodes]
elif isinstance(dq_node_or_nodes, (tuple, list)):
dq_nodes = list(dq_node_or_nodes)
for dq_node in dq_nodes:
dn_input = dq_node.args[0]
dq_node.replace_all_uses_with(dn_input)
model.graph.erase_node(dq_node)
# store q node args
qnode_qparams = list(q_node.args)[1:]
# replace uses of q node with input and remove q node
q_node_input = q_node.args[0]
q_node.replace_all_uses_with(q_node_input)
model.graph.erase_node(q_node)
is_call_function, is_call_method, is_call_module = is_default_node(ref_node, modules)
if is_call_function:
            # pass scale/zero_point arguments from quantize_per_tensor to the default node operator
            # insert an op after the zero_point node so that the scale/zero_point
            # nodes are available
qop = get_quantized_operator(ref_node.target)
args = list(ref_node.args)
kwargs = dict(ref_node.kwargs)
if qop in QOP_TO_ARG_NAMES_TO_SKIP:
args_to_skip = QOP_TO_ARG_NAMES_TO_SKIP[qop]
for arg in args_to_skip:
if arg in kwargs:
kwargs.pop(arg)
kwargs["output_scale"] = qnode_qparams[0]
kwargs["output_zero_point"] = qnode_qparams[1]
with model.graph.inserting_after(qnode_qparams[1]):
qop_node = create_node_from_old_node_preserve_meta(
model.graph,
("call_function", qop, tuple(args), kwargs),
ref_node)
ref_node.replace_all_uses_with(qop_node)
model.graph.erase_node(ref_node)
else:
# remove scale/zero_point node for quantize node
for n in qnode_qparams:
if isinstance(n, Node):
model.graph.erase_node(n)
return model
def _lower_getattr_tensor_metadta_op(model: QuantizedGraphModule):
""" Modified the graph of the model inplace, to skip extra dequantize op before
the general tensor shape ops when possible
"""
for n in model.graph.nodes:
if is_getattr_tensor_metadata_node(n):
maybe_dq = n.args[0]
if maybe_dq.op != "call_method" or maybe_dq.target != "dequantize":
continue
# skip the dequantize node
args = list(n.args)
args[0] = n.args[0].args[0]
n.args = tuple(args)
def _lower_to_native_backend(
model: QuantizedGraphModule,
qconfig_map: Dict[str, QConfigAny],
node_name_to_scope: Dict[str, Tuple[str, type]]
) -> QuantizedGraphModule:
""" Lower a quantized reference model (with reference quantized operator patterns)
    to the native backend in PyTorch (fbgemm/qnnpack). Both backends share the same
    operator signature, so they can be lowered with the same function
"""
_lower_static_weighted_ref_module(model, qconfig_map)
_lower_dynamic_weighted_ref_module(model)
_lower_weight_only_weighted_ref_module(model)
_lower_static_weighted_ref_functional(model, qconfig_map)
_lower_dynamic_weighted_ref_functional(model, qconfig_map)
_lower_quantized_binary_op(model, qconfig_map)
_lower_getattr_tensor_metadta_op(model)
special_pattern_replacement(model)
model = fold_weight(model, node_name_to_scope)
model.graph.eliminate_dead_code()
model.recompile()
model.graph.lint()
return model
| pytorch-master | torch/ao/quantization/fx/_lower_to_native_backend.py |
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Type
from torch.ao.quantization import QConfigMapping
from torch.ao.quantization.backend_config import BackendConfig
from torch.ao.quantization.quant_type import QuantType, _quant_type_from_str, quant_type_to_str
__all__ = [
"ConvertCustomConfig",
"FuseCustomConfig",
"PrepareCustomConfig",
"StandaloneModuleConfigEntry",
]
# TODO: replace all usages with these constants
STANDALONE_MODULE_NAME_DICT_KEY = "standalone_module_name"
STANDALONE_MODULE_CLASS_DICT_KEY = "standalone_module_class"
FLOAT_TO_OBSERVED_DICT_KEY = "float_to_observed_custom_module_class"
OBSERVED_TO_QUANTIZED_DICT_KEY = "observed_to_quantized_custom_module_class"
NON_TRACEABLE_MODULE_NAME_DICT_KEY = "non_traceable_module_name"
NON_TRACEABLE_MODULE_CLASS_DICT_KEY = "non_traceable_module_class"
INPUT_QUANTIZED_INDEXES_DICT_KEY = "input_quantized_idxs"
OUTPUT_QUANTIZED_INDEXES_DICT_KEY = "output_quantized_idxs"
PRESERVED_ATTRIBUTES_DICT_KEY = "preserved_attributes"
@dataclass
class StandaloneModuleConfigEntry:
# qconfig_mapping for the prepare function called in the submodule,
# None means use qconfig from parent qconfig_mapping
qconfig_mapping: Optional[QConfigMapping]
example_inputs: Tuple[Any, ...]
prepare_custom_config: Optional[PrepareCustomConfig]
backend_config: Optional[BackendConfig]
class PrepareCustomConfig:
"""
Custom configuration for :func:`~torch.ao.quantization.quantize_fx.prepare_fx` and
:func:`~torch.ao.quantization.quantize_fx.prepare_qat_fx`.
The user can set custom configuration using the following methods:
`set_standalone_module_name`: sets the config for preparing a standalone module for quantization, identified by name
`set_standalone_module_class`: sets the config for preparing a standalone module for quantization, identified by class
`set_float_to_observed_mapping`: sets the mapping from a float module class to an observed module class
`set_non_traceable_module_names`: sets modules that are not symbolically traceable, identified by name
`set_non_traceable_module_classes`: sets modules that are not symbolically traceable, identified by class
`set_input_quantized_indexes`: sets the indexes of the inputs of the graph that should be quantized.
`set_output_quantized_indexes`: sets the indexes of the outputs of the graph that should be quantized.
`set_preserved_attributes`: sets the names of the attributes that will persist in the graph module even
if they are not used in the model's `forward` method
Example usage::
prepare_custom_config = PrepareCustomConfig() \
.set_standalone_module_name("module1", qconfig_mapping, example_inputs, \
child_prepare_custom_config, backend_config) \
.set_standalone_module_class(MyStandaloneModule, qconfig_mapping, example_inputs, \
child_prepare_custom_config, backend_config) \
.set_float_to_observed_mapping(FloatCustomModule, ObservedCustomModule) \
.set_non_traceable_module_names(["module2", "module3"]) \
.set_non_traceable_module_classes([NonTraceableModule1, NonTraceableModule2]) \
.set_input_quantized_indexes([0]) \
.set_output_quantized_indexes([0]) \
.set_preserved_attributes(["attr1", "attr2"])
"""
def __init__(self):
self.standalone_module_names: Dict[str, StandaloneModuleConfigEntry] = {}
self.standalone_module_classes: Dict[Type, StandaloneModuleConfigEntry] = {}
self.float_to_observed_mapping: Dict[QuantType, Dict[Type, Type]] = {}
self.non_traceable_module_names: List[str] = []
self.non_traceable_module_classes: List[Type] = []
self.input_quantized_indexes: List[int] = []
self.output_quantized_indexes: List[int] = []
self.preserved_attributes: List[str] = []
def set_standalone_module_name(
self,
module_name: str,
qconfig_mapping: Optional[QConfigMapping],
example_inputs: Tuple[Any, ...],
prepare_custom_config: Optional[PrepareCustomConfig],
backend_config: Optional[BackendConfig]) -> PrepareCustomConfig:
"""
Set the configuration for running a standalone module identified by `module_name`.
If `qconfig_mapping` is None, the parent `qconfig_mapping` will be used instead.
If `prepare_custom_config` is None, an empty `PrepareCustomConfig` will be used.
If `backend_config` is None, the parent `backend_config` will be used instead.
"""
self.standalone_module_names[module_name] = \
StandaloneModuleConfigEntry(qconfig_mapping, example_inputs, prepare_custom_config, backend_config)
return self
def set_standalone_module_class(
self,
module_class: Type,
qconfig_mapping: Optional[QConfigMapping],
example_inputs: Tuple[Any, ...],
prepare_custom_config: Optional[PrepareCustomConfig],
backend_config: Optional[BackendConfig]) -> PrepareCustomConfig:
"""
Set the configuration for running a standalone module identified by `module_class`.
If `qconfig_mapping` is None, the parent `qconfig_mapping` will be used instead.
If `prepare_custom_config` is None, an empty `PrepareCustomConfig` will be used.
If `backend_config` is None, the parent `backend_config` will be used instead.
"""
self.standalone_module_classes[module_class] = \
StandaloneModuleConfigEntry(qconfig_mapping, example_inputs, prepare_custom_config, backend_config)
return self
def set_float_to_observed_mapping(
self,
float_class: Type,
observed_class: Type,
quant_type: QuantType = QuantType.STATIC) -> PrepareCustomConfig:
"""
Set the mapping from a custom float module class to a custom observed module class.
The observed module class must have a `from_float` class method that converts the float module class
to the observed module class. This is currently only supported for static quantization.
"""
if quant_type != QuantType.STATIC:
raise ValueError("set_float_to_observed_mapping is currently only supported for static quantization")
if quant_type not in self.float_to_observed_mapping:
self.float_to_observed_mapping[quant_type] = {}
self.float_to_observed_mapping[quant_type][float_class] = observed_class
return self
def set_non_traceable_module_names(self, module_names: List[str]) -> PrepareCustomConfig:
"""
Set the modules that are not symbolically traceable, identified by name.
"""
self.non_traceable_module_names = module_names
return self
def set_non_traceable_module_classes(self, module_classes: List[Type]) -> PrepareCustomConfig:
"""
Set the modules that are not symbolically traceable, identified by class.
"""
self.non_traceable_module_classes = module_classes
return self
def set_input_quantized_indexes(self, indexes: List[int]) -> PrepareCustomConfig:
"""
Set the indexes of the inputs of the graph that should be quantized.
        Inputs are otherwise assumed to be in fp32 by default.
"""
self.input_quantized_indexes = indexes
return self
def set_output_quantized_indexes(self, indexes: List[int]) -> PrepareCustomConfig:
"""
Set the indexes of the outputs of the graph that should be quantized.
Outputs are otherwise assumed to be in fp32 by default instead.
"""
self.output_quantized_indexes = indexes
return self
def set_preserved_attributes(self, attributes: List[str]) -> PrepareCustomConfig:
"""
Set the names of the attributes that will persist in the graph module even if they are not used in
the model's `forward` method.
"""
self.preserved_attributes = attributes
return self
# TODO: remove this
@classmethod
def from_dict(cls, prepare_custom_config_dict: Dict[str, Any]) -> PrepareCustomConfig:
"""
Create a `PrepareCustomConfig` from a dictionary with the following items:
"standalone_module_name": a list of (module_name, qconfig_mapping, example_inputs,
child_prepare_custom_config, backend_config) tuples
"standalone_module_class" a list of (module_class, qconfig_mapping, example_inputs,
child_prepare_custom_config, backend_config) tuples
"float_to_observed_custom_module_class": a nested dictionary mapping from quantization
mode to an inner mapping from float module classes to observed module classes, e.g.
{"static": {FloatCustomModule: ObservedCustomModule}}
"non_traceable_module_name": a list of modules names that are not symbolically traceable
"non_traceable_module_class": a list of module classes that are not symbolically traceable
"input_quantized_idxs": a list of indexes of graph inputs that should be quantized
"output_quantized_idxs": a list of indexes of graph outputs that should be quantized
"preserved_attributes": a list of attributes that persist even if they are not used in `forward`
This function is primarily for backward compatibility and may be removed in the future.
"""
def _get_qconfig_mapping(obj: Any, dict_key: str) -> Optional[QConfigMapping]:
"""
Convert the given object into a QConfigMapping if possible, else throw an exception.
"""
if isinstance(obj, QConfigMapping) or obj is None:
return obj
if isinstance(obj, Dict):
return QConfigMapping.from_dict(obj)
raise ValueError("Expected QConfigMapping in prepare_custom_config_dict[\"%s\"], got '%s'" %
(dict_key, type(obj)))
def _get_prepare_custom_config(obj: Any, dict_key: str) -> Optional[PrepareCustomConfig]:
"""
Convert the given object into a PrepareCustomConfig if possible, else throw an exception.
"""
if isinstance(obj, PrepareCustomConfig) or obj is None:
return obj
if isinstance(obj, Dict):
return PrepareCustomConfig.from_dict(obj)
raise ValueError("Expected PrepareCustomConfig in prepare_custom_config_dict[\"%s\"], got '%s'" %
(dict_key, type(obj)))
def _get_backend_config(obj: Any, dict_key: str) -> Optional[BackendConfig]:
"""
Convert the given object into a BackendConfig if possible, else throw an exception.
"""
if isinstance(obj, BackendConfig) or obj is None:
return obj
if isinstance(obj, Dict):
return BackendConfig.from_dict(obj)
raise ValueError("Expected BackendConfig in prepare_custom_config_dict[\"%s\"], got '%s'" %
(dict_key, type(obj)))
conf = cls()
for (module_name, qconfig_dict, example_inputs, _prepare_custom_config_dict, backend_config_dict) in\
prepare_custom_config_dict.get(STANDALONE_MODULE_NAME_DICT_KEY, []):
qconfig_mapping = _get_qconfig_mapping(qconfig_dict, STANDALONE_MODULE_NAME_DICT_KEY)
prepare_custom_config = _get_prepare_custom_config(_prepare_custom_config_dict, STANDALONE_MODULE_NAME_DICT_KEY)
backend_config = _get_backend_config(backend_config_dict, STANDALONE_MODULE_NAME_DICT_KEY)
conf.set_standalone_module_name(
module_name, qconfig_mapping, example_inputs, prepare_custom_config, backend_config)
for (module_class, qconfig_dict, example_inputs, _prepare_custom_config_dict, backend_config_dict) in\
prepare_custom_config_dict.get(STANDALONE_MODULE_CLASS_DICT_KEY, []):
qconfig_mapping = _get_qconfig_mapping(qconfig_dict, STANDALONE_MODULE_CLASS_DICT_KEY)
prepare_custom_config = _get_prepare_custom_config(_prepare_custom_config_dict, STANDALONE_MODULE_CLASS_DICT_KEY)
backend_config = _get_backend_config(backend_config_dict, STANDALONE_MODULE_CLASS_DICT_KEY)
conf.set_standalone_module_class(
module_class, qconfig_mapping, example_inputs, prepare_custom_config, backend_config)
for quant_type_name, custom_module_mapping in prepare_custom_config_dict.get(FLOAT_TO_OBSERVED_DICT_KEY, {}).items():
quant_type = _quant_type_from_str(quant_type_name)
for float_class, observed_class in custom_module_mapping.items():
conf.set_float_to_observed_mapping(float_class, observed_class, quant_type)
conf.set_non_traceable_module_names(prepare_custom_config_dict.get(NON_TRACEABLE_MODULE_NAME_DICT_KEY, []))
conf.set_non_traceable_module_classes(prepare_custom_config_dict.get(NON_TRACEABLE_MODULE_CLASS_DICT_KEY, []))
conf.set_input_quantized_indexes(prepare_custom_config_dict.get(INPUT_QUANTIZED_INDEXES_DICT_KEY, []))
conf.set_output_quantized_indexes(prepare_custom_config_dict.get(OUTPUT_QUANTIZED_INDEXES_DICT_KEY, []))
conf.set_preserved_attributes(prepare_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, []))
return conf
def to_dict(self) -> Dict[str, Any]:
"""
Convert this `PrepareCustomConfig` to a dictionary with the items described in
:func:`~torch.ao.quantization.fx.custom_config.PrepareCustomConfig.from_dict`.
"""
def _make_tuple(key: Any, e: StandaloneModuleConfigEntry):
qconfig_dict = e.qconfig_mapping.to_dict() if e.qconfig_mapping else None
prepare_custom_config_dict = e.prepare_custom_config.to_dict() if e.prepare_custom_config else None
return (key, qconfig_dict, e.example_inputs, prepare_custom_config_dict, e.backend_config)
d: Dict[str, Any] = {}
for module_name, sm_config_entry in self.standalone_module_names.items():
if STANDALONE_MODULE_NAME_DICT_KEY not in d:
d[STANDALONE_MODULE_NAME_DICT_KEY] = []
d[STANDALONE_MODULE_NAME_DICT_KEY].append(_make_tuple(module_name, sm_config_entry))
for module_class, sm_config_entry in self.standalone_module_classes.items():
if STANDALONE_MODULE_CLASS_DICT_KEY not in d:
d[STANDALONE_MODULE_CLASS_DICT_KEY] = []
d[STANDALONE_MODULE_CLASS_DICT_KEY].append(_make_tuple(module_class, sm_config_entry))
for quant_type, float_to_observed_mapping in self.float_to_observed_mapping.items():
if FLOAT_TO_OBSERVED_DICT_KEY not in d:
d[FLOAT_TO_OBSERVED_DICT_KEY] = {}
d[FLOAT_TO_OBSERVED_DICT_KEY][quant_type_to_str(quant_type)] = float_to_observed_mapping
if len(self.non_traceable_module_names) > 0:
d[NON_TRACEABLE_MODULE_NAME_DICT_KEY] = self.non_traceable_module_names
if len(self.non_traceable_module_classes) > 0:
d[NON_TRACEABLE_MODULE_CLASS_DICT_KEY] = self.non_traceable_module_classes
if len(self.input_quantized_indexes) > 0:
d[INPUT_QUANTIZED_INDEXES_DICT_KEY] = self.input_quantized_indexes
if len(self.output_quantized_indexes) > 0:
d[OUTPUT_QUANTIZED_INDEXES_DICT_KEY] = self.output_quantized_indexes
if len(self.preserved_attributes) > 0:
d[PRESERVED_ATTRIBUTES_DICT_KEY] = self.preserved_attributes
return d
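# Minimal round-trip sketch for PrepareCustomConfig (the attribute name is
# hypothetical), relying only on the methods defined above:
#
#     config = PrepareCustomConfig().set_preserved_attributes(["attr1"])
#     restored = PrepareCustomConfig.from_dict(config.to_dict())
#     assert restored.preserved_attributes == ["attr1"]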
class ConvertCustomConfig:
"""
Custom configuration for :func:`~torch.ao.quantization.quantize_fx.convert_fx`.
The user can set custom configuration using the following methods:
`set_observed_to_quantized_mapping`: sets the mapping from an observed module class to a quantized module class
`set_preserved_attributes`: sets the names of the attributes that will persist in the graph module even if they
are not used in the model's `forward` method
Example usage::
convert_custom_config = ConvertCustomConfig() \
.set_observed_to_quantized_mapping(ObservedCustomModule, QuantizedCustomModule) \
.set_preserved_attributes(["attr1", "attr2"])
"""
def __init__(self):
self.observed_to_quantized_mapping: Dict[QuantType, Dict[Type, Type]] = {}
self.preserved_attributes: List[str] = []
def set_observed_to_quantized_mapping(
self,
observed_class: Type,
quantized_class: Type,
quant_type: QuantType = QuantType.STATIC) -> ConvertCustomConfig:
"""
Set the mapping from a custom observed module class to a custom quantized module class.
The quantized module class must have a `from_observed` class method that converts the observed module class
to the quantized module class.
"""
if quant_type not in self.observed_to_quantized_mapping:
self.observed_to_quantized_mapping[quant_type] = {}
self.observed_to_quantized_mapping[quant_type][observed_class] = quantized_class
return self
def set_preserved_attributes(self, attributes: List[str]) -> ConvertCustomConfig:
"""
Set the names of the attributes that will persist in the graph module even if they are not used in
the model's `forward` method.
"""
self.preserved_attributes = attributes
return self
# TODO: remove this
@classmethod
def from_dict(cls, convert_custom_config_dict: Dict[str, Any]) -> ConvertCustomConfig:
"""
Create a `ConvertCustomConfig` from a dictionary with the following items:
"observed_to_quantized_custom_module_class": a nested dictionary mapping from quantization
mode to an inner mapping from observed module classes to quantized module classes, e.g.
{
"static": {FloatCustomModule: ObservedCustomModule},
"dynamic": {FloatCustomModule: ObservedCustomModule},
"weight_only": {FloatCustomModule: ObservedCustomModule}
}
"preserved_attributes": a list of attributes that persist even if they are not used in `forward`
This function is primarily for backward compatibility and may be removed in the future.
"""
conf = cls()
for quant_type_name, custom_module_mapping in convert_custom_config_dict.get(OBSERVED_TO_QUANTIZED_DICT_KEY, {}).items():
quant_type = _quant_type_from_str(quant_type_name)
for observed_class, quantized_class in custom_module_mapping.items():
conf.set_observed_to_quantized_mapping(observed_class, quantized_class, quant_type)
conf.set_preserved_attributes(convert_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, []))
return conf
def to_dict(self) -> Dict[str, Any]:
"""
Convert this `ConvertCustomConfig` to a dictionary with the items described in
:func:`~torch.ao.quantization.fx.custom_config.ConvertCustomConfig.from_dict`.
"""
d: Dict[str, Any] = {}
for quant_type, observed_to_quantized_mapping in self.observed_to_quantized_mapping.items():
if OBSERVED_TO_QUANTIZED_DICT_KEY not in d:
d[OBSERVED_TO_QUANTIZED_DICT_KEY] = {}
d[OBSERVED_TO_QUANTIZED_DICT_KEY][quant_type_to_str(quant_type)] = observed_to_quantized_mapping
if len(self.preserved_attributes) > 0:
d[PRESERVED_ATTRIBUTES_DICT_KEY] = self.preserved_attributes
return d
class FuseCustomConfig:
"""
Custom configuration for :func:`~torch.ao.quantization.quantize_fx.fuse_fx`.
The user can set custom configuration using the following method:
`set_preserved_attributes`: sets the names of the attributes that will persist in the graph module
even if they are not used in the model's `forward` method
Example usage::
fuse_custom_config = FuseCustomConfig().set_preserved_attributes(["attr1", "attr2"])
"""
def __init__(self):
self.preserved_attributes: List[str] = []
def set_preserved_attributes(self, attributes: List[str]) -> FuseCustomConfig:
"""
Set the names of the attributes that will persist in the graph module even if they are not used in
the model's `forward` method.
"""
self.preserved_attributes = attributes
return self
# TODO: remove this
@classmethod
def from_dict(cls, fuse_custom_config_dict: Dict[str, Any]) -> FuseCustomConfig:
"""
        Create a `FuseCustomConfig` from a dictionary with the following items:
"preserved_attributes": a list of attributes that persist even if they are not used in `forward`
This function is primarily for backward compatibility and may be removed in the future.
"""
conf = cls()
conf.set_preserved_attributes(fuse_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, []))
return conf
def to_dict(self) -> Dict[str, Any]:
"""
Convert this `FuseCustomConfig` to a dictionary with the items described in
        :func:`~torch.ao.quantization.fx.custom_config.FuseCustomConfig.from_dict`.
"""
d: Dict[str, Any] = {}
if len(self.preserved_attributes) > 0:
d[PRESERVED_ATTRIBUTES_DICT_KEY] = self.preserved_attributes
return d
| pytorch-master | torch/ao/quantization/fx/custom_config.py |
from torch.fx import (
GraphModule,
Node,
map_arg
)
from torch.fx.graph import Graph
from .graph_module import (
FusedGraphModule
)
from .match_utils import (
is_match,
MatchAllNode,
)
from .pattern_utils import (
sorted_patterns_dict,
)
from ..backend_config import (
BackendConfig,
get_native_backend_config,
)
from ..backend_config.utils import (
get_fuser_method_mapping,
get_fusion_pattern_to_root_node_getter,
get_fusion_pattern_to_extra_inputs_getter,
)
from .backend_config_utils import get_fusion_pattern_to_fuse_handler_cls
from .custom_config import FuseCustomConfig
from .fusion_patterns import * # noqa: F401,F403
from typing import Any, Callable, Dict, List, Tuple, Union
import warnings
from torch.ao.quantization.quantization_types import Pattern, NodePattern
__all__ = [
"fuse",
]
def fuse(
model: GraphModule,
is_qat: bool,
fuse_custom_config: Union[FuseCustomConfig, Dict[str, Any], None] = None,
backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> GraphModule:
if fuse_custom_config is None:
fuse_custom_config = FuseCustomConfig()
if isinstance(fuse_custom_config, Dict):
warnings.warn(
"Passing a fuse_custom_config_dict to fuse is deprecated and will not be supported "
"in a future version. Please pass in a FuseCustomConfig instead.")
fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config)
if isinstance(backend_config, Dict):
warnings.warn(
"Passing a backend_config_dict to prepare is deprecated and will not be supported "
"in a future version. Please pass in a BackendConfig instead.")
backend_config = BackendConfig.from_dict(backend_config)
input_root = model
input_graph = model.graph
named_modules = dict(input_root.named_modules())
if backend_config is None:
backend_config = get_native_backend_config()
fusion_pattern_to_fuse_handler_cls = sorted_patterns_dict(get_fusion_pattern_to_fuse_handler_cls(backend_config))
fuser_method_mapping = get_fuser_method_mapping(backend_config)
fusion_pattern_to_root_node_getter = get_fusion_pattern_to_root_node_getter(backend_config)
fusion_pattern_to_extra_inputs_getter = get_fusion_pattern_to_extra_inputs_getter(backend_config)
# find fusion
fusion_pairs = _find_matches(
input_root, input_graph, fusion_pattern_to_fuse_handler_cls)
fused_graph = Graph()
env: Dict[Any, Any] = {}
def load_arg(a):
return map_arg(a, lambda node: env[node.name])
def default_root_node_getter(node_pattern):
while not isinstance(node_pattern[-1], Node):
node_pattern = node_pattern[-1]
return node_pattern[-1]
for node in input_graph.nodes:
maybe_last_node, pattern, matched_node_pattern, obj, node_to_subpattern = \
fusion_pairs.get(node.name, (None, None, None, None, None))
# get the corresponding subpattern for the current node
if node_to_subpattern is not None:
node_subpattern = node_to_subpattern.get(node, None)
else:
node_subpattern = None
if maybe_last_node is node:
assert obj is not None
root_node_getter = fusion_pattern_to_root_node_getter.get(pattern, default_root_node_getter)
root_node = root_node_getter(matched_node_pattern) # type: ignore[index]
extra_inputs_getter = fusion_pattern_to_extra_inputs_getter.get(pattern, None)
extra_inputs = []
if extra_inputs_getter is not None:
extra_inputs = extra_inputs_getter(matched_node_pattern)
# TODO: add validation that root_node is a module and has the same type
# as the root_module in the configuration
env[node.name] = obj.fuse(
load_arg, named_modules, fused_graph, root_node, extra_inputs, matched_node_pattern, # type: ignore[arg-type]
fuse_custom_config, fuser_method_mapping, is_qat)
elif maybe_last_node is None or node_subpattern is MatchAllNode:
env[node.name] = fused_graph.node_copy(node, load_arg)
        # nodes that are matched in a pattern but are not the root node are dropped here (not copied)
preserved_attributes = set(fuse_custom_config.preserved_attributes)
model = FusedGraphModule(input_root, fused_graph, preserved_attributes)
return model
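# Illustrative usage sketch for `fuse` (assumes `m` is a symbolically traceable
# float model; in practice this is called for users by the quantize_fx APIs):
#
#     from torch.fx import symbolic_trace
#     gm = symbolic_trace(m)
#     fused = fuse(gm, is_qat=False)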
def _find_matches(
root: GraphModule, graph: Graph,
patterns: Dict[Pattern, Callable]
) -> Dict[str, Tuple[Node, Pattern, NodePattern, FuseHandler, Dict[Node, Any]]]:
modules = dict(root.named_modules())
# node name -> (root_node, match_value)
match_map : Dict[
str, Tuple[Node, Pattern, NodePattern, FuseHandler, Dict[Node, Any]]] = {}
# a map from node to the matched subpattern
node_to_subpattern: Dict[Node, Any] = {}
# TODO: dedup with quantization matching function in match_utils.py
def apply_match(pattern, node, match, matched_node_pattern, node_to_subpattern):
if isinstance(pattern, tuple):
s, *args = pattern
current_node_pattern: List[Node] = []
apply_match(s, node, match, current_node_pattern, node_to_subpattern)
for subpattern, arg in zip(args, node.args):
apply_match(subpattern, arg, match, current_node_pattern, node_to_subpattern)
matched_node_pattern.append(tuple(current_node_pattern))
else:
            # the first pattern that matches will take precedence
if node.name not in match_map:
matched_node_pattern.append(node)
# MatchAllNode here is actually MatchAllInputNode which should not
# be added to match_map
if pattern is not MatchAllNode:
node_to_subpattern[node] = pattern
root_node, pattern, handler = match
match_map[node.name] = (root_node, pattern, matched_node_pattern, handler, node_to_subpattern)
for node in reversed(graph.nodes):
if node.name not in match_map:
for pattern, value in patterns.items():
matched_node_pattern: List[Node] = []
if is_match(modules, node, pattern):
apply_match(pattern, node, (node, pattern, value(node)), matched_node_pattern, node_to_subpattern)
break
return match_map
| pytorch-master | torch/ao/quantization/fx/fuse.py |
import sys
import torch
from torch.fx.graph import (
Graph,
Node,
)
from torch.ao.quantization.quantization_types import Pattern
from .quantization_patterns import (
QuantizeHandler,
)
from ..qconfig import (
QConfigAny,
)
from ..utils import (
MatchAllNode
)
from .graph_module import (
is_observed_standalone_module,
)
from torch.nn.utils.parametrize import type_before_parametrizations
from typing import Any, Dict, List, Callable, Optional, Tuple, Type, Set
# TODO: revisit this list. Many helper methods shouldn't be public
__all__ = [
"is_match",
"find_matches",
]
MatchResult = Tuple[Node, List[Node], Optional[Pattern], QuantizeHandler]
_MatchResultWithQConfig = Tuple[Node, List[Node], Optional[Pattern], QuantizeHandler,
QConfigAny]
# Note: The order of patterns is important! match function will take whatever is matched first, so we'll
# need to put the fusion patterns before single patterns. For example, add_relu should be registered before relu.
# decorators are applied in the reverse order we see. Also when we match the nodes in the graph with these patterns,
# we'll start from the last node of the graph and traverse back.
def is_match(modules, node, pattern, max_uses=sys.maxsize):
""" Matches a node in fx against a pattern
"""
if isinstance(pattern, tuple):
self_match, *arg_matches = pattern
if self_match is getattr:
assert len(pattern) == 2, 'Expecting getattr pattern to have two elements'
arg_matches = []
else:
self_match = pattern
arg_matches = []
if isinstance(self_match, type) and issubclass(self_match, MatchAllNode):
return True
if not isinstance(node, Node) or len(node.users) > max_uses:
return False
if isinstance(self_match, type) and issubclass(self_match, torch.nn.Module):
if node.op != 'call_module':
return False
if not type_before_parametrizations(modules[node.target]) == self_match:
return False
elif callable(self_match):
if node.op != 'call_function' or node.target is not self_match:
return False
elif node.target is getattr:
if node.args[1] != pattern[1]:
return False
elif isinstance(self_match, str):
if node.op != 'call_method' or node.target != self_match:
return False
elif node.target != self_match:
return False
if not arg_matches:
return True
if len(arg_matches) != len(node.args):
return False
return all(is_match(modules, node, arg_match, max_uses=1) for node, arg_match in zip(node.args, arg_matches))
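# Illustrative sketch of the matching convention used by `is_match` (node and
# module names are hypothetical): patterns are written with the last op first,
# so for a graph fragment conv -> relu,
#
#     is_match(modules, relu_node, (torch.nn.ReLU, torch.nn.Conv2d))
#
# returns True when `relu_node` is the call_module node for the ReLU and its
# argument is the call_module node for the Conv2d.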
def find_matches(
graph: Graph,
modules: Dict[str, torch.nn.Module],
patterns: Dict[Pattern, QuantizeHandler],
root_node_getter_mapping: Dict[Pattern, Callable],
standalone_module_names: List[str] = None,
standalone_module_classes: List[Type] = None,
custom_module_classes: List[Any] = None) -> Dict[str, MatchResult]:
"""
Matches the nodes in the input graph to quantization patterns, and
outputs the information needed to quantize them in future steps.
Inputs:
- graph: an fx.Graph object
- modules: a mapping of fully qualified module name to instance,
for example, {'foo': ModuleFoo, ...}
- patterns: a mapping from a tuple of nodes in reverse order to
uninitialized QuantizeHandler subclass.
Outputs a map of
node_name ->
(node, matched_values, matched_pattern, QuantizeHandler instance,
qconfig)
For example, {
'relu_1': (relu_1, [relu_1], torch.nn.functional.relu,
<CopyNodeQuantizeHandler instance>, QConfig(...)),
...
}
"""
if custom_module_classes is None:
custom_module_classes = []
if standalone_module_classes is None:
standalone_module_classes = []
if standalone_module_names is None:
standalone_module_names = []
match_map: Dict[str, MatchResult] = {}
all_matched : Set[str] = set()
def _recursive_record_node_in_match_map(
last_node,
match_map,
node_pattern,
matched_node_pattern,
pattern,
match_value):
if isinstance(node_pattern, Node):
match_map[node_pattern.name] = (
last_node, matched_node_pattern, pattern, match_value)
else:
for n in node_pattern:
_recursive_record_node_in_match_map(last_node, match_map, n, matched_node_pattern, pattern, match_value)
# TODO: 1. merge with fuse matcher 2. document the code
def record_match(
pattern,
node,
last_node,
matched_node_pattern,
match_map):
if isinstance(pattern, tuple):
s, *args = pattern
current_node_pattern: List[Node] = []
record_match(
s,
node,
last_node,
matched_node_pattern,
match_map)
if pattern[0] is not getattr:
for subpattern, arg in zip(args, node.args):
record_match(
subpattern,
arg,
node,
current_node_pattern,
match_map)
if len(current_node_pattern) > 1:
matched_node_pattern.append(tuple(current_node_pattern))
else:
matched_node_pattern.append(current_node_pattern[0])
else:
matched_node_pattern.append(node)
for node in reversed(graph.nodes):
if node.name not in match_map and node.name not in all_matched:
for pattern, quantize_handler_cls in patterns.items():
root_node_getter = root_node_getter_mapping.get(pattern, None)
if is_match(modules, node, pattern) and node.name not in match_map:
matched_node_pattern: List[Node] = []
record_match(
pattern,
node,
node,
matched_node_pattern,
match_map)
quantize_handler = quantize_handler_cls( # type: ignore[operator]
matched_node_pattern,
modules,
root_node_getter)
last_node = node
# record the match for all nodes in the pattern
_recursive_record_node_in_match_map(
last_node,
match_map,
# we need to record all nodes in the matched pattern in the match_map
matched_node_pattern,
# this is a part of the value corresponding to the node
matched_node_pattern,
pattern,
quantize_handler)
break
# add custom module instances to the match result
assert modules is not None
for node in graph.nodes:
if node.op == 'call_module' and \
type(modules[node.target]) in custom_module_classes:
match_map[node.name] = (
node, node, None, QuantizeHandler(node, modules, is_custom_module=True))
def is_standalone_module(node_target: str, modules: Dict[str, torch.nn.Module]):
assert modules is not None
return (
node_target in standalone_module_names or # type: ignore[operator]
type(modules[node_target]) in standalone_module_classes # type: ignore[operator]
)
# add standalone modules to the match
for node in graph.nodes:
if node.op == 'call_module' and \
(is_standalone_module(node.target, modules) or
is_observed_standalone_module(modules[node.target])):
# add node to matched nodes
match_map[node.name] = (
node, node, None,
QuantizeHandler(node, modules, is_standalone_module=True))
return match_map
| pytorch-master | torch/ao/quantization/fx/match_utils.py |
import copy
import torch
import operator
import warnings
from torch.fx import (
GraphModule,
)
from torch.fx.graph import (
Graph,
Node,
)
from torch.fx.node import Argument
from ..quantize import (
propagate_qconfig_,
)
from ..observer import (
ObserverBase,
)
from ..qconfig import (
obs_or_fq_ctr_equals,
float16_dynamic_qconfig,
float16_static_qconfig,
is_reuse_input_qconfig,
QConfigAny,
)
from ..qconfig_mapping import (
_FIXED_QPARAMS_OP_TO_OBSERVER,
QConfigMapping,
)
from ..qconfig_mapping_utils import (
get_flattened_qconfig_dict,
update_qconfig_for_qat,
)
from .qconfig_utils import (
generate_qconfig_map,
update_qconfig_for_fusion,
)
from .quantization_patterns import (
QuantizeHandler,
)
from torch.ao.quantization.quantization_types import (
Pattern,
NodePattern,
)
from torch.ao.quantization import FixedQParamsFakeQuantize
from ._equalize import (
is_equalization_observer,
node_supports_equalization,
)
from .graph_module import (
ObservedGraphModule,
ObservedStandaloneGraphModule,
)
from .pattern_utils import (
sorted_patterns_dict,
)
from .match_utils import (
_MatchResultWithQConfig,
find_matches,
)
from ..utils import _parent_name
from .utils import (
get_custom_module_class_keys,
all_node_args_have_no_tensors,
assert_and_get_unique_device,
get_non_observable_arg_indexes_and_types,
get_new_attr_name_with_prefix,
NON_QUANTIZABLE_WEIGHT_OPS,
)
from torch.ao.quantization.quantize import (
is_activation_post_process,
convert
)
from ..utils import (
get_qconfig_dtypes,
get_swapped_custom_module_class,
activation_is_statically_quantized,
activation_is_int8_quantized,
)
from ..backend_config.utils import (
get_pattern_to_dtype_configs,
get_module_to_qat_module,
get_fusion_pattern_to_root_node_getter,
)
from ..backend_config import (
BackendConfig,
DTypeConfig,
get_native_backend_config,
)
from .backend_config_utils import (
get_pattern_to_quantize_handlers,
)
from .custom_config import (
PrepareCustomConfig,
StandaloneModuleConfigEntry,
)
from typing import Any, Dict, List, Optional, Set, Tuple, Type, Union
from collections import defaultdict
# TODO: revisit this list. Many helper methods shouldn't be public
__all__ = [
"DO_NOT_OBS_DTYPE_LIST",
"add_matched_node_name_to_set",
"get_arg_target_compute_dtype_as_input_to_node",
"get_arg_target_dtype_as_input_to_node",
"get_arg_target_dtype_as_output",
"get_target_activation_dtype_for_node",
"get_standalone_module_configs",
"insert_observer",
"insert_observers_for_model",
"is_activation_post_process_node",
"is_input_arg_dtype_supported_by_backend",
"is_observer_in_same_graph",
"is_output_dtype_supported_by_backend",
"is_pattern_dtype_config_supported_by_backend",
"maybe_insert_input_equalization_observers_for_node",
"maybe_insert_input_observer_for_arg_or_kwarg",
"maybe_insert_input_observers_for_node",
"maybe_insert_observers_before_graph_output",
"maybe_insert_output_observer_for_node",
"maybe_make_input_output_share_observers",
"maybe_propagate_dtype_for_node",
"node_arg_is_bias",
"node_arg_is_weight",
"prepare",
"propagate_dtypes_for_known_nodes",
"qat_swap_modules",
"remove_output_observer",
"run_prepare_fx_on_standalone_modules",
"save_state",
"swap_custom_module_to_observed",
]
# list of dtypes to not add observers to
DO_NOT_OBS_DTYPE_LIST = [int, float, torch.bool, None]
def is_activation_post_process_node(node: Node, modules: Dict[str, torch.nn.Module]) -> bool:
return isinstance(node, torch.fx.Node) and node.op == "call_module" and \
is_activation_post_process(modules[str(node.target)])
def node_arg_is_weight(node: Node, arg: Any, backend_config: BackendConfig) -> bool:
if isinstance(node, Node) and node.op == "call_function" and node.target in backend_config.configs:
weight_index = backend_config.configs[node.target]._input_type_to_index.get("weight")
if weight_index is not None and weight_index < len(node.args) and node.args[weight_index] is arg:
return True
return node.kwargs.get("weight") is arg
return False
def node_arg_is_bias(node: Node, arg: Any, backend_config: BackendConfig) -> bool:
if isinstance(node, Node) and node.op == "call_function" and node.target in backend_config.configs:
bias_index = backend_config.configs[node.target]._input_type_to_index.get("bias")
if bias_index is not None and bias_index < len(node.args) and node.args[bias_index] is arg:
return True
return node.kwargs.get("bias") is arg
return False
def is_input_arg_dtype_supported_by_backend(
arg: Argument,
node: Node,
node_name_to_target_dtype: Dict[str, Dict[str, Optional[Union[torch.dtype, type]]]],
dtype_config: DTypeConfig,
backend_config: BackendConfig,
) -> bool:
""" Check if the configured qconfig for the argument
is supported by the backend or not
"""
if isinstance(arg, (list, tuple)):
return all(is_input_arg_dtype_supported_by_backend(a, node, node_name_to_target_dtype,
dtype_config, backend_config) for a in arg)
if not isinstance(arg, Node):
return True
# TODO: support check for standalone module
is_weight = node_arg_is_weight(node, arg, backend_config)
is_bias = node_arg_is_bias(node, arg, backend_config)
is_activation = not is_weight and not is_bias
if is_activation:
is_dynamic = dtype_config.is_dynamic
if is_dynamic:
input_activation_dtype = dtype_config.input_dtype
# TODO: change this after the is_dynamic refactor is landed
compute_dtype = node_name_to_target_dtype[node.name].get("input_activation_compute_dtype", None)
return input_activation_dtype is None or \
compute_dtype == input_activation_dtype
else:
input_activation_dtype = dtype_config.input_dtype
return input_activation_dtype is None or \
node_name_to_target_dtype[node.name]["input_activation_dtype"] == input_activation_dtype
elif is_weight:
weight_dtype = dtype_config.weight_dtype
return weight_dtype is None or node_name_to_target_dtype[node.name]["weight_dtype"] == weight_dtype
else: # bias
bias_dtype = dtype_config.bias_dtype
return bias_dtype is None or node_name_to_target_dtype[node.name]["bias_dtype"] == bias_dtype
def is_output_dtype_supported_by_backend(
node: Node,
node_name_to_target_dtype: Dict[str, Dict[str, Optional[Union[torch.dtype, type]]]],
dtype_config: DTypeConfig,
) -> bool:
""" Check if the configured qconfig for the output
is supported by the backend or not
"""
output_dtype = dtype_config.output_dtype
return output_dtype is None or \
output_dtype == node_name_to_target_dtype[node.name]["output_activation_dtype"]
def is_observer_in_same_graph(node, modules, node_name_to_target_dtype):
""" Check if observer in same graph
when the node output is not fp32 and input is 'placeholder'
the input is assumed to be quantized, so it is observed
in a different place rather than not observed.
"""
node_output_dtype = get_arg_target_dtype_as_output(node, modules, node_name_to_target_dtype)
if len(node.args) > 0 and isinstance(node.args[0], Node):
if node_output_dtype == torch.quint8 and node.args[0].op == 'placeholder':
return False
return True
def is_pattern_dtype_config_supported_by_backend(
pattern: Optional[Pattern],
matched_node_pattern: Optional[NodePattern],
node_name_to_target_dtype: Dict[str, Dict[str, Optional[Union[torch.dtype, type]]]],
backend_config: BackendConfig,
) -> bool:
""" Check is the dtype configuration of a pattern is supported by
the backend or not
"""
if backend_config is None or pattern is None:
return True
assert matched_node_pattern is not None and len(matched_node_pattern) >= 1
pattern_to_dtype_configs = get_pattern_to_dtype_configs(backend_config)
dtype_configs: List[DTypeConfig] = pattern_to_dtype_configs.get(pattern, [])
# TODO: this only works for one input and one output patterns, need to generalize to multiple
# inputs/output
root_node = _default_root_node_getter(matched_node_pattern)
input_node = root_node
output_node = matched_node_pattern[0]
for dtype_config in dtype_configs:
# check if arg dtype are supported
supported = True
for arg in input_node.args:
supported = supported and \
is_input_arg_dtype_supported_by_backend(
arg, input_node, node_name_to_target_dtype, dtype_config, backend_config)
for k, arg in input_node.kwargs.items():
supported = supported and \
is_input_arg_dtype_supported_by_backend(
arg, input_node, node_name_to_target_dtype, dtype_config, backend_config)
# check if output dtype is supported
supported = supported and is_output_dtype_supported_by_backend(
output_node, node_name_to_target_dtype, dtype_config)
if supported:
return True
return False
def get_standalone_module_configs(
node: Node,
modules: Dict[str, torch.nn.Module],
prepare_custom_config: PrepareCustomConfig,
parent_qconfig: QConfigAny,
parent_backend_config: Optional[BackendConfig],
) -> Tuple[QConfigMapping, Tuple[Any, ...], PrepareCustomConfig, Optional[BackendConfig]]:
"""
    Returns the standalone module QConfigMapping, example inputs, PrepareCustomConfig,
    and BackendConfig for `node`, assuming that the module pointed to by `node` is
    a standalone module.
"""
module_name = str(node.target)
module_type = type(modules[module_name]) # type: ignore[index]
# name config has precedence over type config
config_entry = StandaloneModuleConfigEntry(None, (), None, None)
config_entry = prepare_custom_config.standalone_module_classes.get(module_type, config_entry)
config_entry = prepare_custom_config.standalone_module_names.get(module_name, config_entry)
# fallback to use parent module's qconfig if user didn't specify qconfig dict
qconfig_mapping = config_entry.qconfig_mapping or QConfigMapping().set_global(parent_qconfig)
example_inputs = config_entry.example_inputs
prepare_custom_config = config_entry.prepare_custom_config or PrepareCustomConfig()
backend_config = config_entry.backend_config or parent_backend_config
return (qconfig_mapping, example_inputs, prepare_custom_config, backend_config)
def qat_swap_modules(
root: torch.nn.Module,
module_to_qat_module: Dict[Pattern, Type[torch.nn.Module]]) -> None:
convert(root, mapping=module_to_qat_module, inplace=True, remove_qconfig=False)
def add_matched_node_name_to_set(matched_node_pattern: NodePattern, s: Set[str]):
if isinstance(matched_node_pattern, Node):
s.add(matched_node_pattern.name)
elif isinstance(matched_node_pattern, (list, tuple)):
for maybe_node in matched_node_pattern:
add_matched_node_name_to_set(maybe_node, s)
# this is temporary, will be removed soon
def _default_root_node_getter(node_pattern):
while not isinstance(node_pattern, Node):
node_pattern = node_pattern[-1]
return node_pattern
def insert_observer(
node: Node,
observer: ObserverBase,
model: torch.nn.Module,
modules: Dict[str, torch.nn.Module],
graph: Graph,
) -> Node:
"""
Attaches `observer` to `model`, and creates a node which calls
`observer` on the output of `node`.
"""
model_device = assert_and_get_unique_device(model)
if model_device:
observer.to(model_device)
# add observer module as attribute
if is_equalization_observer(observer):
prefix = node.name + '_equalization_process_'
else:
prefix = 'activation_post_process_'
get_new_observer_name = get_new_attr_name_with_prefix(prefix)
observer_name = get_new_observer_name(model)
setattr(model, observer_name, observer)
modules[observer_name] = observer
with graph.inserting_after(node):
new_obs = graph.create_node(
'call_module', observer_name, (node,), {})
return new_obs
def get_target_activation_dtype_for_node(
node: Node,
qconfig: QConfigAny,
inputs_seen_counter: int,
outputs_seen_counter: int,
input_quantized_idxs: List[int],
output_quantized_idxs: List[int],
qhandler: Optional[QuantizeHandler],
modules: Dict[str, torch.nn.Module],
cache_for_no_tensor_check: Dict[Node, bool],
) -> Dict[str, Optional[Union[torch.dtype, type]]]:
"""
Returns the expected dtype of the input and output of this node after
convert. If the value is not None, it represents the dtype of the
Tensor. If the value is None, it means the value is not a Tensor.
Note: this is for activations only, weight dtypes are not handled here.
TODO(future PR, if needed): explicitly spell out the non-Tensor
dtypes.
"""
if node.op == 'placeholder':
if inputs_seen_counter in input_quantized_idxs:
return {
"input_activation_dtype": torch.quint8,
"output_activation_dtype": torch.quint8,
}
else:
# if dtype is fp32 (default), do nothing
# note: other dtypes are not supported
return {
"input_activation_dtype": torch.float,
"output_activation_dtype": torch.float,
}
elif node.op in ('call_module', 'call_method', 'call_function'):
args_have_no_tensors = \
all_node_args_have_no_tensors(
node, modules, cache_for_no_tensor_check)
if args_have_no_tensors:
return {
"input_activation_dtype": None,
"output_activation_dtype": None,
}
# TODO(future PR): consider stopping matching getitem
is_getitem = node.op == 'call_function' and \
node.target == operator.getitem
if is_getitem:
return {
"input_activation_dtype": torch.float,
"output_activation_dtype": torch.float,
}
# get qconfig to determine the eventual dtype of this node
if qconfig is not None:
if qhandler is not None and qhandler.input_output_observed():
act_dtype, weight_dtype, act_compute_dtype = \
get_qconfig_dtypes(qconfig)
bias_dtype = torch.float16 \
if act_dtype == torch.float16 and weight_dtype == torch.float16 \
else torch.float
return {
"input_activation_dtype": act_dtype,
"input_activation_compute_dtype": act_compute_dtype,
"weight_dtype": weight_dtype,
"bias_dtype": bias_dtype,
"output_activation_dtype": act_dtype,
}
return {
"input_activation_dtype": torch.float,
"output_activation_dtype": torch.float,
}
elif node.op == 'get_attr':
return {
"input_activation_dtype": torch.float,
"output_activation_dtype": torch.float,
}
elif node.op == 'output':
if outputs_seen_counter in output_quantized_idxs:
return {
"input_activation_dtype": torch.quint8,
"output_activation_dtype": torch.quint8
}
else:
# if dtype is fp32 (default), do nothing
# note: other dtypes are not supported
return {
"input_activation_dtype": torch.float,
"output_activation_dtype": torch.float,
}
else:
raise AssertionError(f'need to handle {node.format_node()}')
def get_arg_target_dtype_as_output(
arg: Node,
modules: Dict[str, torch.nn.Module],
node_name_to_target_dtype: Dict[str, Dict[str, Optional[Union[torch.dtype, type]]]],
) -> Optional[Union[torch.dtype, type]]:
""" Get the target output activation dtype for
    the argument in the original graph, skipping inserted observers.
    We are assuming that the observers are inserted correctly, and the dtype for the
    argument in the quantized graph will match what is specified by the qconfig
"""
assert isinstance(arg, Node)
if is_activation_post_process_node(arg, modules):
observed_arg = arg.args[0]
assert isinstance(observed_arg, Node), "Currently we only support observing Node"
return node_name_to_target_dtype[observed_arg.name]["output_activation_dtype"]
else:
return node_name_to_target_dtype[arg.name]["output_activation_dtype"]
def get_arg_target_dtype_as_input_to_node(
arg: Node,
node: Node,
modules: Dict[str, torch.nn.Module],
node_name_to_target_dtype: Dict[str, Dict[str, Optional[Union[torch.dtype, type]]]],
backend_config: BackendConfig,
) -> Optional[Union[torch.dtype, type]]:
""" Get the target argument dtype for the argument `arg`, as input
to node `node`
"""
assert isinstance(arg, Node)
is_weight = node_arg_is_weight(node, arg, backend_config)
is_bias = node_arg_is_bias(node, arg, backend_config)
is_activation = not is_weight and not is_bias
if is_activation:
return node_name_to_target_dtype[node.name]["input_activation_dtype"]
elif is_weight:
if node.target in NON_QUANTIZABLE_WEIGHT_OPS:
return None
else:
return node_name_to_target_dtype[node.name]["weight_dtype"]
else:
return node_name_to_target_dtype[node.name]["bias_dtype"]
def get_arg_target_compute_dtype_as_input_to_node(
arg: Node,
node: Node,
modules: Dict[str, torch.nn.Module],
node_name_to_target_dtype: Dict[str, Dict[str, Union[torch.dtype, type, None]]],
backend_config: BackendConfig,
) -> Union[torch.dtype, type, None]:
""" Get the target argument dtype for the argument `arg`, as input
to node `node`
"""
assert isinstance(arg, Node)
is_weight = node_arg_is_weight(node, arg, backend_config)
is_bias = node_arg_is_bias(node, arg, backend_config)
is_activation = not is_weight and not is_bias
if is_activation and \
"input_activation_compute_dtype" in node_name_to_target_dtype[node.name]:
return node_name_to_target_dtype[node.name]["input_activation_compute_dtype"]
else:
return None
def maybe_insert_input_observer_for_arg_or_kwarg(
node: Union[Node, Any],
arg: Argument,
qconfig: QConfigAny,
model: torch.nn.Module,
modules: Dict[str, torch.nn.Module],
graph: Graph,
node_name_to_target_dtype: Dict[str, Dict[str, Optional[Union[torch.dtype, type]]]],
qhandler: Optional[QuantizeHandler],
prepare_custom_config: PrepareCustomConfig,
backend_config: BackendConfig,
) -> Argument:
"""
Given a `node` and an `arg`, inserts an input observer between
`node` and `arg` if necessary.
"""
# for ops such as torch.cat([x0, x1]),
# traverse through the list
if isinstance(arg, (list, tuple)):
new_arg_to_return = []
for inner_arg in arg:
new_inner_arg = maybe_insert_input_observer_for_arg_or_kwarg(
node, inner_arg, qconfig, model, modules,
graph, node_name_to_target_dtype,
qhandler,
prepare_custom_config,
backend_config)
new_arg_to_return.append(new_inner_arg)
return type(arg)(new_arg_to_return)
if not isinstance(arg, Node):
return arg
assert isinstance(arg, Node)
# default (no observer)
new_arg = arg
is_standalone_module = qhandler is not None and qhandler.is_standalone_module()
assert qconfig is not None
if not is_standalone_module:
# regular flow for most nodes, except standalone modules
is_weight = node_arg_is_weight(node, arg, backend_config)
is_reuse_input_qconfig_ = is_reuse_input_qconfig(qconfig)
act_post_process_ctr = qconfig.weight if is_weight else \
qconfig.activation
arg_as_output_target_dtype = get_arg_target_dtype_as_output(arg, modules, node_name_to_target_dtype)
arg_as_input_target_dtype = get_arg_target_dtype_as_input_to_node(arg,
node,
modules,
node_name_to_target_dtype,
backend_config)
arg_as_input_target_compute_dtype = \
get_arg_target_compute_dtype_as_input_to_node(
arg, node, modules, node_name_to_target_dtype, backend_config)
needs_obs = (
    (
        # if the dtypes are different, we need an observer
        (arg_as_output_target_dtype != arg_as_input_target_dtype) and
        # except if the second dtype is float, a dequant will be inserted
        # without an observer in convert
        # TODO(future PR): change this so a placeholder is inserted for
        # future dequants, to make the logic easier to understand
        (arg_as_input_target_dtype != torch.float) and
        # if arg output dtype is in DO_NOT_OBS_DTYPE_LIST do not insert observer
        (arg_as_output_target_dtype not in DO_NOT_OBS_DTYPE_LIST) and
        # if qconfig is reuse_input qconfig, we won't insert extra observer for input
        not is_reuse_input_qconfig_
    ) or
    (
        # need to add input observer for dynamic quantization
        # only add observer for first input for now, we may need to extend
        # qconfig_dict and backend_config to support more general configurations
        # of dynamic quantization, e.g. dynamically quantizing second input, third
        # input etc.
        (arg_as_input_target_compute_dtype in [torch.quint8, torch.int8, torch.float16]) and
        arg is node.args[0]
    )
)
else:
# custom flow for standalone modules
_, _, sm_prepare_custom_config, _ = \
get_standalone_module_configs(
node, modules, prepare_custom_config, qconfig, backend_config)
sm_input_quantized_idxs = sm_prepare_custom_config.input_quantized_indexes
# for args, this is set to the index of the current arg
# for kwargs, this is left at None
cur_input_idx = None
for arg_idx, arg_to_check in enumerate(node.args):
if arg_to_check is arg:
cur_input_idx = arg_idx
break
if cur_input_idx is None:
needs_obs = False
else:
arg_as_output_target_dtype = get_arg_target_dtype_as_output(arg, modules, node_name_to_target_dtype)
arg_as_input_target_dtype = torch.quint8 if cur_input_idx in sm_input_quantized_idxs \
else torch.float
needs_obs = (
(arg_as_output_target_dtype != arg_as_input_target_dtype) and
(arg_as_input_target_dtype != torch.float)
)
act_post_process_ctr = qconfig.activation
if needs_obs:
new_obs_mod = act_post_process_ctr()
existing_obs_node = None
# Before using the new observer, check if an observer
# of the correct type already exists. If it does, use it.
# This prevents duplicate observer insertions if a node is
# used by multiple nodes.
# TODO: this is looking into how the value is used in the future
# we should remove this
# removing this means we insert one observer for each use, even if they
# have the same dtype, we can have an extra pass that removes the extra observers
for maybe_obs_node, _ in arg.users.items():
if maybe_obs_node.op == 'call_module':
maybe_obs_mod = modules[maybe_obs_node.target] # type: ignore[index]
if (
type(maybe_obs_mod) == type(new_obs_mod) and
maybe_obs_mod.dtype == arg_as_input_target_dtype
):
existing_obs_node = maybe_obs_node
break
if existing_obs_node is None:
new_obs_node = insert_observer(
arg, new_obs_mod, model, modules, graph)
# override this arg to be the observed arg
new_arg = new_obs_node
else:
new_arg = existing_obs_node
return new_arg
def maybe_insert_input_observers_for_node(
node: Node,
qconfig: QConfigAny,
model: torch.nn.Module,
modules: Dict[str, torch.nn.Module],
graph: Graph,
node_name_to_target_dtype: Dict[str, Dict[str, Optional[Union[torch.dtype, type]]]],
qhandler: Optional[QuantizeHandler],
prepare_custom_config: PrepareCustomConfig,
backend_config: BackendConfig,
) -> None:
"""
If needed, inserts observers to the input args and kwargs of `node`.
Note: modifies `node` inplace.
For example, if cur_node needs an observer after prev_node, we change from
prev_node -> cur_node
To
prev_node -> obs -> cur_node
"""
if qconfig is None:
# if quantization is turned off for this node, we do not need
# to insert input observers
return
assert qconfig is not None
# Look through every input arg. If that arg's target dtype does not
# match the current node's target dtype, insert an observer.
new_args = []
for arg in node.args:
new_arg = maybe_insert_input_observer_for_arg_or_kwarg(
node, arg, qconfig, model, modules, graph,
node_name_to_target_dtype,
qhandler,
prepare_custom_config,
backend_config)
new_args.append(new_arg)
new_kwargs = {}
for k, kwarg in node.kwargs.items():
new_kwarg = maybe_insert_input_observer_for_arg_or_kwarg(
node, kwarg, qconfig, model, modules, graph,
node_name_to_target_dtype,
qhandler,
prepare_custom_config,
backend_config)
new_kwargs[k] = new_kwarg
# assign the new args and kwargs to the node, inplace
node.args = tuple(new_args)
node.kwargs = new_kwargs
def maybe_insert_input_equalization_observers_for_node(
node: Node,
equalization_qconfig: Any,
model: torch.nn.Module,
modules: Dict[str, torch.nn.Module],
graph: Graph,
node_name_to_target_dtype: Dict[str, Dict[str, Optional[Union[torch.dtype, type]]]],
is_branch: bool,
backend_config: BackendConfig,
) -> None:
"""
If `node` needs to be equalized, find the input/weight equalization observers it needs in
`equalization_qconfig`, create them, and insert them into `graph`.
If `node` does not need an equalization observer, nothing is inserted.
"""
if equalization_qconfig is None or not node_supports_equalization(node, modules):
return
if is_branch:
warnings.warn(
f"Cannot equalize {node} because it is part of a branch."
)
return
new_args = []
for arg in node.args:
if not isinstance(arg, Node) or node_arg_is_bias(node, arg, backend_config):
new_args.append(arg)
continue
is_weight = node_arg_is_weight(node, arg, backend_config)
act_eq_process_ctr = equalization_qconfig.weight if is_weight else \
equalization_qconfig.input_activation
new_eq_obs_mod = act_eq_process_ctr()
new_eq_obs_node = insert_observer(
arg, new_eq_obs_mod, model, modules, graph)
new_args.append(new_eq_obs_node)
# assign the new args and kwargs to the node, inplace
node.args = tuple(new_args)
def maybe_insert_output_observer_for_node(
node: Node,
model: torch.nn.Module,
modules: Dict[str, torch.nn.Module],
graph: Graph,
matches: Dict[str, _MatchResultWithQConfig],
node_name_to_target_dtype: Dict[str, Dict[str, Optional[Union[torch.dtype, type]]]],
matched_pattern: Any,
qhandler: Optional[QuantizeHandler],
is_qat: bool,
) -> Optional[Node]:
"""
If `node` needs an output observer, creates it, inserts it into `graph`
and returns it.
If `node` does not need an output observer, returns None.
"""
root_node, _, pattern, qhandler, qconfig = matches.get(
node.name, (None, None, None, None, None))
if qhandler is None:
return None
assert qconfig is not None
assert node.op != 'output', 'observer insertion for outputs is handled elsewhere'
is_standalone_module = qhandler is not None and qhandler.is_standalone_module()
dtype = node_name_to_target_dtype[node.name]["output_activation_dtype"]
should_insert_observer = dtype not in DO_NOT_OBS_DTYPE_LIST + [torch.float]
# TODO(future PR): move the following logic to
# should_insert_observer_for_output
should_insert_observer = should_insert_observer and \
activation_is_statically_quantized(qconfig)
# we never insert observers to output of standalone module, we assume
# if needed, they are inserted inside the standalone module
should_insert_observer = should_insert_observer and \
(not is_standalone_module)
if should_insert_observer:
act_post_process_ctr = qconfig.activation
if activation_is_int8_quantized(qconfig):
act_post_process_ctr = qhandler.get_activation_ctr(
qconfig,
matched_pattern,
is_qat)
observer = act_post_process_ctr()
new_obs = insert_observer(node, observer, model, modules, graph)
return new_obs
else:
return None
def maybe_insert_observers_before_graph_output(
graph_output_node: Node,
output_quantized_idxs: List[int],
node_name_to_target_dtype: Dict[str, Dict[str, Optional[Union[torch.dtype, type]]]],
qconfig_map: Dict[str, QConfigAny],
model: torch.nn.Module,
modules: Dict[str, torch.nn.Module],
graph: Graph,
) -> None:
"""
If the output needs to be quantized and there are any nodes
in the output which are not already observed, inserts observers
for those nodes.
"""
# TODO(future PR): update the output_quantized_idxs API to match
# arbitrary data structures. There is always a single output, and
# that output can have arbitrary nesting of values. List[int] is
# not the right data type for this.
assert output_quantized_idxs == [0] or output_quantized_idxs == [], \
'unrecognized format of output_quantized_idxs'
# Currently dequants are inserted in the convert step. So, we only
# have to do anything if the output is hardcoded to be quantized
if output_quantized_idxs == []:
return
# TODO(future PR): support more dtypes in model outputs, if necessary
output_target_dtype = torch.quint8
def _recursive_maybe_replace_node_with_obs(
maybe_node: Argument,
target_dtype: torch.dtype,
node_name_to_target_dtype: Dict[str, Dict[str, Optional[Union[torch.dtype, type]]]],
qconfig_map: Dict[str, QConfigAny],
model: torch.nn.Module,
modules: Dict[str, torch.nn.Module],
graph: Graph,
) -> Argument:
"""
Navigate an arbitrary data structure of lists, tuples, dicts.
For each container type, recurse on all inputs. Once any Node
is found, insert an observer if needed and do not recurse further.
For example, given a structure of
{'foo1': [[bar1]], 'foo2': {'foo3': [[[bar3]]]}}
we recurse down to bar1 and bar3, observe them if necessary,
and if we inserted an observer then replace the original node
with its observer.
Returns the data structure with all nodes needing observation being
replaced by their observers.
"""
if isinstance(maybe_node, Node):
# check dtype of this node
this_node_dtype = get_arg_target_dtype_as_output(
maybe_node, modules, node_name_to_target_dtype)
if this_node_dtype != target_dtype:
# insert observer
qconfig = qconfig_map.get(maybe_node.name)
# TODO(future PR): see if we need to allow specifying qconfig
# on output nodes, to remove the restriction below.
assert qconfig is not None, \
'Quantizing the output node without a qconfig is not supported'
observer_mod = qconfig.activation()
observer_node = insert_observer(
maybe_node, observer_mod, model, modules, graph)
return observer_node
else:
return maybe_node
elif isinstance(maybe_node, (list, tuple)):
results = []
for inner_node in maybe_node:
results.append(_recursive_maybe_replace_node_with_obs(
inner_node, target_dtype, node_name_to_target_dtype,
qconfig_map, model, modules, graph))
if isinstance(maybe_node, list):
return results
else:
return tuple(results)
elif isinstance(maybe_node, dict):
results_dict = {}
for k, inner_v in maybe_node.items():
results_dict[k] = _recursive_maybe_replace_node_with_obs(
inner_v, target_dtype, node_name_to_target_dtype,
qconfig_map, model, modules, graph)
return results_dict
else:
    return maybe_node
new_args = []
for old_arg in graph_output_node.args:
new_args.append(
_recursive_maybe_replace_node_with_obs(
old_arg, output_target_dtype, node_name_to_target_dtype,
qconfig_map, model, modules, graph))
graph_output_node.args = tuple(new_args) # type: ignore[assignment]
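# A simplified, self-contained sketch (illustrative only, names assumed) of the traversal
# pattern used by _recursive_maybe_replace_node_with_obs above, applied to plain values
# instead of fx Nodes:
def _example_recursive_structure_walk(value, fn):
    if isinstance(value, (list, tuple)):
        mapped = [_example_recursive_structure_walk(v, fn) for v in value]
        return mapped if isinstance(value, list) else tuple(mapped)
    elif isinstance(value, dict):
        return {k: _example_recursive_structure_walk(v, fn) for k, v in value.items()}
    else:
        # leaves play the role of Nodes: observe/replace them here
        return fn(value)
# e.g. _example_recursive_structure_walk({'foo1': [[1]]}, lambda x: x * 2) == {'foo1': [[2]]}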
def maybe_propagate_dtype_for_node(
node: Node,
target_dtype: Union[torch.dtype, type],
node_name_to_target_dtype: Dict[str, Dict[str, Optional[Union[torch.dtype, type]]]],
matches: Dict[str, _MatchResultWithQConfig],
) -> None:
"""
Assigns `target_dtype` to `node`. If `node` is a general tensor value op
(see QuantizeHandler.is_general_tensor_value_op in quantization_patterns.py for more details),
also call this function recursively on
the first argument, to propagate the dtype to the node that produced it.
"""
node_name_to_target_dtype[node.name]["input_activation_dtype"] = target_dtype
node_name_to_target_dtype[node.name]["output_activation_dtype"] = target_dtype
# if this is a copy node, propagate to first arg
root_node, _, pattern, qhandler, qconfig = matches.get(
node.name, (None, None, None, None, None))
if qhandler is not None and qhandler.is_general_tensor_value_op():
prev_node = node.args[0]
if isinstance(prev_node, Node):
maybe_propagate_dtype_for_node(
prev_node, target_dtype, node_name_to_target_dtype, matches)
def propagate_dtypes_for_known_nodes(
graph: Graph,
node_name_to_target_dtype: Dict[str, Dict[str, Optional[Union[torch.dtype, type]]]],
matches: Dict[str, _MatchResultWithQConfig],
) -> None:
"""
Currently we assume that inputs to the graph are either `torch.float` or
`torch.quint8`, which is not always correct. For ops such as
`x.masked_fill(mask, value)`, we know that the dtype of `mask` is a
`BoolTensor`. Propagate this information throughout the graph.
Note: not all dtypes in the graph will be correct after this pass, but a
higher percentage of them will be correct. Hopefully in the future we can
replace this with a better way to reason about dtypes of tensors.
"""
for node in graph.nodes:
non_observable_arg_dict = get_non_observable_arg_indexes_and_types(node)
for arg_type in non_observable_arg_dict:
non_observable_indices = non_observable_arg_dict[arg_type](node)
for index in non_observable_indices:
arg = node.args[index]
# when an argument is a tuple, it does not show up as another node so we need to go through
# all elements of the tuple manually
if isinstance(arg, tuple) or isinstance(arg, list):
arg_list = list(arg)
else:
arg_list = [arg]
for cur_arg in arg_list:
# hard coded arguments show up but aren't `Node` typed and do not need dtype propagated
if isinstance(cur_arg, torch.fx.node.Node):
maybe_propagate_dtype_for_node(
cur_arg, arg_type, node_name_to_target_dtype, matches)
def maybe_make_input_output_share_observers(
node: Node,
model: torch.nn.Module,
modules: Dict[str, torch.nn.Module],
) -> bool:
"""
Ensures that we share an observer
for all input arguments as well as the output argument. In detail, given
a graph of
x0 -> obs0 -> op -> obs2 -> x2
            /
x1 -> obs1
where node obs0 points to observer instance observer0,
obs1 points to observer1 and obs2 points to observer2, we make nodes obs1
and obs2 point to observer0.
Returns: whether the operation succeeded or not
"""
first_arg = None
# find the first arg that is a Node (or a list/tuple of Nodes)
for i in range(len(node.args)):
if isinstance(node.args[i], (Node, list, tuple)):
first_arg = node.args[i]
break
# if no such arg was found, return directly
if first_arg is None:
return False
if isinstance(first_arg, (list, tuple)):
first_arg_arg = first_arg[0]
elif isinstance(first_arg, Node):
first_arg_arg = first_arg
else:
return False
# if we have a graph such as
# observed_node -> non_observed_node -> cat
# we need to navigate up to the first observer
iteration_guard = 0
while not is_activation_post_process_node(first_arg_arg, modules):
if not isinstance(first_arg_arg, Node):
return False
# did not find an activation_post_process for the op
if first_arg_arg.op == "placeholder":
return False
# trace back through the args until we find the first Node
trace_back_node = None
for i in range(len(first_arg_arg.args)):
trace_back_node = first_arg_arg.args[i]
if isinstance(trace_back_node, Node):
break
if trace_back_node is None:
return False
first_arg_arg = trace_back_node
iteration_guard += 1
if iteration_guard > 10000:
raise AssertionError('Unable to find observer of previous node')
assert isinstance(first_arg_arg, Node)
target_to_use = first_arg_arg.target
assert isinstance(target_to_use, str)
obs_mod_to_use = modules[target_to_use]
if isinstance(first_arg, (list, tuple)):
# set all other input observer nodes to use that module
for input_idx, input_arg in enumerate(first_arg):
if input_idx == 0:
continue
iteration_guard = 0
while not is_activation_post_process_node(input_arg, modules):
# failed to trace back since no input arg for the current node
if len(input_arg.args) < 1:
return False
input_arg = input_arg.args[0]
iteration_guard += 1
if iteration_guard > 10000:
raise AssertionError('Unable to find observer of previous node')
parent_name, name = _parent_name(input_arg.target)
setattr(modules[parent_name], name, obs_mod_to_use)
# set the output observer node to use that module
for output_obs_node, _ in node.users.items():
assert is_activation_post_process_node(output_obs_node, modules)
parent_name, name = _parent_name(output_obs_node.target)
setattr(modules[parent_name], name, obs_mod_to_use)
# TODO(future PR): delete the orphaned observer modules
return True
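# A minimal sketch (assumed example, not part of the pass) of the mechanism used above:
# pointing several module attributes at the same observer instance makes the corresponding
# graph nodes share observed statistics.
def _example_shared_observer_instances():
    import torch
    from torch.ao.quantization.observer import MinMaxObserver

    holder = torch.nn.Module()
    shared = MinMaxObserver()
    holder.obs_in = shared
    holder.obs_out = shared           # both attribute names resolve to one instance
    holder.obs_in(torch.randn(4))     # statistics recorded through one name...
    return holder.obs_out.min_val     # ...are visible through the other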
def remove_output_observer(
node: Node,
model: torch.nn.Module,
modules: Dict[str, torch.nn.Module]):
items = list(node.users.items())
for output_obs_node, _ in items:
assert is_activation_post_process_node(output_obs_node, modules)
output_obs_node.replace_all_uses_with(node)
model.graph.erase_node(output_obs_node) # type: ignore[union-attr, operator]
def swap_custom_module_to_observed(
node: Node,
qconfig: QConfigAny,
modules: Dict[str, torch.nn.Module],
prepare_custom_config: PrepareCustomConfig):
custom_module = modules[node.target] # type: ignore[index]
custom_module_class_mapping = prepare_custom_config.float_to_observed_mapping
observed_custom_module_class = \
get_swapped_custom_module_class(
custom_module, custom_module_class_mapping, qconfig)
observed_custom_module = \
observed_custom_module_class.from_float(custom_module)
parent_name, name = _parent_name(node.target)
setattr(modules[parent_name], name, observed_custom_module)
def insert_observers_for_model(
model: GraphModule,
modules: Dict[str, torch.nn.Module],
matches: Dict[str, _MatchResultWithQConfig],
qconfig_map: Dict[str, QConfigAny],
graph: Graph,
prepare_custom_config: PrepareCustomConfig,
equalization_config_map: Dict[str, Any],
input_quantized_idxs: List[int],
output_quantized_idxs: List[int],
backend_config: BackendConfig,
observed_node_names: Set[str],
is_qat: bool,
) -> Optional[Node]:
"""
Inserts observers, using the following high level algorithm:
For each node in the graph:
1. determine the target dtype of this node in the quantized graph, and save
it for future steps
2. determine the target dtype of all args and kwargs of this node
3. if any arg or kwarg's target dtype does not match the current node's
dtype, insert an observer
4. if the current node needs an output observer, insert it
For example:
- starting graph:
x0 -> linear -> x1
- observed graph after processing x0:
x0(fp32)
- observed graph after processing linear:
x0(fp32) -> x0_obs0(int8) -> linear(int8) -> linear_obs0(int8)
- observed graph after processing x1:
x0(fp32) -> x0_obs0(int8) -> linear(int8) -> linear_obs0(int8) -> x1
After a node is processed, the naive observer placement is guaranteed to be
complete for that node and all of its predecessors. There can be future
passes which optimize the graph by deduplicating observers, etc.
"""
# name of Node in original FX Graph to the target dtype information
# that's derived from qconfig for the Node, for example, if we have
# a conv2d node that has a qconfig
# {
# # information for input and bias node omitted
# # for getattr node
# # weight = getattr(self, 'weight')
# 'weight': {
# 'output_activation_dtype': torch.float,
# }
# # for conv2d node
# # conv2d = call_function[target=torch.nn.functional.conv2d](
# # args=(input, weight, bias))
# 'conv2d': {
# 'input_activation_dtype': torch.quint8,
# 'weight_dtype': torch.qint8,
# 'bias_dtype': torch.float,
# 'output_activation_dtype': torch.quint8,
# }
# }
#
# TODO: rename this to node_name_to_target_dtype_info
node_name_to_target_dtype: Dict[str, Dict[str, Optional[Union[torch.dtype, type]]]] = defaultdict(dict)
cache_for_no_tensor_check: Dict[Node, bool] = dict()
inputs_seen_counter = 0
outputs_seen_counter = 0
# first, populate the dtype map based only on qconfig and qhandler
# this assumes:
# graph inputs are fp32 by default, and int8 where overridden
# other nodes' output dtype is specified by the qconfig
modules = dict(model.named_modules(remove_duplicate=False))
for node in model.graph.nodes:
root_node, _, pattern, qhandler, qconfig = matches.get(
node.name, (None, None, None, None, None))
node_name_to_target_dtype[node.name] = get_target_activation_dtype_for_node(
node, qconfig, inputs_seen_counter, outputs_seen_counter,
input_quantized_idxs, output_quantized_idxs, qhandler,
modules, cache_for_no_tensor_check)
if node.op == "placeholder":
inputs_seen_counter += 1
if node.op == "output":
outputs_seen_counter += 1
# Second, for nodes with known input dtypes, propagate them throughout the
# graph. For example, if there is a call such as
# x1 = x0.masked_fill(mask, 1)
# we propagate the type of mask to be torch.bool
propagate_dtypes_for_known_nodes(
model.graph, node_name_to_target_dtype, matches)
# After this point, the current node and all of its arguments
# have a dtype assigned. Now, we insert observers for inputs
# of this node (if needed for this node), and the output of this node
# (if needed for this node).
# Since we are mutating the graph as we go, we iterate over the original
# nodes before observer insertion, instead of model.graph.nodes.
nodes_before_observation = list(model.graph.nodes)
# reset inputs/outputs counters
inputs_seen_counter = 0
outputs_seen_counter = 0
results_node = None
for node in nodes_before_observation:
if node.op == 'placeholder':
# if a graph input is in fp32, it does not need observation
# if a graph input is in int8, we assume the observation happens
# outside of the graph, and no additional observation is needed
pass
elif node.op in ('call_module', 'call_method', 'call_function', 'output'):
# check for matches
last_node, matched_node_pattern, pattern, qhandler, qconfig = matches.get(
node.name, (None, None, None, None, None))
equalization_qconfig = equalization_config_map.get(node.name, None)
this_node_dtype = node_name_to_target_dtype[node.name]
output_not_a_tensor = this_node_dtype is None
# TODO(future PR): consider stopping matching getitem
is_getitem = node.op == 'call_function' and \
node.target == operator.getitem
skip_inserting_observers = (
(qconfig is None) or
output_not_a_tensor or
is_getitem
) and (
not node.op == 'output'
)
is_supported_by_backend = is_pattern_dtype_config_supported_by_backend(
pattern, matched_node_pattern, node_name_to_target_dtype, backend_config)
if not skip_inserting_observers and is_supported_by_backend:
modules = dict(model.named_modules(remove_duplicate=False))
if node.op != 'output':
assert matched_node_pattern is not None
# add matched nodes to the observed node name set
add_matched_node_name_to_set(matched_node_pattern, observed_node_names)
# This is currently only used for equalization.
# Checks if the current node is in a branch in which the first
# two layers are both being quantized.
#
# ex. conv2
# /
# x -> conv1
#
# If this is the case, we will not apply equalization to the
# initial two layers.
is_quantized_branch = False
if (
len(node.args) > 0 and
isinstance(node.args[0], Node) and
len(node.args[0].users) > 1
):
for user in node.args[0].users:
# Checks if there exists another user being quantized
is_user_quantized = (
qconfig_map.get(user.name, None) is not None or
(user.op == 'call_module' and isinstance(modules[str(user.target)], ObserverBase))
)
if user != node and is_user_quantized:
is_quantized_branch = True
# TODO: this only works for sequential fusion right now, extend it
# to automatically detect all input nodes based on the pattern
# need to change find_matches function to return this information
root_node = _default_root_node_getter(matched_node_pattern)
is_input_node_of_the_pattern = node is root_node
if is_input_node_of_the_pattern:
# this modifies node inplace
maybe_insert_input_observers_for_node(
node, qconfig, model, modules, graph,
node_name_to_target_dtype,
qhandler,
prepare_custom_config,
backend_config)
# Insert equalization input observers if needed
maybe_insert_input_equalization_observers_for_node(
node, equalization_qconfig, model, modules, graph,
node_name_to_target_dtype, is_quantized_branch, backend_config)
is_last_node_of_pattern = node is last_node
is_general_tensor_value_op = \
(qhandler is not None and qhandler.is_general_tensor_value_op())
is_reuse_input_qconfig_ = is_reuse_input_qconfig(qconfig)
if is_last_node_of_pattern:
# this returns the new observer node if it was needed
maybe_output_obs_node = maybe_insert_output_observer_for_node(
node, model, modules, graph, matches,
node_name_to_target_dtype, pattern, qhandler, is_qat)
if maybe_output_obs_node is not None:
# Update users of original node to use the output observer
# instead. For example, change
#
#   cur_node -> obs
#      \
#       next_node          (next_node consumes cur_node directly)
#
# to
#
#   cur_node -> obs -> next_node          (next_node now consumes obs)
#
# We need to save orig users before updating uses because
# the list of users will change as we update uses
orig_users = list(node.users.keys())
for user_node in orig_users:
if user_node is maybe_output_obs_node:
continue
user_node.replace_input_with(node, maybe_output_obs_node)
is_observer_in_same_graph_ = is_observer_in_same_graph(node, modules, node_name_to_target_dtype)
# for general tensor value ops, we modify the graph
# to make all inputs and outputs use the first input's
# observer
if (is_general_tensor_value_op and is_observer_in_same_graph_) or \
is_reuse_input_qconfig_:
if not maybe_make_input_output_share_observers(node, model, modules):
remove_output_observer(node, model, modules)
if qhandler is not None and qhandler.is_custom_module():
swap_custom_module_to_observed(node, qconfig, modules, prepare_custom_config)
else: # output
maybe_insert_observers_before_graph_output(
node, output_quantized_idxs,
node_name_to_target_dtype, qconfig_map,
model, modules, graph)
#
# After this point, the current node has input and output observers
# that it needs for itself inserted.
#
# increment the counters, so future inputs and outputs are assigned
# correct dtypes
if node.op == 'placeholder':
inputs_seen_counter += 1
elif node.op == 'output':
outputs_seen_counter += 1
results_node = node
return results_node
def _validate_fixed_qparams_qconfigs(model: GraphModule, qconfig_map: Dict[str, QConfigAny]):
"""
Validate whether the correct observers are configured for fixed qparams ops in the model, if any.
"""
# TODO: handle fp16 qconfigs properly
allowed_observer_ctrs = [
float16_dynamic_qconfig.activation,
float16_static_qconfig.activation,
]
named_modules = dict(model.named_modules(remove_duplicate=False))
for node in model.graph.nodes:
if node.op == "call_function":
module_type_or_function_or_method = node.target
elif node.op == "call_module":
module_type_or_function_or_method = type(named_modules[node.target])
else:
module_type_or_function_or_method = None
if module_type_or_function_or_method in _FIXED_QPARAMS_OP_TO_OBSERVER:
bad_observer = True
qconfig = qconfig_map.get(node.name, None)
if qconfig is None:
bad_observer = False
else:
for observer_ctr in allowed_observer_ctrs + [_FIXED_QPARAMS_OP_TO_OBSERVER[module_type_or_function_or_method]]:
if obs_or_fq_ctr_equals(
qconfig.activation,
FixedQParamsFakeQuantize.with_args(observer=observer_ctr)) or \
obs_or_fq_ctr_equals(qconfig.activation, observer_ctr):
bad_observer = False
if bad_observer:
raise ValueError("QConfigMapping must specify fixed qparams observer for fixed qparams op "
"'%s' type: '%s'. Please use torch.ao.quantization.get_default_qconfig_mapping or "
"torch.ao.quantization.get_default_qat_qconfig_mapping"
" instead." % (node.format_node(), module_type_or_function_or_method))
def run_prepare_fx_on_standalone_modules(
model: torch.nn.Module,
is_qat: bool,
modules: Dict[str, torch.nn.Module],
matches: Any,
prepare_custom_config: PrepareCustomConfig,
backend_config: BackendConfig,
) -> None:
"""
Runs prepare_fx on each standalone module. Note: this does
not modify the graph, it just replaces the unobserved modules with
their observed versions.
"""
for (
node_name,
(root_node, _, pattern, qhandler, qconfig),
) in matches.items():
if qhandler is None:
continue
elif not qhandler.is_standalone_module():
continue
sm_qconfig_mapping, sm_example_inputs, sm_prepare_custom_config, \
sm_backend_config = get_standalone_module_configs(
root_node, modules, prepare_custom_config, qconfig, backend_config)
standalone_module = modules[root_node.target]
prepare = \
torch.ao.quantization.quantize_fx._prepare_standalone_module_fx # type: ignore[attr-defined]
observed_standalone_module = \
prepare(
standalone_module,
sm_qconfig_mapping,
is_qat,
example_inputs=sm_example_inputs,
prepare_custom_config=sm_prepare_custom_config,
backend_config=sm_backend_config)
preserved_attributes = set(sm_prepare_custom_config.preserved_attributes)
observed_standalone_module = ObservedStandaloneGraphModule(
observed_standalone_module, observed_standalone_module.graph,
preserved_attributes)
parent_name, name = _parent_name(root_node.target)
setattr(modules[parent_name], name,
observed_standalone_module)
modules[root_node.target] = observed_standalone_module
def save_state(
observed: GraphModule,
qconfig_map: Dict[str, QConfigAny],
node_name_to_scope: Dict[str, Tuple[str, type]],
prepare_custom_config: PrepareCustomConfig,
equalization_qconfig_map: Dict[str, Any],
qconfig_mapping: QConfigMapping,
is_qat: bool,
observed_node_names: Set[str],
) -> None:
observed._qconfig_map = qconfig_map # type: ignore[assignment]
observed._prepare_custom_config = prepare_custom_config # type: ignore[assignment]
observed._node_name_to_scope = node_name_to_scope # type: ignore[assignment]
observed._equalization_qconfig_map = equalization_qconfig_map # type: ignore[assignment]
observed._qconfig_mapping = qconfig_mapping # type: ignore[assignment]
observed._is_qat = is_qat # type: ignore[assignment]
observed._observed_node_names = observed_node_names # type: ignore[assignment]
def prepare(
model: GraphModule,
qconfig_mapping: Union[QConfigMapping, Dict[str, Any]],
is_qat: bool,
node_name_to_scope: Dict[str, Tuple[str, type]],
example_inputs: Tuple[Any, ...],
prepare_custom_config: Union[PrepareCustomConfig, Dict[str, Any], None] = None,
_equalization_config: Union[QConfigMapping, Dict[str, Any], None] = None,
backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
is_standalone_module: bool = False) -> ObservedGraphModule:
""" standalone_module means it a submodule that is not inlined in
parent module, and will be quantized separately as one unit.
How the standalone module is observed is specified by `input_quantized_idxs` and
`output_quantized_idxs` in the prepare_custom_config for the standalone module
Args:
node_name_to_scope: mapping from node name to the scope of the module which contains the node.
The scope is a tuple of fully qualified path of the module and the type of the module
Returns:
model(GraphModule): prepared standalone module
attributes:
_standalone_module_input_quantized_idxs(List[Int]): a list of
indexes for the graph input that is expected to be quantized,
same as input_quantized_idxs configuration provided
for the standalone module
_standalone_module_output_quantized_idxs(List[Int]): a list of
indexes for the graph outputs that are quantized,
same as the output_quantized_idxs configuration provided
for the standalone module
"""
if prepare_custom_config is None:
prepare_custom_config = PrepareCustomConfig()
if _equalization_config is None:
_equalization_config = QConfigMapping()
if isinstance(qconfig_mapping, Dict):
warnings.warn(
"Passing a QConfig dictionary to prepare is deprecated and will not be supported "
"in a future version. Please pass in a QConfigMapping instead.")
qconfig_mapping = QConfigMapping.from_dict(qconfig_mapping)
if isinstance(_equalization_config, Dict):
warnings.warn(
"Passing a QConfig dictionary to prepare for equalization is deprecated and will not "
"be supported in a future version. Please pass in a QConfigMapping instead.")
_equalization_config = QConfigMapping.from_dict(_equalization_config)
if isinstance(prepare_custom_config, Dict):
warnings.warn(
"Passing a prepare_custom_config_dict to prepare is deprecated and will not be supported "
"in a future version. Please pass in a PrepareCustomConfig instead.")
prepare_custom_config = PrepareCustomConfig.from_dict(prepare_custom_config)
if isinstance(backend_config, Dict):
warnings.warn(
"Passing a backend_config_dict to prepare is deprecated and will not be supported "
"in a future version. Please pass in a BackendConfig instead.")
backend_config = BackendConfig.from_dict(backend_config)
assert(isinstance(qconfig_mapping, QConfigMapping))
assert(isinstance(_equalization_config, QConfigMapping))
qconfig_mapping = copy.deepcopy(qconfig_mapping)
_equalization_config = copy.deepcopy(_equalization_config)
# mapping from a tuple of nodes in reverse order to uninitialized
# QuantizeHandler subclass. For example,
# {
# # match a single node
# (<class 'torch.nn.modules.conv.Conv3d'>:
# <class 'torch.ao.quantization.fx.quantize.ConvRelu'>),
# # match multiple nodes in reverse order
# ((<function relu at 0x7f766a7360d0>, <built-in function add>):
# <class 'torch.ao.quantization.fx.quantize.Add'>),
# }
pattern_to_quantize_handler: Dict[Pattern, QuantizeHandler] = {}
if backend_config is None:
backend_config = get_native_backend_config()
pattern_to_quantize_handler = get_pattern_to_quantize_handlers(backend_config)
pattern_to_quantize_handler = sorted_patterns_dict(pattern_to_quantize_handler)
root_node_getter_mapping = \
get_fusion_pattern_to_root_node_getter(backend_config)
update_qconfig_for_fusion(model, qconfig_mapping)
update_qconfig_for_fusion(model, _equalization_config)
flattened_qconfig_dict = get_flattened_qconfig_dict(qconfig_mapping)
# TODO: support regex as well
propagate_qconfig_(model, flattened_qconfig_dict, prepare_custom_config.to_dict())
if is_qat:
module_to_qat_module = get_module_to_qat_module(backend_config)
qat_swap_modules(model, module_to_qat_module)
update_qconfig_for_qat(qconfig_mapping, {})
# mapping from fully qualified module name to module instance
# for example,
# {
# '': Model(...),
# 'linear': Linear(...),
# 'linear.weight_fake_quant': PerChannelMinMaxObserver(...),
# }
modules = dict(model.named_modules(remove_duplicate=False))
# fill qconfig_map, a map from node name to qconfig, used in find_matches
equalization_qconfig_map = generate_qconfig_map(
model, modules, model.graph, _equalization_config, node_name_to_scope)
qconfig_map = generate_qconfig_map(model, modules, model.graph, qconfig_mapping, node_name_to_scope)
_validate_fixed_qparams_qconfigs(model, qconfig_map)
# match the patterns that will get quantized
standalone_module_names = list(prepare_custom_config.standalone_module_names.keys())
standalone_module_classes = list(prepare_custom_config.standalone_module_classes.keys())
custom_module_classes = get_custom_module_class_keys(prepare_custom_config.float_to_observed_mapping)
matches_without_qconfig = find_matches(
model.graph, modules, pattern_to_quantize_handler, root_node_getter_mapping,
standalone_module_names, standalone_module_classes, custom_module_classes)
# map qconfig instances to matches
matches = {}
for node_name, match_without_qconfig in matches_without_qconfig.items():
match_with_qconfig = (*match_without_qconfig, qconfig_map[node_name])
matches[node_name] = match_with_qconfig
input_quantized_idxs: List[int] = prepare_custom_config.input_quantized_indexes
output_quantized_idxs: List[int] = prepare_custom_config.output_quantized_indexes
run_prepare_fx_on_standalone_modules(
model, is_qat, modules, matches, prepare_custom_config, backend_config)
# record names for the set of observed node, so that in convert step
# we know whether we need to convert a floating point module to reference
# quantized module or not
observed_node_names: Set[str] = set()
result_node = insert_observers_for_model(
model, modules, matches, qconfig_map,
model.graph, prepare_custom_config,
equalization_qconfig_map,
input_quantized_idxs,
output_quantized_idxs,
backend_config,
observed_node_names,
is_qat)
save_state(model, qconfig_map, node_name_to_scope,
prepare_custom_config, equalization_qconfig_map, qconfig_mapping, is_qat, observed_node_names)
preserved_attributes = set(prepare_custom_config.preserved_attributes)
model = ObservedGraphModule(model, model.graph, preserved_attributes)
if is_standalone_module:
assert result_node is not None
assert isinstance(result_node.args[0], Node), \
"standalone module only supports returning simple value currently"\
"(not tuple, dict etc.)"
# these inputs are observed in parent
# converting List[int] to Tensor since module attribute is
# Union[Tensor, Module]
model._standalone_module_input_quantized_idxs = \
torch.tensor(input_quantized_idxs)
model._standalone_module_output_quantized_idxs = torch.tensor(output_quantized_idxs)
return model
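# Illustrative end-to-end sketch (assumed usage, not part of this module): the public
# prepare_fx entry point drives the `prepare` pass defined above and returns an observed
# GraphModule whose graph contains the inserted observers.
def _example_prepare_fx_usage():
    import torch
    from torch.ao.quantization import get_default_qconfig_mapping
    from torch.ao.quantization.quantize_fx import prepare_fx

    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(4, 4)

        def forward(self, x):
            return self.linear(x)

    example_inputs = (torch.randn(1, 4),)
    observed = prepare_fx(M(), get_default_qconfig_mapping(), example_inputs)
    # the printed graph looks roughly like:
    #   x -> activation_post_process_0 -> linear -> activation_post_process_1
    return observed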
| pytorch-master | torch/ao/quantization/fx/prepare.py |
import torch
from torch.fx._symbolic_trace import Tracer
from torch.fx.node import Target, Node, Argument
from torch.nn.intrinsic import _FusedModule
from typing import List, Callable, Tuple, Any, Dict, Optional
__all__ = [
"QuantizationTracer",
]
class Scope(object):
""" Scope object that records the module path and the module type
of a module. Scope is used to track the information of the module
that contains a Node in a Graph of GraphModule. For example::
class Sub(torch.nn.Module):
def forward(self, x):
# This will be a call_method Node in GraphModule,
# scope for this would be (module_path="sub", module_type=Sub)
return x.transpose(1, 2)
class M(torch.nn.Module):
def __init__(self):
    super().__init__()
    self.sub = Sub()
def forward(self, x):
# This will be a call_method Node as well,
# scope for this would be (module_path="", None)
x = x.transpose(1, 2)
x = self.sub(x)
return x
"""
def __init__(self, module_path: str, module_type: Any):
super().__init__()
self.module_path = module_path
self.module_type = module_type
class ScopeContextManager(object):
""" A context manager to track the Scope of Node during symbolic tracing.
When entering a forward function of a Module, we'll update the scope information of
the current module, and when we exit, we'll restore the previous scope information.
"""
def __init__(
self, scope: Scope, current_module: torch.nn.Module, current_module_path: str
):
super().__init__()
self.prev_module_type = scope.module_type
self.prev_module_path = scope.module_path
self.scope = scope
self.scope.module_path = current_module_path
self.scope.module_type = type(current_module)
def __enter__(self):
return
def __exit__(self, *args):
self.scope.module_path = self.prev_module_path
self.scope.module_type = self.prev_module_type
return
class QuantizationTracer(Tracer):
def __init__(
self, skipped_module_names: List[str], skipped_module_classes: List[Callable]
):
super().__init__()
self.skipped_module_names = skipped_module_names
self.skipped_module_classes = skipped_module_classes
# NB: initialized the module_type of top level module to None
# we are assuming people won't configure the model with the type of top level
# module here, since people can use "" for global config
# We can change this if there is a use case that configures
# qconfig using top level module type
self.scope = Scope("", None)
self.node_name_to_scope: Dict[str, Tuple[str, type]] = {}
self.record_stack_traces = True
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
return (
(
m.__module__.startswith("torch.nn")
and not isinstance(m, torch.nn.Sequential)
)
or module_qualified_name in self.skipped_module_names
or type(m) in self.skipped_module_classes
or isinstance(m, _FusedModule)
)
def call_module(
self,
m: torch.nn.Module,
forward: Callable[..., Any],
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
) -> Any:
module_qualified_name = self.path_of_module(m)
# Creating scope with information of current module
# scope will be restored automatically upon exit
with ScopeContextManager(self.scope, m, module_qualified_name):
return super().call_module(m, forward, args, kwargs)
def create_node(
self,
kind: str,
target: Target,
args: Tuple[Argument, ...],
kwargs: Dict[str, Argument],
name: Optional[str] = None,
type_expr: Optional[Any] = None,
) -> Node:
node = super().create_node(kind, target, args, kwargs, name, type_expr)
self.node_name_to_scope[node.name] = (
self.scope.module_path,
self.scope.module_type,
)
return node
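# Minimal usage sketch (assumed example, not part of this module): trace a small model
# and inspect which module scope each node was created in via node_name_to_scope.
def _example_quantization_tracer_usage():
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(4, 4)

        def forward(self, x):
            return self.linear(x).relu()

    tracer = QuantizationTracer(skipped_module_names=[], skipped_module_classes=[])
    graph = tracer.trace(M())
    # node_name_to_scope maps e.g. 'linear' -> ('linear', torch.nn.Linear),
    # while nodes created in the top level forward map to ('', None)
    return tracer.node_name_to_scope, graph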
| pytorch-master | torch/ao/quantization/fx/tracer.py |
import torch
from torch.fx.graph import (
Node,
)
from .utils import (
all_node_args_have_no_tensors,
)
from torch.ao.quantization.quantization_types import (
Pattern,
NodePattern,
)
from abc import ABC
from typing import Any, Callable, Dict, Optional
def _default_root_node_getter(node_pattern):
if node_pattern is None:
return node_pattern
while not isinstance(node_pattern, Node):
node_pattern = node_pattern[-1]
return node_pattern
# -------------------------
# Pattern Registrations
# -------------------------
# 1. Post Training Static Quantization and Quantization Aware Training Patterns
# Base Pattern Handler
class QuantizeHandler(ABC):
""" Base handler class for the quantizer patterns
"""
def __init__(
self,
node_pattern: NodePattern,
modules: Dict[str, torch.nn.Module],
root_node_getter: Optional[Callable] = None,
is_custom_module=False,
is_standalone_module=False):
""" Records pattern information in __init__, which will be used
in convert
"""
self.node_pattern = node_pattern
self.modules = modules
if root_node_getter is None:
root_node_getter = _default_root_node_getter
self.root_node = root_node_getter(node_pattern)
self.is_custom_module_ = is_custom_module
self.is_standalone_module_ = is_standalone_module
self.num_tensor_args = 0
# determine how many of the first two args are Tensors (versus scalars)
# this distinguishes things like "x + y" from "x + 2" or "2 + x"
if isinstance(self.root_node, Node):
cache_for_no_tensor_check: Dict[Node, bool] = dict()
for arg_idx in range(len(self.root_node.args)):
arg = self.root_node.args[arg_idx]
if isinstance(arg, Node) and (
not all_node_args_have_no_tensors(
arg, self.modules, cache_for_no_tensor_check)):
self.num_tensor_args += 1
# TODO: can remove after the is_dynamic flag is defined, so that we can
# move embedding op to backend_config_dict
def input_output_observed(self) -> bool:
"""
Returns True if the pattern matched to this qhandler could be
observed, and False if it should not be observed.
"""
return True
def is_general_tensor_value_op(self) -> bool:
"""
Returns True if the operator works for both floating point and
quantized input, and either does some computation based on the input Tensor
(e.g. avgpool2d, maxpool2d), or only re-arranges the Tensor values or queries
some metadata about the Tensor (e.g. reshape, transpose).
For such ops we insert an observer/fake_quant for the output of the operator
(the same observer instance as the input), since input and output share the
same quantization parameters even though their value distributions may differ
(which matters for HistogramObserver).
Example operator: avgpool2d, reshape, transpose, maxpool2d
Example observed operator:
observer_0 - avgpool2d - observer_0 (same observer instance as input)
"""
return False
def get_activation_ctr(
self,
qconfig: Any,
pattern: Pattern,
is_training: bool,
) -> Optional[Callable]:
"""
Returns the constructor for the activation observer which should be
used for the pattern matched to this handler. Some handlers override
this to a different value than what is specified in the qconfig.
"""
return qconfig.activation
def is_custom_module(self):
return self.is_custom_module_
def is_standalone_module(self):
return self.is_standalone_module_
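# Minimal sketch (assumed example, not part of this module): num_tensor_args distinguishes
# patterns like "x + y" (two tensor args) from "x + 2" (one tensor arg) for binary ops.
def _example_num_tensor_args():
    import operator
    from torch.fx import symbolic_trace

    def f(x, y):
        return x + y, x + 2

    gm = symbolic_trace(f)
    modules = dict(gm.named_modules())
    add_nodes = [n for n in gm.graph.nodes if n.target is operator.add]
    # expected to be [2, 1]: "x + y" has two tensor args, "x + 2" has one
    return [QuantizeHandler(n, modules).num_tensor_args for n in add_nodes]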
# TODO: remove this class, this is still exposed in torch.quantization
# but we should be able to break bc
class BinaryOpQuantizeHandler(QuantizeHandler):
pass
class CatQuantizeHandler(QuantizeHandler):
pass
# TODO: remove this class
class ConvReluQuantizeHandler(QuantizeHandler):
pass
# TODO: remove this class
class LinearReLUQuantizeHandler(QuantizeHandler):
pass
# TODO: remove this class
class BatchNormQuantizeHandler(QuantizeHandler):
pass
# TODO: remove this class
class EmbeddingQuantizeHandler(QuantizeHandler):
pass
# TODO: remove this class
class RNNDynamicQuantizeHandler(QuantizeHandler):
pass
# TODO: remove this class
class DefaultNodeQuantizeHandler(QuantizeHandler):
""" Common quantized op, first input and first output will be quantized
"""
pass
# TODO: remove this class
class FixedQParamsOpQuantizeHandler(QuantizeHandler):
pass
# TODO: remove
class CopyNodeQuantizeHandler(QuantizeHandler):
pass
# TODO: remove
class GeneralTensorShapeOpQuantizeHandler(QuantizeHandler):
pass
# TODO: not used, can be removed after torch.quantization namespace is deprecated
class CustomModuleQuantizeHandler(QuantizeHandler):
pass
# TODO: not used, can be removed after torch.quantization namespace is deprecated
class StandaloneModuleQuantizeHandler(QuantizeHandler):
pass
| pytorch-master | torch/ao/quantization/fx/quantization_patterns.py |
from typing import Any, Dict, Set, Tuple, Callable, List
import torch
import torch.nn as nn
import torch.nn.qat as nnqat
from abc import ABC, abstractmethod
from torch.ao.quantization.fake_quantize import FakeQuantize
from torch.ao.quantization.fx.graph_module import GraphModule
from torch.ao.quantization.observer import ObserverBase
from torch.ao.quantization.fx._model_report.model_report_observer import ModelReportObserver
from torch.ao.quantization.qconfig import QConfig
from torch.ao.quantization.quantize import is_activation_post_process
# Names for observer insert keys
DETECTOR_TARGET_NODE_KEY = "target_node"
DETECTOR_OBS_TO_INSERT_KEY = "observer_to_insert"
DETECTOR_IS_POST_OBS_KEY = "is_post_observer"
DETECTOR_OBS_ARGS_KEY = "observer_args"
# Adding base class for detectors
class DetectorBase(ABC):
r""" Base Detector Module
Any detector class should derive from this class.
Concrete detectors should follow the same general API, which includes:
- A method to calculate and return observer insertion points
- Should return both the fqns and the Observer class to insert
- A method to return a report based on the detector
- Should return a str-based report and dict info in Tuple[str,Dict] format
"""
def __init__(self):
super().__init__()
@abstractmethod
def determine_observer_insert_points(self, model) -> Dict:
r"""
Args
model (nn.Module or subclass): model to find observer insertion points
Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict.
This dict maps string keys to detector specific information
"""
pass
@abstractmethod
def get_detector_name(self) -> str:
r""" Returns the name of the current detector """
pass
def _get_targeting_node(self, prepared_fx_model: GraphModule, target_fqn: str) -> torch.fx.node.Node:
r"""
Takes in a GraphModule and the target_fqn and finds the node whose target is this fqn.
If it's not found, it means it is most likely inside a fused layer.
We then go one layer up in terms of the fqn we are searching for until we find the parent node.
If we get to an empty string, then we know that it doesn't exist.
The reason for the recursion is that if the module that we are looking for got fused,
we will have module fqn as e.g. x.linear.0 but the graph will only have a node for the fused module,
which would have fqn as x.linear so they will not match.
To handle this, if we don't match, we then take off the last bit of the fqn e.g. x.linear.0 -> x.linear,
or more generally foo.bar.baz -> foo.bar and search again, this will allow us to locate the correct module
even in cases with fusion
Args:
prepared_fx_model (GraphModule): The prepared Fx GraphModule
target_fqn (str): The fqn of the layer we are trying to target
Returns the node object we are trying to add observers around
"""
for node in prepared_fx_model.graph.nodes:
# if the node's target is our target, return it
if node.target == target_fqn:
return node
# getting here means node not found
# if no "." we are already at base and failed
parent_fqn_sep_index = target_fqn.rfind(".")
if parent_fqn_sep_index == -1:
raise ValueError("passed in target_fqn not found in graph's targets.")
else:
# recursively call it with parent fqn
return self._get_targeting_node(prepared_fx_model, target_fqn[:parent_fqn_sep_index])
@abstractmethod
def generate_detector_report(self, model) -> Tuple[str, Dict[str, Any]]:
r"""
Args
model (nn.Module or subclass): model to generate the report for
Returns a Tuple of two elements:
Str: string report of the suggested improvements
Dict: contains useful data collected by the observer pertinent to this report
"""
pass
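# Minimal sketch (assumed example) of what a concrete DetectorBase subclass must provide;
# this detector does nothing useful and exists only to illustrate the API described above.
class _ExampleNoOpDetector(DetectorBase):
    def get_detector_name(self) -> str:
        return "example_noop_detector"

    def determine_observer_insert_points(self, model) -> Dict:
        # no observers requested
        return {}

    def generate_detector_report(self, model) -> Tuple[str, Dict[str, Any]]:
        return ("nothing to report", {})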
class PerChannelDetector(DetectorBase):
r""" This class is used to detect if any Linear or Conv layers in a model utilize per_channel quantization.
Only Linear and Conv layers can use per_channel as of now so only these two are currently checked.
per_channel quantization can lead to significant accuracy benefits.
Therefore, if the backend used by the user supports it, it is recommended to use per_channel quantization.
Args:
backend (str, optional): the backend the user wishes to use in production
Default value is current torch.backends.quantized.engine
"""
# Keys for return dictionary
BACKEND_KEY = "backend"
PER_CHAN_SUPPORTED_KEY = "per_channel_quantization_supported"
PER_CHAN_USED_KEY = "per_channel_quantization_used"
# Default map for representing supported per channel quantization modules for different backends
DEFAULT_BACKEND_PER_CHANNEL_SUPPORTED_MODULES: Dict[str, Set[Any]] = {
"fbgemm": set([nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nnqat.Linear, nnqat.Conv1d, nnqat.Conv2d, nnqat.Conv3d]),
"qnnpack": set([nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nnqat.Linear, nnqat.Conv1d, nnqat.Conv2d, nnqat.Conv3d]),
"onednn": set([nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nnqat.Linear, nnqat.Conv1d, nnqat.Conv2d, nnqat.Conv3d]),
}
def __init__(self, backend: str = torch.backends.quantized.engine):
super().__init__()
# store the backend information
self.backend_chosen = backend
self.supported_modules = set([])
if self.backend_chosen in self.DEFAULT_BACKEND_PER_CHANNEL_SUPPORTED_MODULES:
self.supported_modules = self.DEFAULT_BACKEND_PER_CHANNEL_SUPPORTED_MODULES[self.backend_chosen]
else:
raise ValueError("Not configured to work with {}. Try a different default backend".format(self.backend_chosen))
def get_detector_name(self) -> str:
r""" returns the string name of this detector"""
return "per_channel_detector"
def determine_observer_insert_points(self, model: nn.Module) -> Dict:
r"""
There are no observers inserted for the PerChannelDetector.
Returns an empty dictionary since no observers are added or needed
"""
return {}
def _detect_per_channel_helper(self, model: nn.Module):
r"""
determines if per_channel quantization is supported in modules and submodules.
Returns a dictionary that is used by the higher level generate_detector_report function.
Each entry maps a fully-qualified module name to information on whether per_channel
quantization is supported and whether it is used.
Args:
model: The model whose modules are checked for per_channel quantizability
Returns dictionary mapping fqns to if per_channel quantization is possible
"""
# create dict we will return
per_channel_info: Dict = {}
# get the fully qualified name and check if the module type supports per_channel quantization
for fqn, module in model.named_modules():
is_in_include_list = any(isinstance(module, x) for x in self.supported_modules)
# check if the module per_channel is supported
# based on backend
per_channel_supported = False
if is_in_include_list:
per_channel_supported = True
# assert statement for MyPy
q_config_file = module.qconfig
assert isinstance(q_config_file, QConfig)
# this object should either be fake quant or observer
q_or_s_obj = module.qconfig.weight.p.func()
assert isinstance(q_or_s_obj, FakeQuantize) or isinstance(q_or_s_obj, ObserverBase)
per_channel_used = False # will be true if found in qconfig
if hasattr(q_or_s_obj, "ch_axis"): # then we know that per_channel quantization used
# all fake quants have channel axis so need to check is_per_channel
if isinstance(q_or_s_obj, FakeQuantize):
if hasattr(q_or_s_obj, "is_per_channel") and q_or_s_obj.is_per_channel:
per_channel_used = True
elif isinstance(q_or_s_obj, ObserverBase):
# should be an observer otherwise
per_channel_used = True
else:
raise ValueError("Should be either observer or fake quant")
per_channel_info[fqn] = {
self.PER_CHAN_SUPPORTED_KEY: per_channel_supported,
self.PER_CHAN_USED_KEY: per_channel_used,
self.BACKEND_KEY: self.backend_chosen
}
return per_channel_info
def generate_detector_report(self, model: nn.Module) -> Tuple[str, Dict[str, Any]]:
r"""Checks if any Linear or Conv layers in the model utilize per_channel quantization.
Only Linear and Conv layers can use per_channel as of now so only these two are currently checked.
Looks at q_config format and backend to determine if per_channel can be utilized.
Uses the DEFAULT_BACKEND_PER_CHANNEL_SUPPORTED_MODULES structure to determine support
Args:
model: The prepared and calibrated model we want to check if using per_channel
Returns a tuple with two elements:
String report of potential actions to improve model (if per_channel quantization is available in backend)
Dictionary mapping per_channel quantizable elements to:
whether per_channel quantization is supported by the backend
if it is being utilized in the current model
"""
# run the helper function to populate the dictionary
per_channel_info = self._detect_per_channel_helper(model)
# String to let the user know of further optimizations
further_optims_str = "Further Optimizations for backend {}: \n".format(self.backend_chosen)
optimizations_possible = False
for fqn in per_channel_info:
fqn_dict = per_channel_info[fqn]
if fqn_dict[self.PER_CHAN_SUPPORTED_KEY] and not fqn_dict[self.PER_CHAN_USED_KEY]:
optimizations_possible = True
further_optims_str += "Module {module_fqn} can be configured to use per_channel quantization.\n".format(
module_fqn=fqn
)
if optimizations_possible:
further_optims_str += (
"To use per_channel quantization, make sure the qconfig has a per_channel weight observer."
)
else:
further_optims_str += "No further per_channel optimizations possible."
# return the string and the dictionary form of same information
return (further_optims_str, per_channel_info)
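# Minimal usage sketch (assumed example): the detector expects each supported module to
# already carry a `qconfig` attribute, as the prepare flow would normally set up.
def _example_per_channel_detector_usage():
    from torch.ao.quantization import get_default_qconfig

    model = nn.Sequential(nn.Linear(4, 4))
    model[0].qconfig = get_default_qconfig("fbgemm")
    detector = PerChannelDetector(backend="fbgemm")
    report_str, per_channel_info = detector.generate_detector_report(model)
    return report_str, per_channel_info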
class DynamicStaticDetector(DetectorBase):
r"""
Determines whether dynamic or static quantization is more appropriate for a given module.
Takes advantage of the ModelReportObserver that records range information.
Stationary distributions of data are strictly above the tolerance level for the comparison statistic:
S = average_batch_activation_range/epoch_activation_range
Non-stationary distributions are at or below the tolerance level for this metric.
If the distribution of data right after the module is non-stationary, recommend dynamic quantization
Otherwise recommend static quantization
Args:
tolerance (float, optional): The threshold where S metric is stationary above and non-stationary otherwise. Default: 0.5
"""
# names for the pre and post observers that are inserted
DEFAULT_PRE_OBSERVER_NAME = "model_report_pre_observer"
DEFAULT_POST_OBSERVER_NAME = "model_report_post_observer"
# naming conventions for stationary vs non-stationary data
STATIONARY_STR = "stationary"
NON_STATIONARY_STR = "non-stationary"
# naming for activation
INPUT_ACTIVATION_PREFIX = "input_activation_"
OUTPUT_ACTIVATION_PREFIX = "output_activation_"
# naming conventions for the keys of the return module info
TOLERANCE_KEY = "dynamic_static_tolerance"
DEFAULT_DYNAMIC_REC_KEY = "dynamic_recommended"
PRE_OBS_COMP_STAT_KEY = INPUT_ACTIVATION_PREFIX + "dynamic_static_comp_stat"
POST_OBS_COMP_STAT_KEY = OUTPUT_ACTIVATION_PREFIX + "dynamic_static_comp_stat"
PRE_OBS_DATA_DIST_KEY = INPUT_ACTIVATION_PREFIX + "dynamic_static_data_classification"
POST_OBS_DATA_DIST_KEY = OUTPUT_ACTIVATION_PREFIX + "dynamic_static_data_classification"
IS_CURRENTLY_SUPPORTED_KEY = "is_dynamic_supported"
# modules that are supported both dynamic and static for this report function
DEFAULT_DYNAMIC_STATIC_CHECK_SUPPORTED = set([nn.Linear])
# modules that will be supported soon for both
DEFAULT_DYNAMIC_STATIC_FUTURE_SUPPORTED = set([nn.Conv1d, nn.Conv2d, nn.Conv3d])
def __init__(self, tolerance=0.5):
super().__init__()
# set tolerance level and initialize a set to keep track of useful fqn locations
self.tolerance = tolerance
self.useful_observer_fqns: Set[str] = set([])
def determine_observer_insert_points(self, prepared_fx_model: GraphModule) -> Dict[str, Dict[str, Any]]:
r"""
Determines where observers need to be inserted for the Dynamic vs Static detector.
For this detector, we want to place observers on either side of linear layers in the model.
Currently inserts observers for:
linear layers
Args:
prepared_fx_model (GraphModule): The prepared Fx GraphModule
Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict with:
key "target_node" -> the node we are trying to observe with this observer (torch.fx.node.Node)
key "observer_to_insert" -> the observer we wish to insert (ObserverBase)
key "is_post_observer" -> True if this is meant to be a post-observer for target_node, False if pre-observer
key "observer_args" -> The arguments that are meant to be passed into the observer
"""
# observer for this detector is ModelReportObserver
obs_ctr = ModelReportObserver
# return dict
obs_fqn_to_info: Dict[str, Dict[str, Any]] = {}
for fqn, module in prepared_fx_model.named_modules():
# make sure module is supported
if self._is_supported(module, insert=True):
# if it's a supported type, we want to get node and add observer insert locations
targeted_node = self._get_targeting_node(prepared_fx_model, fqn)
# add entry for pre-observer
pre_obs_fqn = fqn + "." + self.DEFAULT_PRE_OBSERVER_NAME
obs_fqn_to_info[pre_obs_fqn] = {
DETECTOR_TARGET_NODE_KEY: targeted_node,
DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(),
DETECTOR_IS_POST_OBS_KEY: False,
DETECTOR_OBS_ARGS_KEY: targeted_node.args
}
# add entry for post-observer
post_obs_fqn = fqn + "." + self.DEFAULT_POST_OBSERVER_NAME
obs_fqn_to_info[post_obs_fqn] = {
DETECTOR_TARGET_NODE_KEY: targeted_node,
DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(),
DETECTOR_IS_POST_OBS_KEY: True,
DETECTOR_OBS_ARGS_KEY: (targeted_node,)
}
return obs_fqn_to_info
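# Sketch of the structure returned above for a model containing a single supported Linear module
# with fqn "fc" (the fqn and Node repr are hypothetical; keys follow the docstring above):
# {
#     "fc.model_report_pre_observer": {"target_node": <Node fc>, "observer_to_insert": ModelReportObserver(),
#                                      "is_post_observer": False, "observer_args": <args of the fc node>},
#     "fc.model_report_post_observer": {"target_node": <Node fc>, "observer_to_insert": ModelReportObserver(),
#                                       "is_post_observer": True, "observer_args": (<Node fc>,)},
# }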
def get_detector_name(self) -> str:
r""" returns the string name of this detector"""
return "dynamic_vs_static_detector"
def _is_supported(self, module: nn.Module, insert: bool = False) -> bool:
r"""Returns whether the given module is supported for observers
Args
module: The module to check and ensure is supported
insert: True if this check is for observer insertion, False if it is for report generation
Returns True if the module is supported by observer, False otherwise
"""
# check to see if module is of a supported type
is_supported_type = any(isinstance(module, x) for x in self.DEFAULT_DYNAMIC_STATIC_CHECK_SUPPORTED)
# check if it will be supported
future_supported_type = any(isinstance(module, x) for x in self.DEFAULT_DYNAMIC_STATIC_FUTURE_SUPPORTED)
# supported
supported = is_supported_type or future_supported_type
# this is a check for observer insertion
if insert:
return supported
else:
# this is for report gen and we also need to check if it contains observers
has_obs = hasattr(module, self.DEFAULT_PRE_OBSERVER_NAME) and hasattr(module, self.DEFAULT_POST_OBSERVER_NAME)
return supported and has_obs
def _generate_dict_info(self, model: GraphModule) -> Dict[str, Any]:
r"""
Helper function for generate_detector_report that does the generation of the dictionary.
This process is done as specified in generate_detector_report documentation
Args:
model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers
Returns a Dictionary mapping modules with ModelReportObservers around them to:
whether dynamic quantization is recommended
their S metric of input to module
whether input to module is stationary or non-stationary
their S metric of output of module
whether output of module is stationary or non-stationary
the tolerance level used to decide whether input/output is stationary or non-stationary
whether it is currently supported or planned for the future
"""
# store modules dynamic vs static information
module_dynamic_static_info = {}
# This for loop goes through the modules, and extracts all relevant information into module_dynamic_static_info
# This information primarily includes whether the data distributions around a supported module are stationary or not
# Based on this, it is recorded whether dynamic or static quantization is recommended
# loop through all submodules, including nested ones
for fqn, module in model.named_modules():
# if module is a supported type and has the ModelReportObserver attached to it
if self._is_supported(module):
# get pre and post observers for the module
pre_obs = getattr(module, self.DEFAULT_PRE_OBSERVER_NAME)
post_obs = getattr(module, self.DEFAULT_POST_OBSERVER_NAME)
# get the statistics for each module
pre_stat = pre_obs.get_batch_to_epoch_ratio()
post_stat = post_obs.get_batch_to_epoch_ratio()
# record module, pre and post stat, and whether to do dynamic or static based off it
# true if post observer data distribution is non-stationary, false if it's stationary
dynamic_recommended = post_stat <= self.tolerance
# specify the classifications for whether data distributions considered stationary or non-stationary
pre_obs_dist_classif = self.STATIONARY_STR if pre_stat > self.tolerance else self.NON_STATIONARY_STR
post_obs_dist_classif = self.STATIONARY_STR if post_stat > self.tolerance else self.NON_STATIONARY_STR
# check if current support or future support
is_supported_type = any(isinstance(module, x) for x in self.DEFAULT_DYNAMIC_STATIC_CHECK_SUPPORTED)
# store the set of important information for this module
module_info = {
self.TOLERANCE_KEY: self.tolerance,
self.DEFAULT_DYNAMIC_REC_KEY: dynamic_recommended,
self.PRE_OBS_COMP_STAT_KEY: pre_stat,
self.PRE_OBS_DATA_DIST_KEY: pre_obs_dist_classif,
self.POST_OBS_COMP_STAT_KEY: post_stat,
self.POST_OBS_DATA_DIST_KEY: post_obs_dist_classif,
self.IS_CURRENTLY_SUPPORTED_KEY: is_supported_type,
}
module_dynamic_static_info[fqn] = module_info
return module_dynamic_static_info
def generate_detector_report(self, model: GraphModule) -> Tuple[str, Dict[str, Any]]:
r"""
Determines whether dynamic or static quantization is more appropriate for a given module.
Takes advantage of the ModelReportObserver that records range information.
Stationary distributions of data are strictly above the tolerance level for the comparison statistic:
S = average_batch_activation_range/epoch_activation_range
Nonstationary distributions are below or at the tolerance level for this metric.
If the distribution of data right after the module is non-stationary, recommend dynamic quantization
Otherwise recommend static quantization
This will then generate suggestions for dynamic vs static quantization focused around Linear.
Args:
model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers
Returns a tuple with two elements:
String report of whether dynamic or static quantization is recommended for certain modules
Dictionary mapping modules with ModelReportObservers around them to:
whether dynamic quantization is recommended
their S metric of input to module
whether input to module is stationary or non-stationary
their S metric of output of module
whether output of module is stationary or non-stationary
the tolerance level used to decide whether input/output is stationary or non-stationary
whether it is currently supported or planned for the future
"""
# get the dictionary of the information to format the string report
module_dynamic_static_info = self._generate_dict_info(model)
dynamic_vs_static_string = "Dynamic vs. Static Quantization suggestions: \n"
modules_added: bool = False # check to make sure at least 1 module added.
dynamic_benefit = " You will get more accurate results if you use dynamic quantization"
static_benefit = " You can increase model efficiency if you use static quantization"
future_support_str = ". This layer is not yet supported for dynamic quantization"
# This for loop goes through the information collected in module_dynamic_static_info and:
# Populates the string based report with the information from module_dynamic_static_info
# Compiles the complete report by appending relevant formatted strings
for module_fqn in module_dynamic_static_info.keys():
# there is at least 1 module for suggestion
modules_added = True
module_info = module_dynamic_static_info[module_fqn]
suggestion_string_template = "For module {} it is suggested to use {} quantization because {}.\n"
# decide what string formatting values will be
quantization_type = ""
quantization_reasoning = "the distribution of data before {} is {} and the distribution after is {}."
benefit_str = ""
# strings for if dynamic quantized per tensor is needed
recommend_per_tensor = ". We recommend adding a {} before this module if it is static."
rec_lay_to_add = "dynamic quantize per tensor layer"
dynamic_per_tensor_string = recommend_per_tensor.format(rec_lay_to_add)
dynamic_per_tensor_reasoning_string = (
" This is because the input to this module has a non-stationary distribution"
)
# start composing explanation
if module_info[self.DEFAULT_DYNAMIC_REC_KEY]:
quantization_type = "dynamic"
# check if currently supported or future supported
benefit_str = dynamic_benefit
if not module_info[self.IS_CURRENTLY_SUPPORTED_KEY]:
benefit_str += future_support_str
else:
quantization_type = "static"
benefit_str = static_benefit
# now set the quantization explanation string
quantization_reasoning = (
quantization_reasoning.format(
module_fqn, module_info[self.PRE_OBS_DATA_DIST_KEY], module_info[self.POST_OBS_DATA_DIST_KEY]
)
+ benefit_str
)
# if we have a non-stationary input -> linear -> stationary we suggested static
# however, we also want to recommend they add a dynamic quantize-per-tensor layer right before it if this change is made
if (
module_info[self.PRE_OBS_DATA_DIST_KEY] == self.NON_STATIONARY_STR
and module_info[self.POST_OBS_DATA_DIST_KEY] == self.STATIONARY_STR
):
quantization_reasoning = (
quantization_reasoning + dynamic_per_tensor_string + dynamic_per_tensor_reasoning_string
)
# format the overall suggestion string with the specific inputs
module_suggestion_string = suggestion_string_template.format(
module_fqn, quantization_type, quantization_reasoning
)
# append to overall suggestion
dynamic_vs_static_string += module_suggestion_string
if not modules_added:
dynamic_vs_static_string += "No applicable layers for suggestions. Only linear and conv are valid.\n"
# return the string as well as the dictionary of information
return (dynamic_vs_static_string, module_dynamic_static_info)
class InputWeightEqualizationDetector(DetectorBase):
r"""
Determines whether input-weight equalization can help improve quantization for certain modules.
Specifically, this list of modules includes:
linear
conv
Determines whether input-weight equalization is recommended based on the comp stat:
s_c = sqrt(w_c/W)/sqrt(i_c/I)
where:
w_c is range of weight for channel c, W is range of weight over all channels
i_c is range of input for channel c, I is range of input over all channels
if threshold <= s_c <= 1 / threshold, recommends input-weight equalization
Args:
ratio_threshold (float): The threshold for s_c to determine if input-weight equalization is suggested
Should be between 0 and 1 (both non-inclusive)
ch_axis (int, optional): The channel axis being observed to determine input weight equalization
Default: 1
* :attr:`ratio_threshold`: The threshold for s_c to determine if input-weight equalization is suggested
Should be between 0 and 1
* :attr:`ch_axis`: The channel axis being observed to determine input weight equalization
* :attr:`SUPPORTED_MODULES`: This specifies the modules that are supported for input-weight equalization
* :attr:`DEFAULT_PRE_OBSERVER_NAME`: The name of the pre-observer to be inserted for this detector
"""
SUPPORTED_MODULES: Set[Callable] = set(
[nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nnqat.Linear, nnqat.Conv1d, nnqat.Conv2d, nnqat.Conv3d]
)
# names for the pre and post observers that are inserted
DEFAULT_PRE_OBSERVER_NAME: str = "model_report_pre_observer"
# weight / activation prefix for each of the below info
WEIGHT_PREFIX = "weight_"
ACTIVATION_PREFIX = "input_activation_"
# string names for keys of info dictionaries
PER_CHANNEL_MAX_KEY = "per_channel_max"
PER_CHANNEL_MIN_KEY = "per_channel_min"
GLOBAL_MAX_KEY = "global_max"
GLOBAL_MIN_KEY = "global_min"
# keys for return dict of recommendations
RECOMMENDED_KEY = "input_weight_equalization_recommended"
COMP_METRIC_KEY = "input_weight_channel_comparison_metrics"
THRESHOLD_KEY = "input_weight_threshold"
CHANNEL_KEY = "input_weight_channel_axis"
# default weight and info strings
WEIGHT_STR = "weight"
INPUT_STR = "input"
# default for what ratio we recommend input weight
DEFAULT_RECOMMEND_INPUT_WEIGHT_CHANNEL_RATIO = 0.4
def __init__(self, ratio_threshold: float, ch_axis: int = 1):
# ensure passed in inputs are valid
if ratio_threshold <= 0 or ratio_threshold >= 1:
raise ValueError("Make sure threshold is > 0 and < 1")
# initialize attributes based on args
self.ratio_threshold: float = ratio_threshold
self.ch_axis: int = ch_axis
def _is_supported(self, module: nn.Module, insert: bool = False) -> bool:
r"""Returns whether the given module is supported for observers
Args
module: The module to check and ensure is supported
insert: True if this check is for observer insertion, False if it is for report generation
Returns True if the module is supported by observer, False otherwise
"""
# check to see if module is of a supported type
is_supported_type = any(type(module) is x for x in self.SUPPORTED_MODULES)
# this is a check for observer insertion
if insert:
return is_supported_type
else:
# this is for report gen and we also need to check if it contains observers
has_obs = hasattr(module, self.DEFAULT_PRE_OBSERVER_NAME)
return is_supported_type and has_obs
def determine_observer_insert_points(self, prepared_fx_model: GraphModule) -> Dict[str, Dict[str, Any]]:
r"""Determines where observers need to be inserted for the Input Weight Equalization Detector.
For this detector, we want to place observers in front of supported layers.
Currently inserts observers for:
linear layers
conv layers
Args:
prepared_fx_model (GraphModule): The prepared Fx GraphModule
Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict with:
key "target_node" -> the node we are trying to observe with this observer (torch.fx.node.Node)
key "observer_to_insert" -> the observer we wish to insert (ObserverBase)
key "is_post_observer" -> True if this is meant to be a post-observer for target_node, False if pre-observer
key "observer_args" -> The arguments that are meant to be passed into the observer
"""
# observer for this detector is ModelReportObserver
obs_ctr = ModelReportObserver
# return dict
obs_fqn_to_info: Dict[str, Dict[str, Any]] = {}
for fqn, module in prepared_fx_model.named_modules():
# check to see if module is of a supported type
if self._is_supported(module, insert=True):
# if it's a supported type, we want to get node and add observer insert locations
targeted_node = self._get_targeting_node(prepared_fx_model, fqn)
# add entry for pre-observer
pre_obs_fqn = fqn + "." + self.DEFAULT_PRE_OBSERVER_NAME
obs_fqn_to_info[pre_obs_fqn] = {
DETECTOR_TARGET_NODE_KEY: targeted_node,
DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(ch_axis=self.ch_axis),
DETECTOR_IS_POST_OBS_KEY: False,
DETECTOR_OBS_ARGS_KEY: targeted_node.args,
}
return obs_fqn_to_info
def get_detector_name(self) -> str:
r"""Returns the name of this detector"""
return "input_weight_equalization_detector"
def _extract_input_info(self, model: GraphModule) -> Dict[str, Dict]:
r"""
Takes in a calibrated GraphModule and then finds the relevant observers.
It then extracts the input information for each observer and returns it
Args
model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers
Returns a dict mapping relevant module fqns (str) to a dict with keys:
"input_activation_per_channel_max" : maps to the per_channel max values
"input_activation_per_channel_min" : maps to the per_channel min values
"input_activation_global_max" : maps to the global max recorded
"input_activation_global_min" : maps to the global min recorded
"""
# return dictionary mapping observer fqns to desired info
input_info: Dict[str, Dict] = {}
for fqn, module in model.named_modules():
# if module is supported and it has a pre-observer
if self._is_supported(module):
# get pre observer for the module
pre_obs = getattr(module, self.DEFAULT_PRE_OBSERVER_NAME)
input_info[fqn] = {
self.ACTIVATION_PREFIX + self.PER_CHANNEL_MAX_KEY: pre_obs.max_val,
self.ACTIVATION_PREFIX + self.PER_CHANNEL_MIN_KEY: pre_obs.min_val,
self.ACTIVATION_PREFIX + self.GLOBAL_MAX_KEY: max(pre_obs.max_val),
self.ACTIVATION_PREFIX + self.GLOBAL_MIN_KEY: min(pre_obs.min_val),
}
return input_info
def _extract_weight_info(self, model: GraphModule) -> Dict[str, Dict]:
r"""
Takes in a calibrated GraphModule and then finds the relevant observers.
It then extracts the weight information for each layer an observer is attached to.
Args
model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers
Returns a dict mapping module fqns (str) to a dict with keys:
"per_channel_max" : maps to the per_channel max values
"per_channel_min" : maps to the per_channel min values
"global_max" : maps to the global max recorded
"global_min" : maps to the global min recorded
"""
# return dictionary mapping observer fqns to desired info
weight_info: Dict[str, Dict] = {}
for fqn, module in model.named_modules():
# if module is supported and it has a pre-observer
if self._is_supported(module):
# we don't need actual observer, just the module weights
# calculate min and max vals
min_val: torch.Tensor = torch.tensor([float('inf')])
max_val: torch.Tensor = torch.tensor([float('-inf')])
x_copy = module.weight
x_dim = x_copy.size()
new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
new_axis_list[self.ch_axis] = 0
new_axis_list[0] = self.ch_axis
y = x_copy.permute(new_axis_list)
# Need to match dtype of min/max because the updates to buffers
# are done in place and types need to match for comparisons
y = y.to(min_val.dtype)
y = torch.flatten(y, start_dim=1)
if min_val.numel() == 0 or max_val.numel() == 0:
min_val, max_val = torch.aminmax(y, dim=1)
else:
min_val_cur, max_val_cur = torch.aminmax(y, dim=1)
min_val = torch.min(min_val_cur, min_val)
max_val = torch.max(max_val_cur, max_val)
weight_info[fqn] = {
self.WEIGHT_PREFIX + self.PER_CHANNEL_MAX_KEY: max_val,
self.WEIGHT_PREFIX + self.PER_CHANNEL_MIN_KEY: min_val,
self.WEIGHT_PREFIX + self.GLOBAL_MAX_KEY: max(max_val),
self.WEIGHT_PREFIX + self.GLOBAL_MIN_KEY: min(min_val),
}
return weight_info
def _calculate_range_ratio(self, info_dict: Dict, info_str: str, module_fqn: str) -> torch.Tensor:
r"""
Takes in an info dict and calculates the per-channel range ratios used to compute the s_c metric.
Args:
info_dict (dict): A dictionary of either input or weight range info
info_str (str): A str describing whether currently looking at weight or input info
Either "weight" or "input"
module_fqn (str): The fqn of the module we are looking at
Returns a tensor of values, where each value is the range ratio for a different channel
"""
# calculate the ratios of the info
# get the prefix str
prefix_str = self.ACTIVATION_PREFIX if info_str == self.INPUT_STR else self.WEIGHT_PREFIX
per_channel_range = info_dict[prefix_str + self.PER_CHANNEL_MAX_KEY] - info_dict[prefix_str + self.PER_CHANNEL_MIN_KEY]
global_range = info_dict[prefix_str + self.GLOBAL_MAX_KEY] - info_dict[prefix_str + self.GLOBAL_MIN_KEY]
if global_range == 0:
range_zero_explanation = "We recommend removing this channel as it doesn't provide any useful information."
raise ValueError(
"The range of the {} data for module {} is 0, which means you have a constant value channel. {}".format(
info_str, module_fqn, range_zero_explanation
)
)
ratio = per_channel_range / global_range
return ratio
def _generate_comparision_values(self, input_info: Dict, weight_info: Dict) -> Dict[str, torch.Tensor]:
r"""
Takes in the information on the min and max values of the inputs and weights and:
Calculates the comp stat for each channel: s_c = sqrt(w_c/W)/sqrt(i_c/I)
Args:
input_info (dict): A dict mapping each observer to input range information
weight_info (dict): A dict mapping each observer to weight range information
Returns a dict mapping relevant observer fqns (str) to a 1-D tensor.
Each value is a different s_c value for a different channel
"""
# create return dictionary for each observer
module_fqn_to_channel: Dict[str, torch.Tensor] = {}
# for each module (both passed in dicts should have same keys)
for module_fqn in input_info:
# raise error if not in weight info
if module_fqn not in weight_info:
raise KeyError("Unable to find weight range stats for module {}".format(module_fqn))
# calculate the ratios of the weight info and input info
weight_ratio = self._calculate_range_ratio(weight_info[module_fqn], self.WEIGHT_STR, module_fqn)
input_ratio = self._calculate_range_ratio(input_info[module_fqn], self.INPUT_STR, module_fqn)
# if mismatched size, because of grouping, we want to replicate weight enough times
weight_channels = len(weight_ratio)
input_channels = len(input_ratio)
if weight_channels != input_channels:
# we try to replicate
assert input_channels % weight_channels == 0, "input channels should be divisible by weight channels."
# get replication factor
rep_factor: int = input_channels // weight_channels
# weight ratio is (n,), input ratio is (k,), we just repeat the weight ratio k // n times
weight_ratio = weight_ratio.repeat(rep_factor)
# calculate the s metric per channel
s = torch.sqrt(weight_ratio) / torch.sqrt(input_ratio)
module_fqn_to_channel[module_fqn] = s
# return compiled observer ratios
return module_fqn_to_channel
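# Sketch of the grouping case handled above (hypothetical shapes): for a grouped conv whose weight
# ratio has 4 channels while the input ratio has 8, rep_factor = 8 // 4 = 2, so the weight ratio is
# repeated to length 8 before s = sqrt(weight_ratio) / sqrt(input_ratio) is computed element-wise.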
def _generate_dict_info(self, input_info: Dict, weight_info: Dict, comp_stats: Dict) -> Dict[str, Dict]:
r"""
Helper function for generate_detector_report that does the generation of the dictionary.
This process is done as specified in generate_detector_report documentation
Args:
input_info (dict): A dict mapping each module to input range information
weight_info (dict): A dict mapping each module to weight range information
comp_stats (dict): A dict mapping each module to its corresponding comp stat
Returns a dictionary mapping each module with relevant ModelReportObservers around them to:
whether input weight equalization is recommended
their s_c metric compared to the threshold
the threshold used to make the recommendation
the channel used for recording data
the input channel range info
the weight channel range info
"""
# store modules input weight equalization info
input_weight_equalization_info: Dict[str, Dict] = {}
# for each module we add separate set of suggestions
for module_fqn in input_info:
# get relevant info for this module
mod_input_info: Dict = input_info[module_fqn]
mod_weight_info: Dict = weight_info[module_fqn]
mod_comp_stat: Dict = comp_stats[module_fqn]
# decide if each channel should have input weight equalization or not
channel_rec_vals: list = []
for val in mod_comp_stat:
float_rep: float = val.item()
# decide if recommending input weight equalization
recommended: bool = float_rep >= self.ratio_threshold and float_rep <= 1 / self.ratio_threshold
channel_rec_vals.append(recommended)
# build the return dict input
# also unpack input and weight dicts into it
input_weight_equalization_info[module_fqn] = {
self.RECOMMENDED_KEY: channel_rec_vals,
self.COMP_METRIC_KEY: mod_comp_stat,
self.THRESHOLD_KEY: self.ratio_threshold,
self.CHANNEL_KEY: self.ch_axis,
**mod_input_info,
**mod_weight_info,
}
# return our compiled info for each module
return input_weight_equalization_info
def generate_detector_report(self, model: GraphModule) -> Tuple[str, Dict[str, Any]]:
r"""
Determines whether input weight equalization is appropriate for a given module.
Takes advantage of the ModelReport Observer which records per channel information of input range
It then uses the extracted weight info in conjunction with the input info to compute the desired ratio
Finally, it gives suggestions based on this information for each module of interest
Args:
model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers
Returns a tuple with two elements:
String report of whether input weight equalization is recommended for certain modules
Dictionary mapping modules of interest to:
whether input weight equalization is recommended
their s_c metric compared to the threshold
the threshold used to make the recommendation
the channel used for recording data
the input channel range info
the weight channel range info
"""
# find the range of inputs
input_values: Dict[str, Dict] = self._extract_input_info(model)
# find the range of weights
weight_values: Dict[str, Dict] = self._extract_weight_info(model)
# calculate per_channel comparison statistic s_c
comp_stats: Dict[str, torch.Tensor] = self._generate_comparision_values(input_values, weight_values)
# generate the return dictionary
input_weight_equalization_info: Dict[str, Dict] = self._generate_dict_info(input_values, weight_values, comp_stats)
# now we can generate report based on this information
input_weight_string = "Input-Weight Equalization suggestions: \n"
# some strings to be formatted depending on module we are adding
module_suggestion_str = "For Module {} looked at with axis {}: \n"
channel_suggestion_str = "\tWe suggest {} input weight equalization because {}\n"
use_str = "to use"
no_use_str = "to not use"
input_weight_benefit_str = "{}/{} channels would benefit and we expect significant reduction in quantization error."
input_weight_non_benefit_reasoning = "{}/{} channels benefitting from input-weight equalization being applied."
input_weight_non_benefit_str = "we don't expect much improvement from input-weight equalization based on {}"
# added module check
added_module: bool = False
# compile the suggestion string
for module_fqn in input_weight_equalization_info:
# we added at least 1 module
added_module = True
# add the module level description
input_weight_string += module_suggestion_str.format(module_fqn, self.ch_axis)
mod_info: Dict[str, Any] = input_weight_equalization_info[module_fqn]
# gather info on how many channels would benefit from input-weight equalization
recommendation_per_channel: torch.Tensor = mod_info[self.RECOMMENDED_KEY]
num_recs = sum(recommendation_per_channel)
if num_recs / len(recommendation_per_channel) >= self.DEFAULT_RECOMMEND_INPUT_WEIGHT_CHANNEL_RATIO:
input_benefit_formatted = input_weight_benefit_str.format(num_recs, len(recommendation_per_channel))
channel_str = channel_suggestion_str.format(use_str, input_benefit_formatted)
input_weight_string += channel_str
else:
non_benefit_reason_formatted = input_weight_non_benefit_reasoning.format(num_recs, len(recommendation_per_channel))
non_benefit_str = input_weight_non_benefit_str.format(non_benefit_reason_formatted)
channel_str = channel_suggestion_str.format(no_use_str, non_benefit_str)
input_weight_string += channel_str
# if no modules looked at, amend return string
if not added_module:
input_weight_string += "No applicable layers for suggestions. Only linear and conv valid.\n"
# return a tuple with the string explanation and the compiled dict info
return (input_weight_string, input_weight_equalization_info)
class OutlierDetector(DetectorBase):
r"""
Determines whether there are significant outliers in activation data around a certain layer.
This is ideally used in conjunction with information on stationary vs. non-stationary distribution:
If the data is stationary, and there are significant outliers, then we want to flag them
We want to do this on a per channel basis for detecting outliers
Determines whether activation data is flagged as outlier based on if data is stationary and:
p_r = avg(100th percentile / "reference_percentile"th percentile)
where:
p_r is average percentile ratio across all batches in the epoch
reference_percentile is a fractional percentile value between 0 and 1 exclusive
if p_r is above some threshold, then we consider the activations to have significant outliers
Args:
ratio_threshold (float, optional): The threshold for p_r to determine if there are outliers in activations
Should be >= 1
Default: 3.5
reference_percentile (float, optional): The denominator to find the relative scale of the 100th percentile
Should be between 0 and 1
Default: 0.975
fraction_batches_used_threshold (float, optional): Threshold of fraction of batches per channel to determine outlier
If fraction is below this, we deem number of samples used to calculate outliers as insignificant and alert user
regardless of whether we detected outliers or not in channel to take a closer look at channel results
Should be between 0 and 1
Default: 0.95
ch_axis (int, optional): The channel axis being observed to determine input weight equalization
Default: 1
* :attr:`ratio_threshold`: The threshold for p_r to determine if there are outliers in activations
The p_r value (average ratio of 100th percentile/reference_percentile) is compared to ratio_threshold
If it is significantly greater, then we consider it an outlier
This threshold was calculated based on the ratio of the percentiles in a normal distribution
The calculations behind value choice: https://drive.google.com/file/d/1N2wdtXWI-kOH8S7HH4-PYB_NmqzZil4p/view?usp=sharing
* :attr:`reference_percentile`: The denominator of the top fraction to find the relative scale of the 100th percentile
Should be between 0 and 1
The calculations behind value choice: https://drive.google.com/file/d/1N2wdtXWI-kOH8S7HH4-PYB_NmqzZil4p/view?usp=sharing
* :attr:`fraction_batches_used_threshold`: The fraction of batches to determine outliers for each channel should be above this
Some batches may not be used because of 0-based errors, so this is to ensure a good amount of the total batches are used
Should be between 0 and 1
* :attr:`ch_axis`: The channel axis being observed to determine outliers
* :attr:`DEFAULT_PRE_OBSERVER_NAME`: The name of the pre-observer to be inserted for this detector
"""
# names for the pre observers that are inserted
DEFAULT_PRE_OBSERVER_NAME: str = "model_report_pre_observer"
# pre activation prefix
INPUT_ACTIVATION_PREFIX = "input_activation_"
# names for dict keys
OUTLIER_KEY = "outliers_detected"
NUM_BATCHES_KEY = "outlier_detection_batches_used"
IS_SUFFICIENT_BATCHES_KEY = "outlier_detection_is_sufficient_batches"
COMP_METRIC_KEY = "outlier_detection_percentile_ratios"
RATIO_THRES_KEY = "outlier_detection_ratio_threshold"
REF_PERCENTILE_KEY = "outlier_detection_reference_percentile"
CHANNEL_AXIS_KEY = "outlier_detection_channel_axis"
MAX_VALS_KEY = INPUT_ACTIVATION_PREFIX + "per_channel_max"
CONSTANT_COUNTS_KEY = "constant_batch_counts"
def __init__(
self,
ratio_threshold: float = 3.5,
reference_percentile: float = 0.975,
fraction_batches_used_threshold: float = 0.95,
ch_axis: int = 1,
):
# initialize the variables of interest
self.ratio_threshold = ratio_threshold
# make sure passed in percentile is valid
assert reference_percentile >= 0 and reference_percentile <= 1
assert fraction_batches_used_threshold >= 0 and fraction_batches_used_threshold <= 1
self.reference_percentile = reference_percentile
self.fraction_batches_used_threshold = fraction_batches_used_threshold
self.ch_axis = ch_axis
def get_detector_name(self) -> str:
r"""Returns the name of this detector"""
return "outlier_detector"
def _supports_insertion(self, module: nn.Module) -> bool:
r"""Returns whether the given module is supported for observers insertion
Any module that doesn't have children and isn't an observer itself is supported
Args
module: The module to check and ensure is supported
Returns True if the module is supported by observer, False otherwise
"""
# case for insertion of module
# check if the module has any children and isn't observer
num_children = len(list(module.children()))
return num_children == 0 and not is_activation_post_process(module)
def _supports_report_gen(self, module: nn.Module) -> bool:
r"""Returns whether the given module is supported for report generation
Any module that has a model report pre-observer is supported
Args
module: The module to check and ensure is supported
Returns True if the module is supported by observer, False otherwise
"""
return hasattr(module, self.DEFAULT_PRE_OBSERVER_NAME)
def determine_observer_insert_points(self, prepared_fx_model: GraphModule) -> Dict[str, Dict[str, Any]]:
r""" Determines where observers need to be inserted for the Outlier Detector.
For this detector, we want to place observers in front of supported layers.
Currently inserts observers for:
all layers that do not have children (leaf level layers)
Args:
prepared_fx_model (GraphModule): The prepared Fx GraphModule
Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict with:
key "target_node" -> the node we are trying to observe with this observer (torch.fx.node.Node)
key "observer_to_insert" -> the observer we wish to insert (ObserverBase)
key "is_post_observer" -> True if this is meant to be a post-observer for target_node, False if pre-observer
key "observer_args" -> The arguments that are meant to be passed into the observer
"""
# observer for this detector is ModelReportObserver
obs_ctr = ModelReportObserver
# return dict
obs_fqn_to_info: Dict[str, Dict[str, Any]] = {}
for fqn, module in prepared_fx_model.named_modules():
# check to see if module is of a supported type
if self._supports_insertion(module):
# if it's a supported type, we want to get node and add observer insert locations
targeted_node = self._get_targeting_node(prepared_fx_model, fqn)
# add entry for pre-observer
pre_obs_fqn = fqn + "." + self.DEFAULT_PRE_OBSERVER_NAME
obs_fqn_to_info[pre_obs_fqn] = {
DETECTOR_TARGET_NODE_KEY: targeted_node,
DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(ch_axis=self.ch_axis, comp_percentile=self.reference_percentile),
DETECTOR_IS_POST_OBS_KEY: False,
DETECTOR_OBS_ARGS_KEY: targeted_node.args,
}
return obs_fqn_to_info
def _calculate_outlier_info(
self,
percentile_ratios: torch.Tensor,
counted_batches: torch.Tensor,
total_batches: int,
) -> Dict[str, List[bool]]:
r"""
Gives info on whether the percentile ratios calculated would be considered outliers
Also gives information on whether the collected data is statistically significant to make this claim
Args:
percentile_ratios (torch.Tensor): The average percentile_ratios per channel calculated by the observer
counted_batches (torch.Tensor): The number of batches used for average calculation per tensor
total_batches (int): The total number of batches that passed through observer in this epoch
Returns a dictionary mapping:
"outliers_detected" : list of bools per channel that are true if it is considered an outlier
"is_sufficient_batches": if o_r was >= fraction_batches_used_threshold:
where o_r = counted_batches / total_batches
"""
outlier_dict: Dict[str, List[bool]] = {self.OUTLIER_KEY: [], self.IS_SUFFICIENT_BATCHES_KEY: []}
# get both as flattened lists for easy mapping
ratios_list: List = percentile_ratios.tolist()
num_batches_list: List = counted_batches.tolist()
# calculate whether channels were statistically significant
significant_size = [
batch_size / total_batches >= self.fraction_batches_used_threshold for batch_size in num_batches_list
]
outlier_dict[self.IS_SUFFICIENT_BATCHES_KEY] = significant_size
# calculate for each channel whether it's an outlier or not based on ratio
outlier_detected = [ratio > self.ratio_threshold for ratio in ratios_list]
outlier_dict[self.OUTLIER_KEY] = outlier_detected
# return the dictionary with the two lists
return outlier_dict
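# Sketch of the helper above on toy inputs (hypothetical values): with ratio_threshold = 3.5,
# fraction_batches_used_threshold = 0.95, total_batches = 100,
# percentile_ratios = tensor([2.0, 40.0]) and counted_batches = tensor([100, 90]), the returned
# dict would be {"outliers_detected": [False, True],
#                "outlier_detection_is_sufficient_batches": [True, False]}.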
def _generate_info_dict(self, model: GraphModule) -> Dict[str, Dict]:
r"""
Helper function for generate_detector_report that does the generation of the dictionary.
This process is done as specified in generate_detector_report documentation
Args:
model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers
Returns a dict mapping relevant module fqns to:
whether there were outliers found in activation before
the number of batches used for each channel
whether fraction of applicable batches used is above fraction_batches_used_threshold
their p_r metric compared to the threshold
the threshold used to make the recommendation
the reference_percentile used to make the recommendation
the channel axis used to determine individual channels
the constant batch counts per channel
the per channel max values
"""
# return dictionary mapping observer fqns to desired info
info_dict: Dict[str, Dict] = {}
for fqn, module in model.named_modules():
# if module is supported and it has a pre-observer
if self._supports_report_gen(module):
# get pre observer for the module
pre_obs: ModelReportObserver = getattr(module, self.DEFAULT_PRE_OBSERVER_NAME)
# get the number of batches and calculated ratio thresholds
num_batches: torch.Tensor = pre_obs.percentile_batches_tracked
average_ratios: torch.Tensor = pre_obs.average_percentile_ratio
channel_batch_cnts: torch.Tensor = pre_obs.constant_channels
total_batches: int = pre_obs.num_batches_tracked
# also get the max values
max_vals: torch.Tensor = pre_obs.max_val
# we have to specifically modify how we are recording negative ratio for pre-relu layers
for index, ratio_val in enumerate(average_ratios):
# check if we have a negative ratio
# a ratio might be negative if we have a situation where the 100th percentile is
# > 0 while the nth percentile is < 0, in which case this would not be detected
# as an outlier. Since we care more about magnitude, we make it positive.
if ratio_val.item() < 0:
# first make it positive
average_ratios[index] = -ratio_val
if ratio_val.item() < 1:
# if it's less than 1 we have to flip it as well
average_ratios[index] = 1 / ratio_val
outlier_calcs = self._calculate_outlier_info(average_ratios, num_batches, total_batches)
# calculate whether ratios were outliers
info_dict[fqn] = {
self.CHANNEL_AXIS_KEY: self.ch_axis,
self.REF_PERCENTILE_KEY: self.reference_percentile,
self.RATIO_THRES_KEY: self.ratio_threshold,
self.COMP_METRIC_KEY: average_ratios,
self.NUM_BATCHES_KEY: num_batches,
self.OUTLIER_KEY: outlier_calcs[self.OUTLIER_KEY],
self.IS_SUFFICIENT_BATCHES_KEY: outlier_calcs[self.IS_SUFFICIENT_BATCHES_KEY],
self.CONSTANT_COUNTS_KEY: channel_batch_cnts,
self.MAX_VALS_KEY: max_vals
}
return info_dict
def generate_detector_report(self, model: GraphModule) -> Tuple[str, Dict[str, Any]]:
r"""
Determines whether there are significant outliers in the activation data around certain modules.
Takes advantage of the ModelReportObserver which records the relevant percentile information
Args:
model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers
Returns a tuple with two elements:
String report of whether there are outliers in the activations around certain modules
Dictionary mapping modules of interest to:
whether there were outliers found in activation before
the number of batches used for each channel
whether fraction of applicable batches used is above fraction_batches_used_threshold
their p_r metric compared to the threshold
the threshold used to make the recommendation
the reference_percentile used to make the recommendation
the channel axis used to determine individual channels
the constant batch counts per channel
the per channel max values
"""
# generate the information dictionary of outlier information
info_dict = self._generate_info_dict(model)
# now we can generate report based on this information
outlier_string = "Outlier detection report: \n"
# added module check
added_module: bool = False
# some strings to be formatted depending on module we are adding
module_suggestion_str = "For Module {} looked at with axis {}: \n"
channel_suggestion_str = "\tFor channel {}, we found outliers in the preceding activation data with {}.\n"
channel_max_value_str = "a max value across all batches of {}"
note_string = "Note: outlier detection is only reliable for {}. We recommend {} to ensure the most accurate results."
note_distribution = "stationary distributions"
note_rec = "running the static vs. dynamic detector to ensure activation data before modules above is stationary"
# suggestion for constant batch check since that can make it no outliers
constant_str = "\tFor channel {}, we found {} constant value batches. {}\n"
constant_suggestion = "We recommend taking a look at the dict and data to see how frequent this occured and why."
# compile the suggestion string
for module_fqn in info_dict:
# get module specific info
mod_info: Dict[str, Any] = info_dict[module_fqn]
# check to see if we already added high level model desc
added_model_desc = False
# look at each individual channel and add a suggestion
for index, outlier_detected in enumerate(mod_info[self.OUTLIER_KEY]):
if outlier_detected:
# we found at least 1 outlier
if not added_model_desc:
# add the module level description
outlier_string += module_suggestion_str.format(module_fqn, self.ch_axis)
added_model_desc = True
# we mark that we found at least one outlier
added_module = True
max_value_found_str = channel_max_value_str.format(mod_info[self.MAX_VALS_KEY][index])
channel_str = channel_suggestion_str.format(index, max_value_found_str)
outlier_string += channel_str
# also check if we found constant batch
if mod_info[self.CONSTANT_COUNTS_KEY][index] != 0:
# make sure we add a module level highlight.
if not added_model_desc:
# add the module level description
outlier_string += module_suggestion_str.format(module_fqn, self.ch_axis)
added_model_desc = True
constant_values_for_channel = mod_info[self.CONSTANT_COUNTS_KEY][index]
formatted_str = constant_str.format(index, constant_values_for_channel, constant_suggestion)
outlier_string += formatted_str
# we also added at least one thing to description
added_module = True
# if found outlier, give suggestion, else give default response
if added_module:
# compose the note string
note_composed = note_string.format(note_distribution, note_rec)
outlier_string += note_composed
else:
outlier_string += "There were no outliers found in the activations.\n"
return (outlier_string, info_dict)
| pytorch-master | torch/ao/quantization/fx/_model_report/detector.py |
import torch
from typing import Any, Set, Dict, List, Tuple, OrderedDict
from collections import OrderedDict as OrdDict
# try to import tabulate
got_tabulate = True
try:
from tabulate import tabulate
except ImportError:
got_tabulate = False
# var to see if we could import matplotlib
got_matplotlib = True
try:
import matplotlib.pyplot as plt
except ImportError:
got_matplotlib = False
class ModelReportVisualizer:
r"""
The ModelReportVisualizer class aims to provide users a way to visualize some of the statistics
that were generated by the ModelReport API. However, at a higher level, the class aims to provide
some level of visualization of statistics to PyTorch in order to make it easier to parse data and
diagnose any potential issues with data or a specific model. With respect to the visualizations,
the ModelReportVisualizer class currently supports several methods of visualizing data.
Supported Visualization Methods Include:
- Table format
- Plot format (line graph)
- Histogram format
For all of the existing visualization methods, there is the option to filter data based on:
- A module fqn prefix
- Feature [required for the plot and histogram]
* :attr:`generated_reports` The reports generated by the ModelReport class in the structure below
Ensure that features that are the same across different reports have the same name
Ensure that objects representing the same features are the same type / dimension (where applicable)
Note:
Currently, the ModelReportVisualizer class supports visualization of data generated by the
ModelReport class. However, this structure is extensible and should allow the visualization of
other information as long as the information is structured in the following general format:
Report Structure
-- module_fqn [module with attached detectors]
|
-- feature keys [not every detector extracts same information]
[same collected info has same keys, unless can be specific to detector]
The goal behind the class is that the generated visualizations can be used in conjunction with the generated
report for people to get a better understanding of issues and what the fix might be. It is also just to provide
a good visualization platform, since it might be hard to parse through the ModelReport returned dictionary as
that grows in size.
General Use Flow Expected
1.) Initialize ModelReport object with reports of interest by passing in initialized detector objects
2.) Prepare your model with prepare_fx
3.) Call model_report.prepare_detailed_calibration on your model to add relevant observers
4.) Calibrate your model with data
5.) Call model_report.generate_report on your model to generate report and optionally remove added observers
6.) Use output of model_report.generate_report to initialize ModelReportVisualizer instance
7.) Use instance to view different views of data as desired, applying filters as needed
8.) Either see the super detailed information or just the actual printed or shown table / plot / histogram
"""
# keys for table dict
TABLE_TENSOR_KEY = "tensor_level_info"
TABLE_CHANNEL_KEY = "channel_level_info"
# Constants for header vals
NUM_NON_FEATURE_TENSOR_HEADERS = 2
NUM_NON_FEATURE_CHANNEL_HEADERS = 3
# Constants for row index in header
CHANNEL_NUM_INDEX = 2
def __init__(self, generated_reports: OrderedDict[str, Any]):
r"""
Initializes the ModelReportVisualizer instance with the necessary reports.
Args:
generated_reports (Dict[str, Any]): The reports generated by the ModelReport class
can also be a dictionary generated in another manner, as long as format is same
"""
self.generated_reports = generated_reports
def get_all_unique_module_fqns(self) -> Set[str]:
r"""
The purpose of this method is to provide a user the set of all module_fqns so that if
they wish to use some of the filtering capabilities of the ModelReportVisualizer class,
they don't need to manually parse the generated_reports dictionary to get this information.
Returns all the unique module fqns present in the reports the ModelReportVisualizer
instance was initialized with.
"""
# returns the keys of the ordered dict
return set(self.generated_reports.keys())
def get_all_unique_feature_names(self, plottable_features_only: bool = True) -> Set[str]:
r"""
The purpose of this method is to provide a user the set of all feature names so that if
they wish to use the filtering capabilities of the generate_table_view(), or use either of
the generate_plot_view() or generate_histogram_view(), they don't need to manually parse
the generated_reports dictionary to get this information.
Args:
plottable_features_only (bool): True if the user is only looking for plottable features,
False otherwise
plottable features are those that are tensor values
Default: True (only return those feature names that are plottable)
Returns all the unique feature names present in the reports the ModelReportVisualizer
instance was initialized with.
"""
unique_feature_names = set()
for module_fqn in self.generated_reports:
# get dict of the features
feature_dict: Dict[str, Any] = self.generated_reports[module_fqn]
# loop through features
for feature_name in feature_dict:
# if we need plottable, ensure type of val is tensor
if not plottable_features_only or type(feature_dict[feature_name]) == torch.Tensor:
unique_feature_names.add(feature_name)
# return our compiled set of unique feature names
return unique_feature_names
def _get_filtered_data(self, feature_filter: str, module_fqn_filter: str) -> OrderedDict[str, Any]:
r"""
Filters the data and returns it in the same ordered dictionary format so the relevant views can be displayed.
Args:
feature_filter (str): The feature filter, if we want to filter the set of data to only include
a certain set of features that include feature_filter
If feature = "", then we do not filter based on any features
module_fqn_filter (str): The filter on prefix for the module fqn. All modules that have fqn with
this prefix will be included
If module_fqn_filter = "" we do not filter based on module fqn, and include all modules
First, the data is filtered based on module_fqn, and then filtered based on feature
Returns an OrderedDict (sorted in order of model) mapping:
module_fqns -> feature_names -> values
"""
# create return dict
filtered_dict: OrderedDict[str, Any] = OrdDict()
for module_fqn in self.generated_reports:
# first filter based on module
if module_fqn_filter == "" or module_fqn_filter in module_fqn:
# create entry for module and loop through features
filtered_dict[module_fqn] = {}
module_reports = self.generated_reports[module_fqn]
for feature_name in module_reports:
# check if filtering on features and do so if desired
if feature_filter == "" or feature_filter in feature_name:
filtered_dict[module_fqn][feature_name] = module_reports[feature_name]
# we have populated the filtered dict, and must return it
return filtered_dict
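# Sketch of the filtering above (hypothetical report keys): given generated_reports containing
# module fqns "block1.linear" and "block2.linear", each mapping features such as
# "per_channel_min" and "global_max", _get_filtered_data("per_channel", "block1") would return an
# OrderedDict with only "block1.linear", keeping only its features whose names contain
# "per_channel" (i.e. "per_channel_min" here).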
def _generate_tensor_table(
self,
filtered_data: OrderedDict[str, Dict[str, Any]],
tensor_features: List[str]
) -> Tuple[List, List]:
r"""
Takes in the filtered data and features list and generates the tensor headers and table
Currently meant to generate the headers and table for both the tensor information.
Args:
filtered_data (OrderedDict[str, Dict[str, Any]]): An OrderedDict (sorted in order of model) mapping:
module_fqns -> feature_names -> values
tensor_features (List[str]): A list of the tensor level features
Returns a tuple with:
A list of the headers of the tensor table
A list of lists containing the table information row by row
The 0th index row will contain the headers of the columns
The rest of the rows will contain data
"""
# now we compose the tensor information table
tensor_table: List[List[Any]] = []
tensor_headers: List[str] = []
# append the table row to the table only if we have features
if len(tensor_features) > 0:
# now we add all the data
for index, module_fqn in enumerate(filtered_data):
# we make a new row for the tensor table
tensor_table_row = [index, module_fqn]
for feature in tensor_features:
# we iterate in same order of added features
if feature in filtered_data[module_fqn]:
# add value if applicable to module
feature_val = filtered_data[module_fqn][feature]
else:
# add that it is not applicable
feature_val = "Not Applicable"
# if it's a tensor we want to extract val
if isinstance(feature_val, torch.Tensor):
feature_val = feature_val.item()
# we add to our list of values
tensor_table_row.append(feature_val)
tensor_table.append(tensor_table_row)
# add row of headers if we actually have something, otherwise just empty
if len(tensor_table) != 0:
tensor_headers = ["idx", "layer_fqn"] + tensor_features
return (tensor_headers, tensor_table)
def _generate_channels_table(
self,
filtered_data: OrderedDict[str, Any],
channel_features: List[str],
num_channels: int
) -> Tuple[List, List]:
r"""
Takes in the filtered data and features list and generates the channels headers and table
Currently meant to generate the headers and table for both the channels information.
Args:
filtered_data (OrderedDict[str, Any]): An OrderedDict (sorted in order of model) mapping:
module_fqns -> feature_names -> values
channel_features (List[str]): A list of the channel level features
num_channels (int): Number of channels in the channel data
Returns a tuple with:
A list of the headers of the channel table
A list of lists containing the table information row by row
The 0th index row will contain the headers of the columns
The rest of the rows will contain data
"""
# now we compose the table for the channel information table
channel_table: List[List[Any]] = []
channel_headers: List[str] = []
# counter to keep track of number of entries in the channel table
channel_table_entry_counter: int = 0
if len(channel_features) > 0:
# now we add all channel data
for index, module_fqn in enumerate(filtered_data):
# we iterate over all channels
for channel in range(num_channels):
# we make a new row for the channel
new_channel_row = [channel_table_entry_counter, module_fqn, channel]
for feature in channel_features:
if feature in filtered_data[module_fqn]:
# add value if applicable to module
feature_val = filtered_data[module_fqn][feature][channel]
else:
# add that it is not applicable
feature_val = "Not Applicable"
# if it's a tensor we want to extract val
if type(feature_val) is torch.Tensor:
feature_val = feature_val.item()
# add value to channel specific row
new_channel_row.append(feature_val)
# add to table and increment row index counter
channel_table.append(new_channel_row)
channel_table_entry_counter += 1
# add row of headers if we actually have something, otherwise just empty
if len(channel_table) != 0:
channel_headers = ["idx", "layer_fqn", "channel"] + channel_features
return (channel_headers, channel_table)
def generate_filtered_tables(self, feature_filter: str = "", module_fqn_filter: str = "") -> Dict[str, Tuple[List, List]]:
r"""
Takes in optional filter values and generates two tables with desired information.
The generated tables are presented in both a list-of-lists format
The reason for the two tables are that they handle different things:
1.) the first table handles all tensor level information
2.) the second table handles and displays all channel based information
The reasoning for this is that having all the info in one table can make it ambiguous which collected
statistics are global, and which are actually per-channel, so it's better to split it up into two
tables. This also makes the information much easier to digest given the plethora of statistics collected
Tensor table columns:
idx layer_fqn feature_1 feature_2 feature_3 .... feature_n
---- --------- --------- --------- --------- ---------
Per-Channel table columns:
idx layer_fqn channel feature_1 feature_2 feature_3 .... feature_n
---- --------- ------- --------- --------- --------- ---------
Args:
feature_filter (str, optional): Filters the features presented to only those that
contain this filter substring
Default = "", results in all the features being printed
module_fqn_filter (str, optional): Only includes modules that contains this string
Default = "", results in all the modules in the reports to be visible in the table
Returns a dictionary with two keys:
(Dict[str, Tuple[List, List]]) A dict containing two keys:
"tensor_level_info", "channel_level_info"
Each key maps to a tuple with:
A list of the headers of each table
A list of lists containing the table information row by row
The 0th index row will contain the headers of the columns
The rest of the rows will contain data
Example Use:
>>> # xdoctest: +SKIP("undefined variables")
>>> mod_report_visualizer.generate_filtered_tables(
... feature_filter = "per_channel_min",
... module_fqn_filter = "block1"
... ) # generates table with per_channel_min info for all modules in block 1 of the model
"""
# first get the filtered data
filtered_data: OrderedDict[str, Any] = self._get_filtered_data(feature_filter, module_fqn_filter)
# now we split into tensor and per-channel data
tensor_features: Set[str] = set()
channel_features: Set[str] = set()
# keep track of the number of channels we have
num_channels: int = 0
for module_fqn in filtered_data:
for feature_name in filtered_data[module_fqn]:
# get the data for that specific feature
feature_data = filtered_data[module_fqn][feature_name]
# check if not zero dim tensor
is_tensor: bool = isinstance(feature_data, torch.Tensor)
is_not_zero_dim: bool = is_tensor and len(feature_data.shape) != 0
if is_not_zero_dim or isinstance(feature_data, list):
# a non-zero dim tensor or a list means the feature is per-channel
channel_features.add(feature_name)
num_channels = len(feature_data)
else:
# means is per-tensor
tensor_features.add(feature_name)
# we make them lists for iteration purposes
tensor_features_list: List[str] = sorted(list(tensor_features))
channel_features_list: List[str] = sorted(list(channel_features))
# get the tensor info
tensor_headers, tensor_table = self._generate_tensor_table(filtered_data, tensor_features_list)
# get the channel info
channel_headers, channel_table = self._generate_channels_table(
filtered_data, channel_features_list, num_channels
)
# let's now create the dictionary to return
table_dict = {
self.TABLE_TENSOR_KEY : (tensor_headers, tensor_table),
self.TABLE_CHANNEL_KEY : (channel_headers, channel_table)
}
# return the two tables
return table_dict
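# Illustrative usage sketch: consuming the dictionary returned by generate_filtered_tables().
# `viz` below is assumed to be an already constructed ModelReportVisualizer instance; the key
# strings are the "tensor_level_info" / "channel_level_info" keys documented above.
#   table_dict = viz.generate_filtered_tables(feature_filter="per_channel_min", module_fqn_filter="block1")
#   tensor_headers, tensor_table = table_dict["tensor_level_info"]
#   channel_headers, channel_table = table_dict["channel_level_info"]
#   for row in channel_table:
#       print(dict(zip(channel_headers, row)))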
def generate_table_visualization(self, feature_filter: str = "", module_fqn_filter: str = ""):
r"""
Takes in optional filter values and prints out formatted tables of the information.
The reason two tables are printed out instead of one large one is that they handle different things:
1.) the first table handles all tensor level information
2.) the second table handles and displays all channel based information
The reasoning for this is that having all the info in one table can make it ambiguous which collected
statistics are global, and which are actually per-channel, so it's better to split it up into two
tables. This also makes the information much easier to digest given the plethora of statistics collected
Tensor table columns:
idx layer_fqn feature_1 feature_2 feature_3 .... feature_n
---- --------- --------- --------- --------- ---------
Per-Channel table columns:
idx layer_fqn channel feature_1 feature_2 feature_3 .... feature_n
---- --------- ------- --------- --------- --------- ---------
Args:
feature_filter (str, optional): Filters the features presented to only those that
contain this filter substring
Default = "", results in all the features being printed
module_fqn_filter (str, optional): Only includes modules that contain this string
Default = "", results in all the modules in the reports to be visible in the table
Example Use:
>>> # xdoctest: +SKIP("undefined variables")
>>> mod_report_visualizer.generate_table_visualization(
... feature_filter = "per_channel_min",
... module_fqn_filter = "block1"
... )
>>> # prints out neatly formatted table with per_channel_min info
>>> # for all modules in block 1 of the model
"""
# see if we got tabulate
if not got_tabulate:
print("Make sure to install tabulate and try again.")
return None
# get the table dict and the specific tables of interest
table_dict = self.generate_filtered_tables(feature_filter, module_fqn_filter)
tensor_headers, tensor_table = table_dict[self.TABLE_TENSOR_KEY]
channel_headers, channel_table = table_dict[self.TABLE_CHANNEL_KEY]
# get the table string and print it out
# now we have populated the tables for each one
# let's create the strings to be returned
table_str = ""
# the tables will have some header columns that are non-feature
# ex. table index, module name, channel index, etc.
# we want to look at header columns for features, that come after those headers
if len(tensor_headers) > self.NUM_NON_FEATURE_TENSOR_HEADERS:
# if we have at least one tensor level feature to be added we add the tensor table
table_str += "Tensor Level Information \n"
table_str += tabulate(tensor_table, headers=tensor_headers)
if len(channel_headers) > self.NUM_NON_FEATURE_CHANNEL_HEADERS:
# if we have at least one channel level feature to be added we add the channel table
table_str += "\n\n Channel Level Information \n"
table_str += tabulate(channel_table, headers=channel_headers)
# if no features at all, let user know
if table_str == "":
table_str = "No data points to generate table with."
print(table_str)
def _get_plottable_data(self, feature_filter: str, module_fqn_filter: str) -> Tuple[List, List[List], bool]:
r"""
Takes in the feature filters and module filters and outputs the x and y data for plotting
Args:
feature_filter (str): Filters the features presented to only those that
contain this filter substring
module_fqn_filter (str): Only includes modules that contains this string
Returns a tuple of three elements
The first is a list containing relevant x-axis data
The second is a list containing the corresponding y-axis data
The third is a boolean that is True if the data is per-channel
"""
# get the table dict and the specific tables of interest
table_dict = self.generate_filtered_tables(feature_filter, module_fqn_filter)
tensor_headers, tensor_table = table_dict[self.TABLE_TENSOR_KEY]
channel_headers, channel_table = table_dict[self.TABLE_CHANNEL_KEY]
# make sure it is only 1 feature that is being plotted
# get the number of features in each of these
tensor_info_features_count = len(tensor_headers) - ModelReportVisualizer.NUM_NON_FEATURE_TENSOR_HEADERS
channel_info_features_count = len(channel_headers) - ModelReportVisualizer.NUM_NON_FEATURE_CHANNEL_HEADERS
# see if valid tensor or channel plot
is_valid_per_tensor_plot: bool = tensor_info_features_count == 1
is_valid_per_channel_plot: bool = channel_info_features_count == 1
# offset should either be one of tensor or channel table or neither
feature_column_offset = ModelReportVisualizer.NUM_NON_FEATURE_TENSOR_HEADERS
table = tensor_table
# if a per_channel plot, we have different offset and table
if is_valid_per_channel_plot:
feature_column_offset = ModelReportVisualizer.NUM_NON_FEATURE_CHANNEL_HEADERS
table = channel_table
x_data: List = []
y_data: List[List] = []
# the feature will either be a tensor feature or channel feature
if is_valid_per_tensor_plot:
for table_row_num, row in enumerate(table):
# get x_value to append
x_val_to_append = table_row_num
# the index of the feature will be 0 + the number of non-feature columns
tensor_feature_index = feature_column_offset
row_value = row[tensor_feature_index]
if not isinstance(row_value, str):
x_data.append(x_val_to_append)
y_data.append(row_value)
elif is_valid_per_channel_plot:
# gather the x_data and multiple y_data
# calculate the number of channels
num_channels: int = max(row[self.CHANNEL_NUM_INDEX] for row in table) + 1
for channel in range(num_channels):
y_data.append([]) # separate data list per channel
for table_row_num, row in enumerate(table):
# get x_value to append
x_val_to_append = table_row_num
current_channel = row[self.CHANNEL_NUM_INDEX] # initially choose current channel
new_module_index: int = table_row_num // num_channels
x_val_to_append = new_module_index
# the index of the feature will be 0 + the number of non-feature columns
tensor_feature_index = feature_column_offset
row_value = row[tensor_feature_index]
if not isinstance(row_value, str):
# only append if this is a new index we are appending
if len(x_data) == 0 or x_data[-1] != x_val_to_append:
x_data.append(x_val_to_append)
# append value for that channel
y_data[current_channel].append(row_value)
else:
# more than one feature was chosen
error_str = "Make sure to pick only a single feature with your filter to plot a graph."
error_str += " We recommend calling get_all_unique_feature_names() to find unique feature names."
error_str += " Pick one of those features to plot."
raise ValueError(error_str)
# return x, y values, and if data is per-channel
return (x_data, y_data, is_valid_per_channel_plot)
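# Illustrative sketch of the return shapes (assumed example values, for clarity only):
# for a per-channel feature with 2 channels observed across 3 modules, the return value
# has the form
#   x_data = [0, 1, 2]                # one x value per module
#   y_data = [[0.1, 0.2, 0.3],        # channel 0 value for each module
#             [0.4, 0.5, 0.6]]        # channel 1 value for each module
#   is_valid_per_channel_plot = True
# whereas for a per-tensor feature y_data is a flat list with one value per table row.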
def generate_plot_visualization(self, feature_filter: str, module_fqn_filter: str = ""):
r"""
Takes in a feature and optional module_filter and plots of the desired data.
For per channel features, it averages the value across the channels and plots a point
per module. The reason for this is that for models with hundreds of channels, it can
be hard to differentiate one channel line from another, and so the point of generating
a single average point per module is to give a sense of general trends that encourage
further deep dives.
Note:
Only features in the report that have tensor value data are plottable by this class
When the tensor information is plotted, it will plot:
idx as the x val, feature value as the y_val
When the channel information is plotted, it will plot:
the first idx of each module as the x val, feature value as the y_val [for each channel]
The reason for this is that we want to be able to compare values across the
channels for same layer, and it will be hard if values are staggered by idx
This means each module is represented by only 1 x value
Args:
feature_filter (str): Filters the features presented to only those that
contain this filter substring
module_fqn_filter (str, optional): Only includes modules that contain this string
Default = "", results in all the modules in the reports to be visible in the table
Example Use:
>>> # xdoctest: +SKIP("undefined variables")
>>> mod_report_visualizer.generate_plot_visualization(
... feature_filter = "per_channel_min",
... module_fqn_filter = "block1"
... )
>>> # outputs a line plot of the average per_channel_min value for
>>> # each module in block1 of the model, plotted across the
>>> # in-order modules on the x-axis
"""
# checks if we have matplotlib and lets the user know to install it if we don't
if not got_matplotlib:
print("make sure to install matplotlib and try again.")
return None
# get the x and y data and if per channel
x_data, y_data, data_per_channel = self._get_plottable_data(feature_filter, module_fqn_filter)
# plot based on whether data is per channel or not
ax = plt.subplot()
ax.set_ylabel(feature_filter)
ax.set_title(feature_filter + " Plot")
plt.xticks(x_data) # only show ticks for actual points
if data_per_channel:
ax.set_xlabel("First idx of module")
# set the legend as well
# plot a single line that is average of the channel values
num_modules = len(y_data[0]) # all y_data have same length, so get num modules
num_channels = len(y_data) # we want num channels to be able to calculate average later
# average each module's value across its channels
avg_vals = [sum(y_data[ch][index] for ch in range(num_channels)) / num_channels for index in range(num_modules)]
# plot the single averaged line
ax.plot(x_data, avg_vals, label="Average Value Across {} Channels".format(num_channels))
ax.legend(loc='upper right')
else:
ax.set_xlabel("idx")
ax.plot(x_data, y_data)
# actually show the plot
plt.show()
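# Worked example of the averaging above (assumed example values): with
# y_data = [[1.0, 3.0], [3.0, 5.0]] (2 channels, 2 modules), the averaged line is
# avg_vals = [(1.0 + 3.0) / 2, (3.0 + 5.0) / 2] = [2.0, 4.0], i.e. one point per module.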
def generate_histogram_visualization(self, feature_filter: str, module_fqn_filter: str = "", num_bins: int = 10):
r"""
Takes in a feature and optional module_filter and plots the histogram of desired data.
Note:
Only features in the report that have tensor value data can be viewed as a histogram
If you want to plot a histogram from all the channel values of a specific feature for
a specific model, make sure to specify both the model and the feature properly
in the filters and you should be able to see a distribution of the channel data
Args:
feature_filter (str): Filters the features presented to only those that
contain this filter substring
module_fqn_filter (str, optional): Only includes modules that contain this string
Default = "", results in all the modules in the reports to be visible in the table
num_bins (int, optional): The number of bins to create the histogram with
Default = 10, the values will be split into 10 equal sized bins
Example Use:
>>> # xdoctest: +SKIP
>>> mod_report_visualizer.generate_histogram_visualization(
... feature_filter = "per_channel_min",
... module_fqn_filter = "block1"
... )
>>> # outputs histogram of per_channel_min information for all modules in block1 of the model
>>> # information is gathered across all channels for all modules in block 1 for the
>>> # per_channel_min and is displayed in a histogram of equally sized bins
"""
# checks if we have matplotlib and lets the user know to install it if we don't
if not got_matplotlib:
print("make sure to install matplotlib and try again.")
return None
# get the x and y data and if per channel
x_data, y_data, data_per_channel = self._get_plottable_data(feature_filter, module_fqn_filter)
# for histogram, we just care about plotting the y data
# plot based on whether data is per channel or not
ax = plt.subplot()
ax.set_xlabel(feature_filter)
ax.set_ylabel("Frequency")
ax.set_title(feature_filter + " Histogram")
if data_per_channel:
# set the legend as well
# combine all the data
all_data = []
for index, channel_info in enumerate(y_data):
all_data.extend(channel_info)
val, bins, _ = plt.hist(
all_data,
bins=num_bins,
stacked=True,
rwidth=0.8,
)
plt.xticks(bins)
else:
val, bins, _ = plt.hist(
y_data,
bins=num_bins,
stacked=False,
rwidth=0.8,
)
plt.xticks(bins)
plt.show()
| pytorch-master | torch/ao/quantization/fx/_model_report/model_report_visualizer.py |
import torch
from torch.ao.quantization.observer import ObserverBase
class ModelReportObserver(ObserverBase):
r"""This observer is used to record additional information regarding keeping track
of S = average_batch_activation_range/epoch_activation_range.
The purpose of this information is to prepare a report to present to users on whether
Dynamic or Static Quantization is more appropriate for their model given the general
distributions of their data.
Args:
ch_axis (int, optional): The channel axis for which the range and outlier stats are computed
Default: 1
comp_percentile (float, optional): The percentile to compare against 100 percentile to find outliers
Should be between 0 and 1 exclusive
Default: 0.9
* :attr:`num_batches_tracked` specifies number of batches passed through the observer
* :attr:`average_batch_activation_range` defines average across the ranges of each batch passed through
* :attr:`epoch_activation_min` defines the minimum value passed through the observer
* :attr:`epoch_activation_max` defines the maximum value passed through the observer
* :attr:`ch_axis` defines the channel being used to compute per channel min max stats
* :attr:`min_val` defines the per channel minimum values passed through
* :attr:`max_val` defines the per channel maximum values passed through
* :attr:`comp_percentile` defines comparison percentile to find outliers
* :attr:`average_percentile_ratio` defines the per channel average percentile ratios
* :attr:`percentile_batches_tracked` defines the number of percentile batches tracked for each channel
* :attr:`constant_channels` defines, per channel, the number of batches in which that channel was constant
Note: this tool is meant for FX Graph Mode Quantization
"""
def __init__(self, ch_axis: int = 1, comp_percentile: float = 0.9):
super().__init__(torch.qint8)
self.num_batches_tracked = 0
# keep track of the min and max of the range for average batch and epoch as a whole
self.average_batch_activation_range: torch.Tensor = torch.tensor(float(0))
self.epoch_activation_min = torch.tensor(float("inf"))
self.epoch_activation_max = torch.tensor(float("-inf"))
# keep track of per channel min max information using the given channel
self.ch_axis: int = ch_axis
self.min_val: torch.Tensor = torch.tensor([])
self.max_val: torch.Tensor = torch.tensor([])
# keep track of percentile ratio information per channel
self.comp_percentile: torch.Tensor = torch.tensor([comp_percentile])
self.average_percentile_ratio: torch.Tensor = torch.tensor([])
self.percentile_batches_tracked: torch.Tensor = torch.tensor([])
self.constant_channels: torch.Tensor = torch.tensor([])
def forward(self, x):
x_copy = x.detach() # avoid keeping autograd tape
x_copy = x_copy.to(self.epoch_activation_min.dtype)
x_copy = self._calculate_range_stats(x_copy)
x_copy = self._calculate_min_max_stats(x_copy)
x_copy = self._calculate_percentile_stats(x_copy)
# return the passed in the value
return x
def _calculate_range_stats(self, x_copy):
r"""Calculates and stores range stats with forward values.
Args
x_copy: A copy of the forward data
Returns the passed in x_copy
"""
# get the min, max values of the data
min_val_cur, max_val_cur = torch.aminmax(x_copy)
# calculate new epoch range values
epoch_min_val = torch.min(self.epoch_activation_min, min_val_cur)
epoch_max_val = torch.max(self.epoch_activation_max, max_val_cur)
self.epoch_activation_min.copy_(epoch_min_val)
self.epoch_activation_max.copy_(epoch_max_val)
# calculate the average batch activation range
current_batch_range = max_val_cur - min_val_cur
new_range = (
self.average_batch_activation_range * self.num_batches_tracked
+ current_batch_range
) / (self.num_batches_tracked + 1)
self.average_batch_activation_range = new_range
self.num_batches_tracked += 1 # new batch was processed
return x_copy
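# Small numeric sketch of the running average update above (assumed example values): if 2
# batches have been tracked with average_batch_activation_range == 4.0 and the new batch
# has a range of 7.0, the update yields (4.0 * 2 + 7.0) / 3 == 5.0 as the new average.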
def _calculate_min_max_stats(self, x_copy):
r"""Calculates and stores the per_channel min, max stats with forward values.
Does calculation based on channel axis: self.ch_axis
Args
x_copy: A copy of the forward data
Returns the passed in x_copy
"""
# get the current min and max vals
min_val = self.min_val
max_val = self.max_val
x_dim = x_copy.size()
new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
new_axis_list[self.ch_axis] = 0
new_axis_list[0] = self.ch_axis
y = x_copy.permute(new_axis_list)
# Need to match dtype of min/max because the updates to buffers
# are done in place and types need to match for comparisons
y = y.to(self.min_val.dtype)
y = torch.flatten(y, start_dim=1)
if min_val.numel() == 0 or max_val.numel() == 0:
min_val, max_val = torch.aminmax(y, dim=1)
else:
min_val_cur, max_val_cur = torch.aminmax(y, dim=1)
min_val = torch.min(min_val_cur, min_val)
max_val = torch.max(max_val_cur, max_val)
self.min_val.resize_(min_val.shape)
self.max_val.resize_(max_val.shape)
self.min_val.copy_(min_val)
self.max_val.copy_(max_val)
return x_copy
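# Illustrative sketch of the permute + flatten + aminmax pattern used above, assuming
# ch_axis == 1 and an (N, C, H, W) activation:
#   x = torch.randn(2, 3, 4, 4)
#   y = x.permute(1, 0, 2, 3)                    # move channels to dim 0
#   y = torch.flatten(y, start_dim=1)            # shape (3, 2 * 4 * 4)
#   min_val, max_val = torch.aminmax(y, dim=1)   # one (min, max) pair per channel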
def _calculate_percentile_stats(self, x_copy):
r"""Calculates and stores the per_channel percentile stats with forward values.
Does calculation based on channel axis: self.ch_axis
Args
x_copy: A copy of the forward data
Returns the passed in x_copy
"""
# get the dimension of the copy
x_dim = x_copy.size()
new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
new_axis_list[self.ch_axis] = 0
new_axis_list[0] = self.ch_axis
y = x_copy.permute(new_axis_list)
# Need to match dtype of min/max because the updates to buffers
# are done in place and types need to match for comparisons
y = y.to(self.min_val.dtype)
y = torch.flatten(y, start_dim=1)
y = y.to(self.min_val.dtype)
# find the percentile values along the axis
# we want both 100th percentile and comp_percentile
# we also want to find 0th quartile to see if we have constant channel
quantiles_list = [0, self.comp_percentile, 1.00]
quantiles_to_find = torch.tensor(quantiles_list, dtype=self.min_val.dtype)
# find the quantiles
desired_quantiles = torch.quantile(y, quantiles_to_find, dim=1, interpolation="lower") # after permute + flatten, dim 1 holds the per channel data
zero_quantile = desired_quantiles[0]
comp_quantile = desired_quantiles[1]
hundredth_percentile = desired_quantiles[2]
# if any of the channels have 0s, we ignore that channel for this calculation
any_non_zero_quantile_value: torch.Tensor = (comp_quantile != torch.tensor([0])) | (hundredth_percentile != torch.tensor([0]))
any_non_zero_quantile_value = any_non_zero_quantile_value.int() # transform boolean values to int values
# we also check if we have a constant channel
any_constant_channels: torch.Tensor = (hundredth_percentile - zero_quantile) == torch.tensor([0])
any_constant_channels = any_constant_channels.int() # transform boolean values to int values
# possibilities to get nan as an answer
# will ignore any of these three cases with 0s and just not deal with them for now
# case (1) 0 in numerator: issue if 0 is largest, all negative, and rest are really negative
# case (2) 0 in denominator: is possible unless case 3, we just ignore
# case (3) 0 in both: not outlier, channel just kinda useless, ignore
# get the ratio and get rid of nan values
quantile_ratios = hundredth_percentile / comp_quantile
quantile_ratios = torch.nan_to_num(quantile_ratios)
# update averages, remembering to only update if didn't have zeros
ratio_if_not_zero = any_non_zero_quantile_value * quantile_ratios
# if num_batches and average_ratio are not initialized, we want to initialize them
if self.percentile_batches_tracked.shape[0] == 0 or self.average_percentile_ratio.shape[0] == 0:
self.percentile_batches_tracked = torch.zeros_like(any_non_zero_quantile_value)
self.average_percentile_ratio = torch.zeros_like(ratio_if_not_zero)
# also initialize the constant channel var if that is not initialized separately
if self.constant_channels.shape[0] == 0:
self.constant_channels = torch.zeros_like(any_constant_channels)
# get current num batches and average ratio
num_batches = self.percentile_batches_tracked
average_ratio = self.average_percentile_ratio
# calculate new_number of batches, new_ratios, and get rid of nans because of 0 size batches
new_number_of_batches: torch.Tensor = num_batches + any_non_zero_quantile_value
new_ratios: torch.Tensor = ((average_ratio * num_batches) + ratio_if_not_zero) / new_number_of_batches
new_ratios = torch.nan_to_num(new_ratios)
# update the per channel counts of constant batches
new_constant_count: torch.Tensor = self.constant_channels + any_constant_channels
# update the values locally
self.percentile_batches_tracked.copy_(new_number_of_batches)
self.average_percentile_ratio.copy_(new_ratios)
self.constant_channels.copy_(new_constant_count)
return x_copy
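# Small sketch of the outlier ratio computed above (assumed example values): with
# comp_percentile == 0.9, a channel whose 90th percentile value is 1.0 but whose maximum
# (100th percentile) is 50.0 yields a ratio of 50.0, which raises that channel's
# average_percentile_ratio and flags it as a potential outlier channel.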
@torch.jit.export
def get_batch_to_epoch_ratio(self):
epoch_activation_range = self.epoch_activation_max - self.epoch_activation_min
if epoch_activation_range == torch.tensor(float(0)):
raise ValueError("Range for Epoch is 0")
elif epoch_activation_range == torch.tensor(float("inf")):
raise ValueError(
"No data has been run through observer or infinity value present"
)
else:
return self.average_batch_activation_range / epoch_activation_range
@torch.jit.export
def reset_batch_and_epoch_values(self):
# set all the values back to their original defaults for a new epoch
self.num_batches_tracked = 0
self.average_batch_activation_range = torch.tensor(float(0))
self.epoch_activation_min = torch.tensor(float("inf"))
self.epoch_activation_max = torch.tensor(float("-inf"))
self.min_val = torch.tensor([])
self.max_val = torch.tensor([])
self.average_percentile_ratio = torch.tensor([])
self.percentile_batches_tracked = torch.tensor([])
self.constant_channels = torch.tensor([])
@torch.jit.export
def calculate_qparams(self):
raise Exception(
"calculate_qparams should not be called for ModelReportObserver"
)
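# Illustrative standalone usage sketch of ModelReportObserver outside of the full
# ModelReport flow; the input tensors below are assumed example data:
#   obs = ModelReportObserver(ch_axis=1)
#   for _ in range(4):
#       obs(torch.randn(8, 3, 16, 16))   # forward returns the input unchanged
#   s_ratio = obs.get_batch_to_epoch_ratio()
#   per_channel_min, per_channel_max = obs.min_val, obs.max_val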
| pytorch-master | torch/ao/quantization/fx/_model_report/model_report_observer.py |
| pytorch-master | torch/ao/quantization/fx/_model_report/__init__.py |
from typing import Any, Dict, Set, Tuple
from collections import OrderedDict
import torch
from torch.ao.quantization.fx._model_report.detector import (
DetectorBase,
DETECTOR_OBS_ARGS_KEY,
DETECTOR_OBS_TO_INSERT_KEY,
DETECTOR_IS_POST_OBS_KEY,
DETECTOR_TARGET_NODE_KEY
)
from torch.ao.quantization.fx._model_report.model_report_visualizer import ModelReportVisualizer
from torch.ao.quantization.fx.graph_module import GraphModule
from torch.ao.quantization.observer import ObserverBase
from torch.ao.quantization.qconfig_mapping import QConfigMapping
class ModelReport:
r"""
The ModelReport class aims to provide users an easy way to diagnose issues that they run into
with their models. The class works with all traceable GraphModules to help diagnose issues,
though the requirements on the type of model more-so depends on the specific report the user
is trying to generate. With respect to the reports, the ModelReport class is initialized with
a set of Detector classes, each of which generates reports on quantization configuration
issues a user might have.
Currently supports generating reports on:
- Suggestions for per-channel vs. per-tensor quantization (nn.Module)
- Suggestions for dynamic vs static quantization for linear layers (Graph Modules)
- Suggestions for input-weight equalization for linear and conv layers (Graph Modules)
- Suggestions for outlier detection for all layers (Graph Modules)
The ModelReport class has the primary functionality of inserting observers (primarily the ModelReportObserver)
where needed for each detector to gather the information it needs, and then after calibration, the ModelReport
class compiles the report generated by each Detector class into a single report to return to the user. It also
has the capability to remove all the observers it inserted as well.
* :attr:`_model` The model we wish to generate the report for. Must be a traceable GraphModule
* :attr:`_desired_report_detectors` The set of Detectors representing desired reports from the ModelReport class
Make sure that these are all unique types of detectors [do not have more than 1 of the same class]
* :attr:`_desired_detector_names` The set of detector names of the _desired_report_detectors.
This set is generated by calling the get_detector_name() of each detector
* :attr:`_detector_name_to_observer_fqns` The mapping from each detector to fqns of observers of interest
The purpose of this is to keep track of what observers were inserted for each detector, so that they
can be removed at the end if desired
* :attr:`_prepared_flag` A boolean flag that keeps track of whether we have prepared the model or not
This is to ensure we only insert observers once with the ModelReport instance
* :attr:`_removed_observers` A boolean to track if we have removed observers already
The purpose is to ensure we don't attempt to remove observers twice with the same ModelReport
instance. This also allows the functionality where we can generate the report multiple times
as long as we haven't removed the observers yet.
Note:
This class was initially designed to work with the Fx Graph Mode workflow in mind. However,
full functionality is available as long as there is a traceable GraphModule that is being used.
One method to get a traceable GraphModule without going through the Fx workflow is to use
the QuantizationTracer class.
General Flow for Fx workflow:
1.) Initialize ModelReport object with reports of interest by passing in initialized detector objects and model
2.) Prepare your model with prepare_fx
3.) Call model_report.prepare_detailed_calibration to add relevant observers
4.) Calibrate your model with data
5.) Call model_report.generate_model_report to generate the report and optionally remove added observers
Optional
6.) Call model_report.generate_visualizer to get a ModelReportVisualizer instance
7.) To help in parsing report information and debugging, view report info as a:
- Table
- Histogram
- Line plot
8.) Call model_report.generate_qconfigs to generate the qconfigs based on the report suggestions
Example (with QuantizationTracer):
>>> # xdoctest: +SKIP
>>> # get the necessary qconfig
>>> config = PrepareCustomConfig()
>>> skipped_module_names, skipped_module_classes = get_skipped_module_name_and_classes(config, False)
>>> # initialize our model and get GraphModule
>>> model = SomeModel()
>>> tracer = QuantizationTracer(skipped_module_names, skipped_module_classes)
>>> graph_module = GraphModule(model, tracer.trace(model))
>>> # get our set of detectors and ModelReport instance
>>> detector_set = set([DynamicStaticDetector(tolerance=0.5), InputWeightEqualizationDetector(ratio_threshold=0.7)])
>>> tracer_reporter = ModelReport(graph_module, detector_set)
>>> # now we insert the observers and calibrate the model
>>> tracer_model_with_observers = tracer_reporter.prepare_detailed_calibration()
>>> for i in range(num_calibration_batches):
>>> example_input = get_calibration_input()
>>> tracer_model_with_observers(example_input)
>>> # finally we generate the reports and optionally remove the observers we inserted
>>> reports = tracer_reporter.generate_model_report(remove_inserted_observers=True)
>>> # Optional: we can generate the qconfig mapping based on the suggestions
>>> qconfig_mapping = tracer_reporter.generate_qconfig_mapping()
>>> # Optional: we can generate the equalization mapping based on the suggestions
>>> equalization_mapping = tracer_reporter.generate_equalization_mapping()
>>> # Optional: we get a ModelReportVisualizer instance to do any visualizations desired
>>> model_report_visualizer = tracer_reporter.generate_visualizer()
"""
def __init__(self, model: GraphModule, desired_report_detectors: Set[DetectorBase]):
if len(desired_report_detectors) == 0:
raise ValueError("Should include at least 1 desired report")
# keep track of the model we wish to generate report for
self._model: GraphModule = model
# keep the reports private so they can't be modified
self._desired_report_detectors = desired_report_detectors
self._desired_detector_names = set([detector.get_detector_name() for detector in desired_report_detectors])
# keep a mapping of desired reports to observers of interest
# this is to get the readings, and to remove them, can create a large set
# this set can then be used to traverse the graph and remove added observers
self._detector_name_to_observer_fqns: Dict[str, Set[str]] = {}
# initialize each report to have empty set of observers of interest
for desired_report in self._desired_detector_names:
self._detector_name_to_observer_fqns[desired_report] = set([])
# flags to ensure that we can only prepare and remove observers once
self._prepared_flag = False
self._removed_observers = False
# store the reports that we generated for visualization purposes
# initially empty since no reports generated
self._generated_reports: Dict[str, Dict] = {}
def get_desired_reports_names(self) -> Set[str]:
""" Returns a copy of the desired reports for viewing """
return self._desired_detector_names.copy()
def get_observers_of_interest(self) -> Dict[str, Set[str]]:
""" Returns a copy of the observers of interest for viewing """
return self._detector_name_to_observer_fqns.copy()
def prepare_detailed_calibration(self) -> GraphModule:
r"""
Takes in a graph model and inserts the following observers:
- ModelReportObserver
Each observer is inserted based on the desired_reports into the relevant locations
Right now, each report in self._desired_detector_names has independent insertions
However, if a module already has an Observer of the same type, the insertion will not occur
This is because all Observers of the same type collect the same information, so it would be redundant
Returns the same GraphModule with the observers inserted
"""
# if already prepared once, cannot prepare again
if self._prepared_flag:
raise ValueError("Already ran preparing detailed callibration. Run the report generation next after callibration.")
# loop through each detector, find where placements should be, and keep track
insert_observers_fqns: Dict[str, Any] = {}
for detector in self._desired_report_detectors:
# determine observer points for each detector
obs_fqn_to_info = detector.determine_observer_insert_points(self._model)
# map each insert point to the observer to use
insert_observers_fqns.update(obs_fqn_to_info)
# update the set of observers this report cares about
self._detector_name_to_observer_fqns[detector.get_detector_name()] = set(obs_fqn_to_info.keys())
# now insert all the observers at their desired locations
for observer_fqn in insert_observers_fqns:
target_node = insert_observers_fqns[observer_fqn][DETECTOR_TARGET_NODE_KEY]
insert_obs = insert_observers_fqns[observer_fqn][DETECTOR_OBS_TO_INSERT_KEY]
insert_post = insert_observers_fqns[observer_fqn][DETECTOR_IS_POST_OBS_KEY]
observer_args = insert_observers_fqns[observer_fqn][DETECTOR_OBS_ARGS_KEY]
self._insert_observer_around_module(
observer_fqn, target_node, insert_obs, observer_args, insert_post
)
self._prepared_flag = True
return self._model
def _insert_observer_around_module(
self,
obs_fqn: str,
target_node: torch.fx.node.Node,
obs_to_insert: ObserverBase,
observer_args: Tuple,
insert_post: bool
):
r"""
Helper function that inserts the observer into both the graph structure and the module of the model
Args
obs_fqn (str): The fully qualified name of the observer we want to insert
target_node (torch.fx.node.Node): The node in model we are inserting observers around
obs_to_insert (ObserverBase): The observer we are inserting around target_node
observer_args (Tuple): The arguments we want to pass into the observer
insert_post (bool): whether this is meant to be a post observer for this node
"""
# if we are inserting post, then our target node is the next node
if insert_post:
target_node = target_node.next
with self._model.graph.inserting_before(target_node):
self._model.add_submodule(obs_fqn, obs_to_insert)
self._model.graph.create_node(op="call_module", target=obs_fqn, args=observer_args)
# recompile model after inserts are made
self._model.recompile()
def _get_node_from_fqn(self, node_fqn: str) -> torch.fx.node.Node:
r"""
Takes in a node fqn and returns the node based on the fqn
Args
node_fqn (str): The fully qualified name of the node we want to find in model
Returns the Node object of the given node_fqn otherwise returns None
"""
node_to_return = None
for node in self._model.graph.nodes:
# if the target matches the fqn, it's the node we are looking for
if node.target == node_fqn:
node_to_return = node
break
if node_to_return is None:
raise ValueError("The node_fqn is was not found within the module.")
# assert for MyPy
assert isinstance(node_to_return, torch.fx.node.Node)
return node_to_return
def generate_model_report(
self, remove_inserted_observers: bool
) -> Dict[str, Tuple[str, Dict]]:
r"""
Generates all the requested reports.
Note:
You should have calibrated the model with relevant data before calling this
The reports generated are specified by the desired_report_detectors passed in at initialization
Can optionally remove all the observers inserted by the ModelReport instance
Args:
remove_inserted_observers (bool): True to remove the observers inserted by this ModelReport instance
Returns a mapping of each desired report name to a tuple with:
The textual summary of that report information
A dictionary containing relevant statistics or information for that report
Note:
Throws exception if we try to generate report on model we already removed observers from
Throws exception if we try to generate report without preparing for calibration
"""
# if we haven't prepped model for calibration, then we shouldn't generate report yet
if not self._prepared_flag:
raise Exception("Cannot generate report without preparing model for calibration")
# if we already removed the observers, we cannot generate report
if self._removed_observers:
raise Exception("Cannot generate report on model you already removed observers from")
# keep track of all the reports of interest and their outputs
reports_of_interest = {}
for detector in self._desired_report_detectors:
# generate the individual report for the detector
report_output = detector.generate_detector_report(self._model)
reports_of_interest[detector.get_detector_name()] = report_output
# if user wishes to remove inserted observers, go ahead and remove
if remove_inserted_observers:
self._removed_observers = True
# get the set of all Observers inserted by this instance of ModelReport
all_observers_of_interest: Set[str] = set([])
for desired_report in self._detector_name_to_observer_fqns:
observers_of_interest = self._detector_name_to_observer_fqns[desired_report]
all_observers_of_interest.update(observers_of_interest)
# go through all_observers_of_interest and remove them from the graph and model
for observer_fqn in all_observers_of_interest:
# remove the observer from the model
self._model.delete_submodule(observer_fqn)
# remove the observer from the graph structure
node_obj = self._get_node_from_fqn(observer_fqn)
if node_obj:
self._model.graph.erase_node(node_obj)
else:
raise ValueError("Node no longer exists in GraphModule structure")
# remember to recompile the model
self._model.recompile()
# save the generated reports for visualization purposes
saved_reports: Dict[str, Dict] = {
report_name : report_tuple[1] for report_name, report_tuple in reports_of_interest.items()
}
self._generated_reports = saved_reports
# return the reports of interest
return reports_of_interest
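# Illustrative sketch: consuming the value returned by generate_model_report().
# `model_report` is assumed to be an already prepared and calibrated ModelReport instance.
#   reports = model_report.generate_model_report(remove_inserted_observers=True)
#   for detector_name, (text_summary, stats_dict) in reports.items():
#       print(detector_name)
#       print(text_summary)   # human readable suggestions
#       # stats_dict maps module fqns to the raw collected statistics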
def _is_same_info_for_same_key(self, info_dict_a: Dict, info_dict_b: Dict) -> bool:
r"""
Takes in two dictionaries and ensures that any common keys between the two have the same
values.
Args:
info_dict_a (Dict): First dictionary we wish to compare
info_dict_b (Dict): Second dictionary we wish to compare
Returns True if all shared keys have same values, false otherwise
"""
# get the set of keys for both
dict_a_keys: Set = set(info_dict_a.keys())
dict_b_keys: Set = set(info_dict_b.keys())
# get the intersection keys and check if same value for both dicts
intersecting_keys: Set = dict_a_keys.intersection(dict_b_keys)
for key in intersecting_keys:
dict_a_val = info_dict_a[key]
dict_b_val = info_dict_b[key]
# if it's a tensor we have to handle separately
if type(dict_a_val) == torch.Tensor:
# if dict_b_val not tensor, automatically false
if type(dict_b_val) != torch.Tensor or sum(dict_a_val != dict_b_val) != 0:
return False
else:
# for non-tensor vals
if dict_a_val != dict_b_val:
return False
# if no non matching shared keys found, return true
return True
def _reformat_reports_for_visualizer(self) -> OrderedDict:
r"""
Takes the generated reports and reformats them into the format that is desired by the
ModelReportVisualizer
Returns an OrderedDict mapping module_fqns to their features
"""
# we want to reorder and reformat the information so it is ordered in the same
# order that the modules appear in the model
# first create new dict with all modules as keys and features under respective module
module_fqns_to_features: Dict[str, Dict] = {}
for report_name in self._generated_reports:
# get mod -> feature dict and go through
module_info = self._generated_reports[report_name]
for module_fqn in module_info:
# check if already in our accumulation dict
if module_fqn in module_fqns_to_features:
# we merge all the features together
new_info: Dict = module_info[module_fqn]
present_info: Dict = module_fqns_to_features[module_fqn]
# merge them together into the new unioned dict
# same features keys -> same info, so okay if override
# do safety check to make sure shared keys have same info
if self._is_same_info_for_same_key(new_info, present_info):
module_fqns_to_features[module_fqn] = {**new_info, **present_info}
else:
error_str = "You have the same key with different values across detectors. "
error_str += "Someone incorrectly implemented a detector with conflicting keys to exisiting detectors."
raise ValueError(error_str)
else:
# we just set it
module_fqns_to_features[module_fqn] = module_info[module_fqn]
# our ordered dict so that modules can be ordered in order of how they appear in model
features_by_module: OrderedDict[str, Dict] = OrderedDict()
# we loop through modules in graph in order
for fqn, module in self._model.named_modules():
# find that fqn in fqns_to_features
if fqn in module_fqns_to_features:
# add it to our ordered dict
features_by_module[fqn] = module_fqns_to_features[fqn]
# return the ordered dict of info we created
return features_by_module
def generate_visualizer(self) -> ModelReportVisualizer:
r"""
Generates a ModelReportVisualizer instance using the reports generated
by the generate_model_report() method.
Returns the generated ModelReportVisualizer instance initialized
Note:
Throws exception if attempt to get visualizers without generating report
"""
# check if user has generated reports at least once
if len(self._generated_reports) == 0:
raise Exception("Unable to generate visualizers without first generating reports")
# get the ordered dict mapping modules to their full set of collected features / stats
module_fqns_to_features: OrderedDict = self._reformat_reports_for_visualizer()
# create and return ModelReportVisualizer instance
visualizer: ModelReportVisualizer = ModelReportVisualizer(module_fqns_to_features)
return visualizer
def generate_qconfig_mapping(self) -> QConfigMapping:
r"""
Generates a QConfigMapping based on the suggestions of the
ModelReport API. The generated mapping encompasses all the
different types of feedback from the different detectors
all into one place.
These configs are based on the suggestions provided by the ModelReport API
and can only be generated once the reports have been generated.
Returns a QConfigMapping for the quantization configuration
"""
pass
def generate_equalization_mapping(self) -> QConfigMapping:
r"""
Generates a QConfigMapping based on the suggestions of the
ModelReport API for equalization. The generated mapping encompasses all the
different types of feedback from the input-weight equalization detector.
These configs are based on the suggestions provided by the ModelReport API
and can only be generated once the reports have been generated.
Returns a QConfigMapping for the equalization configuration
"""
pass
| pytorch-master | torch/ao/quantization/fx/_model_report/model_report.py |
"""
Contains model level utilities which can be aware of the AutoQuantizationState
type.
"""
import torch
import torch.nn.functional as F
toq = torch.ops.quantized
from .mappings import conv_ops, conv_prepack_fns
from .quantization_state import AutoQuantizationState
from torch.quantization import (
ObserverBase,
FakeQuantizeBase,
)
from typing import Optional
def pack_weights_for_functionals(
module: torch.nn.Module,
) -> None:
"""
Packs weights for functionals seen while tracing.
Note: weight packing for modules is handled by eager mode quantization
flow.
"""
if hasattr(module, '_auto_quant_state'):
qstate: AutoQuantizationState = module._auto_quant_state # type: ignore[assignment]
# find any ops which need packing
for idx, seen_q_op_info in qstate.idx_to_seen_q_op_infos.items():
packable_args_len = len(seen_q_op_info.packable_tensor_idx_to_name) + \
len(seen_q_op_info.packable_nontensor_idx_to_arg)
if packable_args_len == 0:
continue
if seen_q_op_info.type in conv_ops:
# fetch all the info needed for packed params
assert seen_q_op_info.packable_tensor_idx_to_name[1] is not None
weight = getattr(module, seen_q_op_info.packable_tensor_idx_to_name[1])
assert seen_q_op_info.packable_tensor_idx_to_name[2] is not None
bias = getattr(module, seen_q_op_info.packable_tensor_idx_to_name[2])
stride = seen_q_op_info.packable_nontensor_idx_to_arg[3]
padding = seen_q_op_info.packable_nontensor_idx_to_arg[4]
dilation = seen_q_op_info.packable_nontensor_idx_to_arg[5]
groups = seen_q_op_info.packable_nontensor_idx_to_arg[6]
# quantize the weight
# TODO: create weight observers from qconfig.weight
assert seen_q_op_info.input_tensor_infos[1] is not None
weight_tensor_id = seen_q_op_info.input_tensor_infos[1].id
weight_obs = qstate.tensor_id_to_observer[str(weight_tensor_id)]
assert isinstance(weight_obs, (ObserverBase, FakeQuantizeBase))
scale, zp = weight_obs.calculate_qparams()
qweight = torch.quantize_per_tensor(weight, scale, zp, torch.qint8)
# create the packed params
packed_params = conv_prepack_fns[seen_q_op_info.type](
qweight, bias, stride, padding, dilation, groups)
# attach to module
name_idx = 0
prefix = "_packed_params_"
name_candidate = f"{prefix}{name_idx}"
while hasattr(module, name_candidate):
name_idx += 1
name_candidate = f"{prefix}{name_idx}"
setattr(module, name_candidate, packed_params)
qstate.idx_to_packed_weight_name[idx] = name_candidate
# TODO: delete the original weights
elif seen_q_op_info.type == F.linear:
# fetch all the info needed for packed params
def get_tensor_param_name(idx: int, name: str) -> Optional[str]:
param_name = seen_q_op_info.packable_tensor_idx_to_name.get(idx, None)
if param_name is not None:
return param_name
return seen_q_op_info.packable_tensor_kwarg_name_to_name.get(name, None)
weight_name = get_tensor_param_name(1, 'weight')
assert weight_name is not None
weight = getattr(module, weight_name)
bias_name = get_tensor_param_name(2, 'bias')
bias = getattr(module, bias_name) if bias_name is not None else None
# quantize the weight
# TODO: create weight observers from qconfig.weight
assert seen_q_op_info.input_tensor_infos[1] is not None
weight_tensor_id = seen_q_op_info.input_tensor_infos[1].id
weight_obs = qstate.tensor_id_to_observer[str(weight_tensor_id)]
assert isinstance(weight_obs, (ObserverBase, FakeQuantizeBase))
scale, zp = weight_obs.calculate_qparams()
qweight = torch.quantize_per_tensor(weight, scale, zp, torch.qint8)
# create the packed params
packed_params = toq.linear_prepack(qweight, bias)
# attach to module
name_idx = 0
prefix = "_packed_params_"
name_candidate = f"{prefix}{name_idx}"
while hasattr(module, name_candidate):
name_idx += 1
name_candidate = f"{prefix}{name_idx}"
setattr(module, name_candidate, packed_params)
qstate.idx_to_packed_weight_name[idx] = name_candidate
# TODO: delete the original weights
for _, child in module.named_children():
pack_weights_for_functionals(child)
def attach_scale_zp_values_to_model(
module: torch.nn.Module,
) -> None:
"""
Calculates the scale and zero_point from each observer and attaches
these values to the parent module. This is done to avoid recalculating
these values at inference.
"""
if hasattr(module, '_auto_quant_state'):
qstate: AutoQuantizationState = module._auto_quant_state # type: ignore[assignment]
for tensor_id, observer in qstate.tensor_id_to_observer.items():
activation_int8_or_int32_quantized = \
observer.dtype in [torch.quint8, torch.qint8, torch.qint32]
if activation_int8_or_int32_quantized:
scale, zp = observer.calculate_qparams()
# tensor_id_to_observer is a ModuleDict which has to have string keys
# tensor_id_to_scale_zp is a normal dict which can have int keys
qstate.tensor_id_to_scale_zp[int(tensor_id)] = (scale, zp)
qstate.tensor_id_to_observer.clear()
for _, child in module.named_children():
attach_scale_zp_values_to_model(child)
def attach_op_convert_info_to_model(
module: torch.nn.Module,
) -> None:
"""
Calculates the info needed to convert each op and attaches
it to the parent module. This is done to avoid recalculating these values
at inference.
"""
if hasattr(module, '_auto_quant_state'):
qstate: AutoQuantizationState = module._auto_quant_state # type: ignore[assignment]
for _, seen_q_op_info in qstate.idx_to_seen_q_op_infos.items():
qstate.idx_to_op_convert_info[seen_q_op_info.idx] = \
qstate.calculate_op_convert_info(seen_q_op_info)
for _, child in module.named_children():
attach_op_convert_info_to_model(child)
def attach_output_convert_info_to_model(
module: torch.nn.Module,
) -> None:
"""
Calculates the info needed to perform the module outputs hook
and attaches it to the parent module. This is done to avoid recalculating
these values at inference.
"""
if hasattr(module, '_auto_quant_state'):
qstate: AutoQuantizationState = module._auto_quant_state # type: ignore[assignment]
qstate.set_needs_dtype_transform_on_outputs()
for _, child in module.named_children():
attach_output_convert_info_to_model(child)
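# Illustrative sketch of how these helpers are typically chained at convert time; the exact
# call site lives in the DBR convert flow, and the ordering below is an assumption based on
# the data dependencies described in the docstrings (weights must be packed while the weight
# observers still exist, and scale/zp must be attached before op convert info is computed):
#   pack_weights_for_functionals(model)
#   attach_scale_zp_values_to_model(model)
#   attach_op_convert_info_to_model(model)
#   attach_output_convert_info_to_model(model)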
| pytorch-master | torch/ao/quantization/_dbr/model_utils.py |
import copy
import math
import operator
from types import ModuleType
from typing import Callable, Any, Tuple, Dict
import torch
import torch.fx
from .mappings import conv_ops
from .quantization_state import AutoQuantizationState
from .utils import (
get_packable_arg_idxs,
AutoQuantizationStateModuleDict,
)
class AllModuleTracer(torch.fx.Tracer):
"""
This is a tracer that knows how to convert quantizeable ops with
dynamic dispatch into their corresponding quantized subgraphs.
"""
node_name_to_dtype: Dict[str, Any]
def __init__(self, autowrap_modules: Tuple[ModuleType] = (math, ),
autowrap_functions: Tuple[Callable, ...] = (),
param_shapes_constant: bool = False) -> None:
super().__init__(
autowrap_modules, autowrap_functions,
param_shapes_constant)
self.node_name_to_dtype = {}
def is_leaf_module(self, m, module_qualified_name) -> bool:
return True
def _maybe_update_args_with_quants(self, args, arg_quant_infos, target):
# insert quants for inputs, if needed
if len(arg_quant_infos):
new_args = []
if target == torch.ops.quantized.cat:
new_first_arg = []
for idx, input_arg_quant_info in enumerate(arg_quant_infos):
if input_arg_quant_info is None:
new_first_arg.append(args[0][idx])
else:
# create a quant node
scale, zp, dtype = input_arg_quant_info
quant = super().create_node(
'call_function', torch.quantize_per_tensor,
(args[0][idx], scale.item(), zp.item(), dtype), {}, None, None)
new_first_arg.append(quant)
new_args = [new_first_arg, *args[1:]]
elif target == torch.cat:
return args
else:
# TODO: this is not handling non-tensor tuple args (for example,
# dilation in conv2d) correctly, it just happens to work but
# needs a fix.
for idx, arg in enumerate(args):
input_arg_quant_info = arg_quant_infos[idx]
if input_arg_quant_info is None:
new_args.append(args[idx])
else:
# create a quant node
scale, zp, dtype = input_arg_quant_info
quant = super().create_node(
'call_function', torch.quantize_per_tensor,
(args[idx], scale.item(), zp.item(), dtype), {}, None, None)
new_args.append(quant)
args = tuple(new_args)
return args
def _maybe_update_args_with_dequants(self, args):
new_args = []
for arg in args:
if (
isinstance(arg, torch.fx.Node) and
arg.name in self.node_name_to_dtype and
self.node_name_to_dtype[arg.name] != torch.float
):
dequant = torch.fx.Proxy(arg).dequantize().node
new_args.append(dequant)
else:
new_args.append(arg)
return tuple(new_args)
def _maybe_update_outputs(self, outputs, output_qtensor_infos, output_dtypes):
# TODO(future PR): handle other output types
assert len(outputs) == 1 and len(output_qtensor_infos) == 1
if output_dtypes is not None:
assert len(output_dtypes) == 1
output_dtype = output_dtypes[0]
qtensor_info = output_qtensor_infos[0]
if qtensor_info.inf_dtype != output_dtype:
assert output_dtype is torch.float, \
'non-float dtypes not handled yet'
dequant = torch.fx.Proxy(outputs[0]).dequantize().node
outputs = (dequant,)
return outputs
def create_node(self, kind, target, args, kwargs, name=None, type_expr=None):
if target == operator.add:
target = torch.add
if target == operator.mul:
target = torch.mul
# TODO(future PR): move this into mappings
if target == 'add':
target = torch.add
kind = 'call_function'
if target == 'mul':
target = torch.mul
kind = 'call_function'
dtype_to_use = torch.float
if kind == 'call_function' or kind == 'call_method':
qstate = self.root._auto_quant_state
assert isinstance(qstate, AutoQuantizationState)
if qstate.cur_op_needs_hooks(target):
# need to test this path with call_method
assert kind == 'call_function'
qstate.validate_cur_op(target)
old_target = target
# TODO use arg_dequant_infos
new_target, arg_quant_infos, arg_dequant_infos, packed_param_name, additional_kwargs, _, _ = \
qstate.get_op_convert_info(target)
for k in ('scale', 'zero_point'):
if k in additional_kwargs:
additional_kwargs[k] = additional_kwargs[k].item()
if new_target is not None:
target = new_target
args = self._maybe_update_args_with_quants(args, arg_quant_infos, target)
# if there is a packed param, replace the relevant args
if packed_param_name is not None:
new_args_with_packed = []
packable_arg_idxs = get_packable_arg_idxs(old_target)
added_packed = False
for idx, arg in enumerate(args):
if packable_arg_idxs is not None and idx in packable_arg_idxs:
if not added_packed:
# packed_param = getattr(self.root, packed_param_name)
packed_param_node = super().create_node(
'get_attr', packed_param_name, (), {}, None, None)
new_args_with_packed.append(packed_param_node)
added_packed = True
else:
new_args_with_packed.append(arg)
args = tuple(new_args_with_packed)
# TODO move op-specific logic out of here
if target is torch.ops.quantized.linear:
def linear_rewrite_args(input, weight, bias=None):
return (input, weight,
additional_kwargs['scale'],
additional_kwargs['zero_point'])
args = linear_rewrite_args(*args, **kwargs)
kwargs = {}
elif old_target not in conv_ops or target in conv_ops:
kwargs.update(**additional_kwargs)
else:
new_args = [*args]
new_args.append(additional_kwargs['scale'])
new_args.append(additional_kwargs['zero_point'])
args = tuple(new_args)
dtype_to_use = qstate.get_cur_output_inf_dtype()
qstate.mark_cur_op_complete(old_target)
else:
args = self._maybe_update_args_with_dequants(args)
elif kind == 'call_module':
# TODO: handle fqn
module_instance = getattr(self.root, target)
qstate = self.root._auto_quant_state
assert isinstance(qstate, AutoQuantizationState)
if qstate.cur_op_needs_hooks(module_instance):
qstate.validate_cur_op(module_instance)
# TODO use arg_dequant_infos
_, arg_quant_infos, arg_dequant_infos, _packed_param_name, additional_kwargs, _, _ = \
qstate.get_op_convert_info(module_instance)
for k in ('scale', 'zero_point'):
if k in additional_kwargs:
additional_kwargs[k] = additional_kwargs[k].item()
args = self._maybe_update_args_with_quants(args, arg_quant_infos, target)
kwargs.update(**additional_kwargs)
dtype_to_use = qstate.get_cur_output_inf_dtype()
qstate.mark_cur_op_complete(module_instance)
else:
args = self._maybe_update_args_with_dequants(args)
elif kind == 'output':
qstate = self.root._auto_quant_state
assert isinstance(qstate, AutoQuantizationState)
output_qtensor_infos = qstate.get_output_qtensor_infos()
output_dtypes = qstate.get_output_dtypes()
args = self._maybe_update_outputs(
args, output_qtensor_infos, output_dtypes)
out = super().create_node(kind, target, args, kwargs, name, type_expr)
self.node_name_to_dtype[out.name] = dtype_to_use
return out
# This is a hack to enable nn.Sequential to properly work with this
# class.
# TODO(future): remove the hack
def call_module(self, m: torch.nn.Module, forward: Callable[..., Any], args : Tuple[Any, ...], kwargs : Dict[str, Any]) -> Any:
if isinstance(m, AutoQuantizationStateModuleDict):
return args[0]
return super().call_module(m, forward, args, kwargs)
# TODO(future PR): handle cases where the module is not symbolically
# traceable
def rewrite_for_scripting(mod: torch.nn.Module) -> torch.nn.Module:
"""
Makes the dynamically dispatched ops in `mod` explicit, so they
can be visible to `torch.jit.script`. In detail:
1. symbolically traces the forward with FX, without any leaves
2. for each quantizeable op with dynamic dispatch, rewrites the graph to
contain the quantized subgraph (quant if necessary, quantized op,
dequant if necessary).
3. recursively repeat (1 - 2) for each child
"""
def rewrite_helper(mod : torch.nn.Module):
copied = copy.copy(mod)
for name, child in mod.named_children():
setattr(copied, name, rewrite_helper(child))
if hasattr(mod, '_auto_quant_state') and (
mod._auto_quant_state.has_at_least_one_seen_q_op_info() or # type: ignore[union-attr, operator]
(mod._auto_quant_state.get_output_dtypes() is not None) # type: ignore[union-attr, operator]
):
copied._auto_quant_state.reset_to_new_call() # type: ignore[union-attr, operator]
graph = AllModuleTracer().trace(copied)
return torch.fx.GraphModule(copied, graph, copied.__class__.__name__)
else:
return copied
return rewrite_helper(mod)
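# Illustrative usage sketch (assumes `converted_model` is a DBR-converted model with an
# `_auto_quant_state` attached):
#   rewritten = rewrite_for_scripting(converted_model)
#   scripted = torch.jit.script(rewritten)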
| pytorch-master | torch/ao/quantization/_dbr/auto_trace_rewriter.py |
from typing import Dict, Tuple, Callable, Optional
from .mappings import known_function_fusion_patterns_and_replacements
from .utils import (
FusionInfo,
SeenQOpInfo,
get_users_of_seen_q_op_info,
get_producer_of_seen_q_op_info,
)
def _identity(x):
return x
def pattern_is_match(
fusion_pattern: Tuple[Callable, ...],
cur_seen_q_op_info: Optional[SeenQOpInfo],
idx_to_seen_q_op_infos: Dict[int, SeenQOpInfo],
) -> bool:
is_match = True
for el_type in fusion_pattern:
if cur_seen_q_op_info is not None and el_type == cur_seen_q_op_info.type:
next_seen_q_op_infos = get_users_of_seen_q_op_info(
idx_to_seen_q_op_infos, cur_seen_q_op_info)
if len(next_seen_q_op_infos) == 1:
cur_seen_q_op_info = next_seen_q_op_infos[0]
else:
cur_seen_q_op_info = None
continue
else:
is_match = False
break
return is_match
def get_seen_q_op_info_of_start_of_fusion(
seen_q_op_info_end_of_fusion: SeenQOpInfo,
idx_to_seen_q_op_infos: Dict[int, SeenQOpInfo],
) -> SeenQOpInfo:
assert seen_q_op_info_end_of_fusion.fusion_info is not None
cur_seen_q_op_info = seen_q_op_info_end_of_fusion
for idx in range(len(seen_q_op_info_end_of_fusion.fusion_info.pattern) - 1):
cur_seen_q_op_info = get_producer_of_seen_q_op_info(
idx_to_seen_q_op_infos, cur_seen_q_op_info) # type: ignore[assignment]
return cur_seen_q_op_info
def get_seen_q_op_info_of_end_of_fusion(
seen_q_op_info_start_of_fusion: SeenQOpInfo,
idx_to_seen_q_op_infos: Dict[int, SeenQOpInfo],
) -> SeenQOpInfo:
assert seen_q_op_info_start_of_fusion.fusion_info is not None
cur_seen_q_op_info = seen_q_op_info_start_of_fusion
for idx in range(len(seen_q_op_info_start_of_fusion.fusion_info.pattern) - 1):
users = get_users_of_seen_q_op_info(
idx_to_seen_q_op_infos, cur_seen_q_op_info)
cur_seen_q_op_info = users[0]
return cur_seen_q_op_info
def match_fusion_patterns(
idx_to_seen_q_op_infos: Dict[int, SeenQOpInfo],
):
"""
Matches fusion patterns to elements of `idx_to_seen_q_op_infos`.
Modifies them inplace if matches are found.
Note:
1. The matching is local to the ops seen by a single parent module,
it does not cross module boundaries. This is for simplicity, and
there are no plans to relax this at the moment.
2. The matching only supports linear patterns of ops where all of
of the arguments needed to execute the fusion are passed to the first
op in the sequence. This is for simplicity, and can be relaxed
in a future PR if there is a need.
3. Currently the matching does not look at non quantizeable ops,
this will be fixed in the next PR.
"""
# Walk the subgraphs and find the function fusions. For now, this is
# brute forced for simplicity, can be optimized later if necessary.
for idx, seen_q_op_info in idx_to_seen_q_op_infos.items():
for fusion_pattern, replacement in \
known_function_fusion_patterns_and_replacements.items():
is_match = pattern_is_match(
fusion_pattern, seen_q_op_info, idx_to_seen_q_op_infos)
if not is_match:
continue
cur_seen_q_op_info = seen_q_op_info
for idx in range(len(fusion_pattern)):
if idx > 0:
users = get_users_of_seen_q_op_info(
idx_to_seen_q_op_infos, cur_seen_q_op_info)
cur_seen_q_op_info = users[0]
is_first_element = idx == 0
is_last_element = idx == len(fusion_pattern) - 1
replacement_type = replacement if is_first_element \
else _identity
fusion_info = FusionInfo(
fusion_pattern, replacement_type, is_first_element,
is_last_element)
cur_seen_q_op_info.fusion_info = fusion_info
break
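# Illustrative sketch of the matcher's effect (`qstate` stands for a module's
# AutoQuantizationState). Assuming the mappings contain an entry such as
# {(torch.add, torch.relu): torch.ops.quantized.add_relu} (the exact contents
# live in `mappings.known_function_fusion_patterns_and_replacements`), then for
# a traced add -> relu chain:
#
#   match_fusion_patterns(qstate.idx_to_seen_q_op_infos)
#   # the SeenQOpInfo for add gets
#   #   FusionInfo(pattern, torch.ops.quantized.add_relu, is_first_element=True, is_last_element=False)
#   # the SeenQOpInfo for relu gets
#   #   FusionInfo(pattern, _identity, is_first_element=False, is_last_element=True)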
| pytorch-master | torch/ao/quantization/_dbr/function_fusion.py |
import torch
from torch.jit._recursive import wrap_cpp_module
def remove_redundant_aliases(scripted_module: torch.nn.Module):
"""
Running torch.jit.trace on a model with DBR quantization introduces
extra alias ops, because we use `torch.Tensor.as_subclass` and tracing
through this results in an `aten::alias` function call in TorchScript.
This pass removes these alias calls when it is safe to do so.
"""
module_c = scripted_module._c
module_c = \
torch._C._jit_pass_dbr_quant_remove_redundant_aliases(module_c) # type: ignore[attr-defined]
scripted_module = wrap_cpp_module(module_c)
return scripted_module
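# Example usage (a minimal sketch; `converted_model` and `example_inputs` are
# assumed to come from a DBR quantization convert step elsewhere):
#
#   scripted = torch.jit.trace(converted_model, example_inputs)
#   scripted = remove_redundant_aliases(scripted)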
| pytorch-master | torch/ao/quantization/_dbr/torchscript_utils.py |
| pytorch-master | torch/ao/quantization/_dbr/__init__.py |
import logging
from typing import Tuple, Any, List, Dict
import torch
from torch.fx.node import map_aggregate
from .quantization_state import (
AutoQuantizationState,
)
from .utils import (
trace_with_inputs,
is_leaf,
HookType,
get_torch_function_hook_type,
get_module_hook_type,
OpQuantizeabilityType,
AutoQuantizationStateModuleDict,
get_fqn_valid_for_module_dict_key,
)
from .model_utils import (
pack_weights_for_functionals,
attach_scale_zp_values_to_model,
attach_op_convert_info_to_model,
attach_output_convert_info_to_model,
)
from . import auto_trace_rewriter
from torch.ao.quantization import is_activation_post_process
from torch.ao.quantization.qconfig_mapping import QConfigMapping
logger = logging.getLogger('auto_trace')
logging.basicConfig(level=logging.DEBUG)
# logging.basicConfig(level=logging.INFO)
# enabling this tanks performance, make sure to disable for benchmarking
# TODO(future PR): clean this up
enable_logging = False
# enable_logging = True
def add_auto_observation(
model : torch.nn.Module,
qconfig_mapping: QConfigMapping,
example_inputs: Tuple[Any],
input_dtypes: Any = (torch.float,), # must be same structure as model inputs
prepare_custom_config_dict: Dict[str, Any] = None,
) -> torch.nn.Module:
if prepare_custom_config_dict is None:
prepare_custom_config_dict = {}
output_dtypes = prepare_custom_config_dict.get('output_dtypes', (torch.float,))
def convert_to_interception_proxy(x):
if isinstance(x, torch.Tensor):
return x.as_subclass(QuantizationPrepareTensorProxy) # type: ignore[arg-type]
else:
return x
cur_module = None
first_call = True
module_stack : List[torch.nn.Module] = []
# Counter for tensor IDs, will be modified inplace by quant state.
# This is used to track tensors from output ops to input ops. For example,
# if op_n had a tensor output with id=1, and op_n+2 had a tensor input with
# id=1, we know that the output of op_n is the input to op_n+2. Note,
# this is a list because it needs to incremented inplace.
qtensor_id = [0]
module_id_to_fqn: Dict[int, str] = {}
# Counter for global quantizeable ops, useful for intermediate activation
# logging.
global_op_idx = [0]
global_disable_torch_function_override = False
class QuantizationPrepareTensorProxy(torch.Tensor):
"""
An override of `torch.Tensor` to enable dynamic tracing for
quantization.
For each function with a `__torch_function__` override, this proxy does
the following for functions which need quantization:
1. calls `_auto_quant_state.validate_cur_op` to validate that
the currently seen op is the same as what was recorded during tracing
2. calls `_auto_quant_state.op_prepare_before_hook`
3. executes the original function
4. calls `_auto_quant_state.op_prepare_after_hook`
5. calls `_auto_quant_state.mark_cur_op_complete` to increment
the current op index in preparation for the next op
Otherwise, calls the original function.
"""
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
nonlocal global_disable_torch_function_override
if (
# global override means disable the override here
global_disable_torch_function_override or
# to prevent printing things from going into an infinite loop
func == torch.Tensor.__repr__ or
# we don't need to override getters in this framework
func.__name__ == '__get__'
):
return super().__torch_function__(func, types, args, kwargs)
# if we are in a function, the current module is always a parent
nonlocal cur_module
parent_module = cur_module
if enable_logging:
if not is_activation_post_process(parent_module):
# logging for insides of obs/fq is not useful for this framework
# fqn map does not contain observers, which is why we
# cannot always assume that FQN exists
fqn_for_logging = module_id_to_fqn.get(
id(parent_module), 'unknown') if parent_module else None
logger.debug(
f' fqn:{fqn_for_logging} _tf_ {str(func)} len_args {len(args)}')
nonlocal qtensor_id
kwargs = kwargs if kwargs else {}
hook_type = get_torch_function_hook_type(parent_module, func)
if hook_type is HookType.OP_HOOKS:
fqn = module_id_to_fqn[id(parent_module)] if parent_module else None
qstate = parent_module._auto_quant_state # type: ignore[attr-defined]
if not first_call:
qstate.validate_cur_op(func)
# run "before" hook
if first_call:
args, kwargs = qstate.first_call_op_prepare_before_hook(
func, args, kwargs, qtensor_id, fqn, parent_module,
OpQuantizeabilityType.QUANTIZEABLE)
else:
args, kwargs = qstate.op_prepare_before_hook(
func, args, kwargs)
# forward
output = super().__torch_function__(func, types, args, kwargs)
# run "after" hook
if first_call:
output = qstate.first_call_op_prepare_after_hook(
func, output, args, qtensor_id,
OpQuantizeabilityType.QUANTIZEABLE)
else:
output = qstate.op_prepare_after_hook(
func, output, args, global_op_idx)
qstate.mark_cur_op_complete(func)
else:
# Hook type is not HookType.OP_HOOKS, if first_call is True we
# record the DAG of non-quantizeable ops.
if first_call:
qstate = getattr(parent_module, '_auto_quant_state', None)
if qstate:
fqn = module_id_to_fqn.get(id(parent_module), None) \
if parent_module else None
args, kwargs = qstate.first_call_op_prepare_before_hook(
func, args, kwargs, qtensor_id, fqn, parent_module,
OpQuantizeabilityType.NOT_QUANTIZEABLE)
output = super().__torch_function__(func, types, args, kwargs)
if first_call:
qstate = getattr(parent_module, '_auto_quant_state', None)
if qstate:
output = qstate.first_call_op_prepare_after_hook(
func, output, args, qtensor_id,
OpQuantizeabilityType.NOT_QUANTIZEABLE)
# TODO: is this right? Don't really understand this
if output is NotImplemented:
with torch._C.DisableTorchFunction():
output = func(*args, **kwargs).as_subclass(
QuantizationPrepareTensorProxy)
assert output is not NotImplemented
return output
def __repr__(self):
return f'QuantizationPrepareTensorProxy({super().__repr__()})'
# TODO(future PR): add other math overrides
class QuantizationInterceptionModule(type(model)): # type: ignore[misc]
"""
An override of user defined subclass of `nn.Module` to enable
dynamic tracing for quantization.
`cur_module` keeps track of the current module in the stack.
During the fist call, an `AutoQuantizationState` object is created and
attached to each non-leaf modules which we need to check for
quantizeable operations.
We override the `__call__` function to do the following for each
module:
If the module is an op which needs quantization:
1. calls `_auto_quant_state.validate_cur_op` to validate that
the currently seen op is the same as what was recorded during tracing
2. calls parent module's `._auto_quant_state.op_prepare_before_hook`
3. executes the original module forward
4. calls parent module's `_auto_quant_state.op_prepare_after_hook`
5. calls `_auto_quant_state.mark_cur_op_complete` to increment
the current op index in preparation for the next op
If the module can contain children ops that need quantization:
1. calls `_auto_quant_state.inputs_prepare_hook` (not implemented yet)
2. executes the original module forward
3. calls `_auto_quant_state.outputs_prepare_hook`
Otherwise, calls the original module forward.
"""
def __call__(self, *args, **kwargs):
new_args = map_aggregate(args, convert_to_interception_proxy)
new_kwargs = map_aggregate(kwargs, convert_to_interception_proxy)
orig_module_call = torch.nn.Module.__call__
orig_nn_sequential_forward = torch.nn.Sequential.forward
def _patched_module_call(self, *args, **kwargs):
if enable_logging:
fqn = module_id_to_fqn.get(id(self), None)
logger.debug(f" fqn:{fqn} _cl_: {type(self)} start")
nonlocal cur_module
old_module = cur_module
cur_module = self
try:
parent_module = module_stack[-1] if len(module_stack) else None
module_stack.append(self)
fqn = module_id_to_fqn.get(id(self), None)
hook_type = get_module_hook_type(parent_module, cur_module)
if hook_type is HookType.OP_HOOKS:
parent_qstate: AutoQuantizationState = \
parent_module._auto_quant_state # type: ignore[union-attr, assignment]
# before hooks
if not first_call:
parent_qstate.validate_cur_op(cur_module)
# If we are in this hook, `cur_module` is a leaf module.
# Therefore, we do not need to override any of its
# children. Disabling the overrides for performance.
nonlocal global_disable_torch_function_override
old_global_disable_torch_function_override = \
global_disable_torch_function_override
global_disable_torch_function_override = True
if first_call:
# mypy ignore is used instead of assert because this
# runs on every forward and assert has a performance cost
args, kwargs = parent_qstate.first_call_op_prepare_before_hook(
cur_module, args, kwargs, qtensor_id,
fqn, cur_module, # type: ignore[arg-type]
OpQuantizeabilityType.QUANTIZEABLE)
else:
# mypy ignore is used instead of assert because this
# runs on every forward and assert has a performance cost
args, kwargs = parent_qstate.op_prepare_before_hook(
cur_module, args, kwargs) # type: ignore[arg-type]
# original forward
output = orig_module_call(self, *args, **kwargs)
# Re-enable the overrides.
global_disable_torch_function_override = \
old_global_disable_torch_function_override
# after hooks
if first_call:
output = parent_qstate.first_call_op_prepare_after_hook(
cur_module, output, args, qtensor_id,
OpQuantizeabilityType.QUANTIZEABLE)
else:
output = parent_qstate.op_prepare_after_hook(
cur_module, output, args, global_op_idx)
parent_qstate.mark_cur_op_complete(cur_module)
elif hook_type is HookType.MODULE_IO_HOOKS:
# TODO(future PR): add inputs io hook
cur_qstate = cur_module._auto_quant_state
cur_qstate.reset_to_new_call()
# original forward
output = orig_module_call(self, *args, **kwargs)
# after hooks
if first_call:
output = cur_qstate.first_call_outputs_prepare_hook(
output, qtensor_id)
else:
output = cur_qstate.outputs_prepare_hook(output)
cur_qstate.validate_is_at_last_seen_idx()
elif hook_type is HookType.ARG_DEQUANTS:
if first_call and parent_module is not None:
parent_qstate_fc = getattr(
parent_module, '_auto_quant_state', None)
if parent_qstate_fc:
args, kwargs = \
parent_qstate_fc.first_call_op_prepare_before_hook(
cur_module, args, kwargs, qtensor_id, fqn,
cur_module,
OpQuantizeabilityType.NOT_QUANTIZEABLE)
output = orig_module_call(self, *args, **kwargs)
# if this fp32 was inplace, make sure to set the output dtype
# back to torch.float
if hasattr(output, '_qtensor_info'):
del output._qtensor_info
if first_call and parent_module is not None:
parent_qstate_fc = getattr(
parent_module, '_auto_quant_state', None)
if parent_qstate_fc:
output = \
parent_qstate_fc.first_call_op_prepare_after_hook(
cur_module, output, args, qtensor_id,
OpQuantizeabilityType.NOT_QUANTIZEABLE)
else:
output = orig_module_call(self, *args, **kwargs)
if enable_logging:
fqn = module_id_to_fqn.get(id(self), None)
logger.debug(f" fqn:{fqn} _cl_: {type(self)} end")
return output
finally:
module_stack.pop()
cur_module = old_module
torch.nn.Module.__call__ = _patched_module_call
torch.nn.Sequential.forward = _nn_sequential_patched_forward # type: ignore[assignment]
nonlocal first_call
try:
if first_call:
# Create a list before iterating because we are adding new
# named modules inside the loop.
named_modules = list(self.named_modules())
# Record module instances which are leaves or children of leaves
leaves = set()
for fqn, child in named_modules:
if is_leaf(child, prepare_custom_config_dict):
for _, child_child in child.named_modules():
leaves.add(child_child)
self._fqn_to_auto_quant_state_map = AutoQuantizationStateModuleDict()
for fqn, v in named_modules:
# fqn is the global FQN, i.e. 'foo.bar.baz'
# v is the module instance
#
# we need to associate the global FQN with SeenOp
# for modules, this is the module FQN
# for functions, this is the parent module FQN
module_id_to_fqn[id(v)] = fqn
if v in leaves:
continue
if v is self:
# for the top level module only, specify input
# and output dtypes
auto_quant_state = AutoQuantizationState(
qconfig_mapping, fqn,
input_dtypes, output_dtypes)
else:
auto_quant_state = AutoQuantizationState(
qconfig_mapping, fqn)
# The code below registers the auto_quant_state object
# of the child in the module hierarchy of the parent,
# and adds the auto_quant_state object to the child
# with a raw __setattr__, without registering it in
# the module hierarchy of the child.
# This is solving the problem of both storing extra state
# (observers) as well as not modifying the meaning of user
# code in child modules which iterates over all module
# children.
#
# This narrows down the issue of dynamically adding
# children to only affect the top level module and not
# the children.
# On the parent, register this module in the FQN map
fqn_to_use_for_key = \
get_fqn_valid_for_module_dict_key(fqn)
self._fqn_to_auto_quant_state_map[fqn_to_use_for_key] = \
auto_quant_state
# On the child, manually set the attribute without
# going through the `torch.nn.Module.__setattr__`
# function, to prevent this object from appearing in
# the child's module hierarchy.
object.__setattr__(
v, '_auto_quant_state', auto_quant_state)
global_op_idx[0] = 0
output = super().__call__(*new_args, **new_kwargs)
if first_call:
for _, v in self.named_modules():
if hasattr(v, '_auto_quant_state'):
v._auto_quant_state.match_fusion_patterns()
v._auto_quant_state.insert_observers(v)
return output
finally:
torch.nn.Module.__call__ = orig_module_call
torch.nn.Sequential.forward = orig_nn_sequential_forward # type: ignore[assignment]
first_call = False
model.__class__ = QuantizationInterceptionModule
# create the graph
trace_with_inputs(model, example_inputs)
return model
def add_auto_convert(module : torch.nn.Module) -> torch.nn.Module:
def convert_to_dispatch_proxy(x):
if isinstance(x, torch.Tensor):
return x.as_subclass(QuantizationConvertTensorProxy) # type: ignore[arg-type]
else:
return x
module_id_to_fqn: Dict[int, str] = {}
# Counter for global quantizeable ops, useful for intermediate activation
# logging.
global_op_idx = [0]
global_disable_torch_function_override = False
class QuantizationConvertTensorProxy(torch.Tensor):
"""
An override of `torch.Tensor` to enable dynamic dispatch for
quantization inference.
        For each function with a `__torch_function__` override, this proxy does
the following for functions which need quantization:
1. calls `_auto_quant_state.validate_cur_op` to validate that
the currently seen op is the same as what was recorded during tracing
2. calls `_auto_quant_state.op_convert_before_hook`.
3. executes the function, with target, args and kwargs possibly modified
by (2)
4. calls `_auto_quant_state.inference_function_after_hook`.
5. calls `_auto_quant_state.mark_cur_op_complete` to increment
the current op index in preparation for the next op
Otherwise, calls the original function.
"""
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
nonlocal global_disable_torch_function_override
if (
# global override means disable the override here
global_disable_torch_function_override or
# to prevent printing things from going into an infinite loop
func == torch.Tensor.__repr__ or
# we don't need to override getters in this framework
func.__name__ == '__get__'
):
return super().__torch_function__(func, types, args, kwargs)
kwargs = kwargs if kwargs else {}
# if we are in a function, the current module is always a parent
parent_module = cur_module
hook_type = get_torch_function_hook_type(parent_module, func)
if enable_logging:
fqn_for_logging = module_id_to_fqn.get(
id(parent_module), 'unknown') if parent_module else None
logger.debug(
f" fqn:{fqn_for_logging} _tf_ {func} " +
f"hook_type {hook_type} " +
# f"arg_types {[type(arg) for arg in args]}) " +
f"arg_dtypes {[arg.dtype if isinstance(arg, torch.Tensor) else None for arg in args]}")
if hook_type is HookType.OP_HOOKS:
qstate: AutoQuantizationState = parent_module._auto_quant_state # type: ignore[union-attr]
# before hooks
qstate.validate_cur_op(func)
func, args, kwargs = qstate.op_convert_before_hook(
func, args, kwargs, parent_module) # type: ignore[arg-type]
# forward
output = super().__torch_function__(func, types, args, kwargs)
# after hooks
output = qstate.op_convert_after_hook(
func, output, global_op_idx)
qstate.mark_cur_op_complete(func)
elif hook_type is HookType.ARG_DEQUANTS:
# TODO(future PR): handle more dtypes
new_args = []
for arg in args:
if isinstance(arg, torch.Tensor) and arg.is_quantized:
new_args.append(arg.dequantize())
else:
new_args.append(arg)
args = tuple(new_args)
output = super().__torch_function__(func, types, args, kwargs)
else: # HookType.NONE
output = super().__torch_function__(func, types, args, kwargs)
# TODO: is this right? Don't really understand this
if output is NotImplemented:
with torch._C.DisableTorchFunction():
output = func(*args, **kwargs).as_subclass(
QuantizationConvertTensorProxy)
assert output is not NotImplemented
if enable_logging:
fqn_for_logging = module_id_to_fqn.get(
id(parent_module), 'unknown') if parent_module else None
out_dtype = None
if isinstance(output, torch.Tensor):
out_dtype = output.dtype
logger.debug(f" fqn:{fqn_for_logging} _tf_ {func} out {out_dtype} end")
return output
def __repr__(self):
return f'QuantizationConvertTensorProxy({super().__repr__()})'
cur_module = None
module_stack : List[torch.nn.Module] = []
assert len(module.__class__.__bases__) == 1
class QuantizationDispatchModule(module.__class__.__bases__[0]): # type: ignore[name-defined]
"""
An override of user defined subclass of `nn.Module` to enable
dynamic tracing for quantization, after model conversion
        to the quantized domain.
`cur_module` keeps track of the current module in the stack.
Tensor arguments are converted to `QuantizationConvertTensorProxy`.
We override the `__call__` function to do the following for each
module:
If the module is an op which needs quantization:
1. calls `_auto_quant_state.validate_cur_op` to validate that
the currently seen op is the same as what was recorded during tracing
2. calls parent module's `._auto_quant_state.op_convert_before_hook`
3. executes the original module forward
4. calls parent module's `_auto_quant_state.op_convert_after_hook`
5. calls `_auto_quant_state.mark_cur_op_complete` to increment
the current op index in preparation for the next op
If the module can contain children ops that need quantization:
1. calls `_auto_quant_state.inputs_convert_hook` (not implemented yet)
2. executes the original module forward
3. calls `_auto_quant_state.outputs_convert_hook`
Otherwise, calls the original module forward.
"""
def __call__(self, *args, **kwargs):
new_args = map_aggregate(args, convert_to_dispatch_proxy)
new_kwargs = map_aggregate(kwargs, convert_to_dispatch_proxy)
orig_module_call = torch.nn.Module.__call__
orig_nn_sequential_forward = torch.nn.Sequential.forward
def _patched_module_call(self, *args, **kwargs):
nonlocal cur_module
old_module = cur_module
cur_module = self
nonlocal global_disable_torch_function_override
try:
parent_module = module_stack[-1] if len(module_stack) else None
module_stack.append(self)
hook_type = get_module_hook_type(parent_module, cur_module)
if enable_logging:
fqn_for_logging = module_id_to_fqn.get(id(self), None)
logger.debug(
f" fqn: {fqn_for_logging} " +
f"_cl_ {type(self)} " +
f"arg_dtypes {[arg.dtype if isinstance(arg, torch.Tensor) else None for arg in args]} " +
f"hook_type {hook_type}")
if hook_type is HookType.OP_HOOKS:
# before hooks
qstate: AutoQuantizationState = \
parent_module._auto_quant_state # type: ignore[union-attr, assignment]
qstate.validate_cur_op(cur_module)
# If we are in this hook, `cur_module` is a leaf module.
# Therefore, we do not need to override any of its
# children. Disabling the overrides for performance.
old_global_disable_torch_function_override = \
global_disable_torch_function_override
global_disable_torch_function_override = True
_, args, kwargs = qstate.op_convert_before_hook(
cur_module, args, kwargs, cur_module)
# forward
output = orig_module_call(self, *args, **kwargs)
# after hooks
output = qstate.op_convert_after_hook(
cur_module, output, global_op_idx)
# Re-enable the override.
global_disable_torch_function_override = \
old_global_disable_torch_function_override
qstate.mark_cur_op_complete(cur_module)
elif hook_type is HookType.MODULE_IO_HOOKS:
cur_qstate: AutoQuantizationState = cur_module._auto_quant_state
cur_qstate.reset_to_new_call()
# before hooks (TODO)
# forward
output = orig_module_call(self, *args, **kwargs)
# after hooks
# For the sake of performance, we assume no overrides
# are needed for quantizing/dequantizing things
old_global_disable_torch_function_override = \
global_disable_torch_function_override
global_disable_torch_function_override = True
output = cur_qstate.outputs_convert_hook(output)
global_disable_torch_function_override = \
old_global_disable_torch_function_override
cur_qstate.validate_is_at_last_seen_idx()
elif hook_type is HookType.ARG_DEQUANTS:
# TODO(future PR): handle more dtypes
new_args = []
for arg in args:
if isinstance(arg, torch.Tensor) and arg.is_quantized:
dequant = arg.dequantize().as_subclass(
QuantizationConvertTensorProxy) # type: ignore[arg-type]
new_args.append(dequant)
else:
new_args.append(arg)
args = tuple(new_args)
output = orig_module_call(self, *args, **kwargs)
else:
output = orig_module_call(self, *args, **kwargs)
if enable_logging:
fqn_for_logging = module_id_to_fqn.get(id(self), None)
logger.debug(
f" fqn: {fqn_for_logging} " +
f"_cl_ {type(self)} " +
f"dtype {output.dtype if isinstance(output, torch.Tensor) else None} " +
"end")
return output
finally:
module_stack.pop()
cur_module = old_module
torch.nn.Module.__call__ = _patched_module_call
torch.nn.Sequential.forward = _nn_sequential_patched_forward # type: ignore[assignment]
try:
global_op_idx[0] = 0
output = super().__call__(*new_args, **new_kwargs)
def unwrap_proxy(a):
if isinstance(a, QuantizationConvertTensorProxy):
a.__class__ = torch.Tensor # type: ignore[assignment]
return a
output = map_aggregate(output, unwrap_proxy)
return output
finally:
torch.nn.Module.__call__ = orig_module_call
torch.nn.Sequential.forward = orig_nn_sequential_forward # type: ignore[assignment]
def rewrite_for_scripting(self):
return auto_trace_rewriter.rewrite_for_scripting(self)
pack_weights_for_functionals(module)
attach_scale_zp_values_to_model(module)
attach_op_convert_info_to_model(module)
attach_output_convert_info_to_model(module)
# Since eager mode convert could have changed the IDs of some modules,
# populate the FQN map again
for k, v in module.named_modules():
module_id_to_fqn[id(v)] = k
module.__class__ = QuantizationDispatchModule
return module
# AutoQuantizationState lives in parent module's _modules.
# Currently, `torch.nn.Sequential`'s forward iterates over all
# items in _modules. To avoid changing the meaning of the program, for
# now we patch the forward to ignore our quantization state.
# Note: this is a hackedy hack, before launching we should consider
# checking the fix into `torch.nn.Sequential` to avoid the patch.
def _nn_sequential_patched_forward(cls, input):
for module in cls:
if not isinstance(module, AutoQuantizationStateModuleDict):
input = module(input)
return input
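# Simplified usage sketch of the two entry points in this file. The full DBR
# flow also performs module fusion and child module swapping before inference;
# `MyModel` and `calibration_data` below are hypothetical stand-ins:
#
#   model = MyModel().eval()
#   qconfig_mapping = QConfigMapping().set_global(
#       torch.ao.quantization.get_default_qconfig('fbgemm'))
#   example_inputs = (torch.randn(1, 3, 224, 224),)
#   model = add_auto_observation(model, qconfig_mapping, example_inputs)
#   for data in calibration_data:
#       model(data)
#   model = add_auto_convert(model)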
| pytorch-master | torch/ao/quantization/_dbr/auto_trace.py |
from typing import Dict, Callable, Any, Optional
import torch
from torch.nn.intrinsic import _FusedModule
from ..utils import (
activation_is_int8_quantized,
activation_is_int32_quantized,
op_is_int8_dynamically_quantized,
)
from torch.ao.quantization import swap_module
from torch.ao.quantization.quantization_mappings import (
DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS,
)
def _swap_child_modules(
module: torch.nn.Module,
static_mappings: Dict[Callable, Any],
dynamic_mappings: Dict[Callable, Any],
parent_fqn: Optional[str] = None,
) -> None:
"""
For each direct child of `module`, swaps it using `static_mappings`
if the qconfig for that child is using int8 static quantization,
and the module type is in the mapping.
Recursively calls itself on each child.
"""
qstate = getattr(module, '_auto_quant_state', None)
reassign = {}
for local_fqn, mod in module.named_children():
if parent_fqn is None:
global_fqn = local_fqn
else:
global_fqn = f"{parent_fqn}.{local_fqn}"
# both fused modules and observed custom modules are
# swapped as one unit
if not isinstance(mod, _FusedModule):
_swap_child_modules(
mod, static_mappings, dynamic_mappings, global_fqn)
qconfig = getattr(mod, 'qconfig', None)
if not qconfig:
continue
activation_int8_quantized = activation_is_int8_quantized(qconfig)
op_int8_dynamically_quantized = op_is_int8_dynamically_quantized(qconfig)
activation_int32_quantized = activation_is_int32_quantized(qconfig)
# Get the output observer from qstate and attach it to the module,
# to match the API for Eager mode module swaps
if qstate is not None:
output_obs = qstate.get_output_observer_from_fqn(global_fqn)
if output_obs is not None:
mod.activation_post_process = output_obs
if activation_int8_quantized:
if not type(mod) in static_mappings:
continue
reassign[local_fqn] = swap_module(mod, static_mappings, {})
elif op_int8_dynamically_quantized:
if not type(mod) in dynamic_mappings:
continue
reassign[local_fqn] = swap_module(mod, dynamic_mappings, {})
elif activation_int32_quantized:
# For now, only apply reference logic to modules quantized to
# int32. Do it automatically.
# TODO(future PR): extend this logic to more dtypes, and add
# the is_reference API flag instead of doing this automatically.
# Note: swap modules only does the swap if the mapping for this
# module exists.
reassign[local_fqn] = swap_module(
mod, DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS, {})
# TODO(future PR): add support for other dtypes
for key, value in reassign.items():
module._modules[key] = value
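# Illustrative sketch (the default Eager mode mappings are assumed to be
# appropriate; `prepared_model` is a model which already went through DBR
# prepare and calibration):
#
#   from torch.ao.quantization.quantization_mappings import (
#       get_default_static_quant_module_mappings,
#       get_default_dynamic_quant_module_mappings,
#   )
#   _swap_child_modules(
#       prepared_model,
#       get_default_static_quant_module_mappings(),
#       get_default_dynamic_quant_module_mappings())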
| pytorch-master | torch/ao/quantization/_dbr/module_swap_utils.py |
import dataclasses
import enum
from typing import Callable, Tuple, Any, List, Optional, Dict
import torch
import torch.nn.functional as F
toq = torch.ops.quantized
from .mappings import (
functions_supported_by_quantization,
module_types_supported_by_quantization,
module_types_supported_by_quantization_preserves_dtype,
functions_supported_by_quantization_preserves_dtype,
fp32_to_int8_fun_mapping,
add_and_mul_ops,
conv_ops,
)
from ..qconfig import QConfigAny
from ..qconfig_mapping import QConfigMapping
from torch.quantization import (
ObserverBase,
FakeQuantizeBase,
is_activation_post_process,
)
from ..qconfig_mapping_utils import (
maybe_adjust_qconfig_for_module_type_or_name,
)
def _raise_obs_not_found_error(func):
raise RuntimeError(
f'Encountered arithmetic operation {torch.typename(func)} but we have '
f'encountered fewer arithmetic operations in previous calibration runs. '
f'This likely indicates that the program contains dynamic control flow. '
        f'Quantization is not defined over dynamic control flow!')
def _raise_obs_op_mismatch(func, prev_op):
raise RuntimeError(
f'Encountered arithmetic operation {torch.typename(func)} but previously '
        f'recorded operation was {torch.typename(prev_op)}! This likely indicates '
f'that the program contains dynamic control flow. Quantization is not '
f'defined over dynamic control flow!')
@dataclasses.dataclass
class QTensorInfo:
id: int # tensor ID
orig_dtype: torch.dtype # dtype seen while tracing with example input
inf_dtype: torch.dtype # dtype at inference
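# For example, an activation traced as fp32 but expected to be int8 quantized
# at inference time could be described as (ids are assigned by the tracer):
#   QTensorInfo(id=0, orig_dtype=torch.float32, inf_dtype=torch.quint8)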
@dataclasses.dataclass
class FusionInfo:
# linear matched pattern, example: [torch.add, torch.relu]
pattern: Tuple[Callable, ...]
# what the current element should be replaced with during execution
# example: toq.add_relu (for torch.add -> torch.relu)
replacement_type_this_element: Callable
# true if the current element is the first element of the pattern,
# for example true for torch.add in (torch.add -> torch.relu)
is_first_element: bool
# true if the current element is the last element of the pattern,
# for example true for torch.relu in (torch.add -> torch.relu)
is_last_element: bool
@dataclasses.dataclass
class SeenQOpInfo:
idx: int
# Python type of the seen op. For modules, this is type(mod). For
# functions, this is the target function.
type: Callable
# True if the type is a module, False otherwise (for functions/methods).
type_is_module: bool
# Note: FQN refers to the current module for modules and to the parent
# module for functions
fqn: str
# Information about the input tensors
# Non-tensor inputs are represented with None.
input_tensor_infos: List[Optional[QTensorInfo]]
# Information about the output tensors
# Non-tensor outputs are represented with None.
output_tensor_infos: List[QTensorInfo]
# Information about tensors which will need to be packed,
# idx is the argument index in args
# name is the name of this parameter in the parent module
packable_tensor_idx_to_name: Dict[int, Optional[str]]
# Information about non-tensors which will need to be packed,
# idx is the argument index in args
# arg is the argument value
packable_nontensor_idx_to_arg: Dict[int, Any]
# Information about tensors which will need to be packed from kwargs.
# kwarg_name is the kwarg name
# name is the name of this parameter in the parent module
packable_tensor_kwarg_name_to_name: Dict[str, Optional[str]]
# This is True if all packable args are simple attributes, or there
# are no packable args.
# This is False if some packable args are results of other functions.
op_packing_only_uses_module_attributes: bool
# QConfig for the op, can be None
qconfig: QConfigAny
# fusion_info for the op, is None if no fusion is found
fusion_info: Optional[FusionInfo]
# True if this op is a reference op during inference
is_reference_op_at_inference: bool
def __repr__(self) -> str:
s = f"(type): {self.type}\n"
s += f" (fqn): {self.fqn}\n"
s += f" (input_tensor_infos): {self.input_tensor_infos}\n"
s += f" (output_tensor_infos): {self.output_tensor_infos}"
if len(self.packable_tensor_idx_to_name):
s += f"\n (packable_tensor_idx_to_name): {self.packable_tensor_idx_to_name}"
if len(self.packable_nontensor_idx_to_arg):
s += f"\n (packable_nontensor_idx_to_arg): {self.packable_nontensor_idx_to_arg}"
if len(self.packable_tensor_kwarg_name_to_name):
s += f"\n (packable_tensor_kwarg_name_to_name): {self.packable_tensor_kwarg_name_to_name}"
if self.fusion_info:
s += f"\n (fusion_info): {self.fusion_info}"
return s
@dataclasses.dataclass
class SeenNonQOpInfo:
# Python type of the seen op. For modules, this is type(mod). For
# functions, this is the target function.
type: Callable
# Information about the input tensors
# Non-tensor inputs are represented with None.
input_tensor_infos: List[Optional[QTensorInfo]]
# Information about the output tensors
# Non-tensor outputs are represented with None.
output_tensor_infos: List[QTensorInfo]
class OpQuantizeabilityType(enum.Enum):
QUANTIZEABLE = 0
NOT_QUANTIZEABLE = 1
def op_needs_quantization(op: Callable) -> bool:
if op in functions_supported_by_quantization:
return True
elif type(op) in module_types_supported_by_quantization:
return True
else:
return False
# TODO: fix lint
class ObserverWrapper(torch.nn.Identity):
def __init__(self, child):
super().__init__()
self.child = child
self.dtype = child.dtype
def wrap_observers_in_placeholders(module: torch.nn.Module) -> None:
"""
Wraps each child observer of `module` in a placeholder which prevents
the execution of the observer during the forward. This is useful to prevent
tracing the model with example inputs from contributing to calibration
statistics.
"""
for name, child in module.named_children():
if isinstance(child, (ObserverBase, FakeQuantizeBase)):
wrapper = ObserverWrapper(child)
setattr(module, name, wrapper)
else:
wrap_observers_in_placeholders(child)
def unwrap_observers_from_placeholders(module: torch.nn.Module) -> None:
"""
Restores observers back to their original state.
"""
# Note: we cannot use module.named_children() because we can
# have two different names refer to the same module, for example
# when we are reusing observers for torch.add scalar version.
for name, child in module._modules.items():
if child is None:
continue
if isinstance(child, ObserverWrapper):
unwrapped = child.child
setattr(module, name, unwrapped)
else:
unwrap_observers_from_placeholders(child)
def trace_with_inputs(
model: torch.nn.Module,
example_args: Tuple[Any],
) -> None:
with torch.no_grad():
old_training = model.training
model.eval()
wrap_observers_in_placeholders(model)
model(*example_args)
unwrap_observers_from_placeholders(model)
if old_training:
model.train()
# TODO(future PR): verify correctness of this for all
# quantizeable modules
def is_leaf(
m: torch.nn.Module,
prepare_custom_config_dict: Optional[Dict[str, Any]],
) -> bool:
if prepare_custom_config_dict is None:
prepare_custom_config_dict = {}
if 'non_traceable_module_class' in prepare_custom_config_dict:
for target_cls in prepare_custom_config_dict['non_traceable_module_class']:
if isinstance(m, target_cls):
return True
# TODO(future PR): extend to the rest of the container classes
container_classes = (
torch.nn.Sequential,
torch.nn.ModuleList,
)
return (
# allowlist everything in torch.nn except containers
(m.__module__.startswith('torch.nn') and (
not isinstance(m, container_classes)
)) or
# allowlist nni modules, as they inherit from nn.Sequential
m.__module__.startswith('torch.nn.intrinsic') or
# observers and fake quants are leaves
is_activation_post_process(m)
)
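# For example, with no custom config, torch.nn.Linear is a leaf because it lives
# in `torch.nn` and is not a container, while torch.nn.Sequential is not:
#
#   is_leaf(torch.nn.Linear(4, 4), None) # True
#   is_leaf(torch.nn.Sequential(), None) # False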
class FuncOutputObsType(enum.Enum):
NONE = 0
NEW_OBS = 1
REUSES_FIRST_INPUT_OBS = 2
def get_func_output_obs_type(
seen_q_op_info: SeenQOpInfo,
) -> FuncOutputObsType:
op_type = seen_q_op_info.type
if seen_q_op_info.qconfig is None:
return FuncOutputObsType.NONE
# check for ops which need packed weights but the weights are
# coming from another function
if not seen_q_op_info.op_packing_only_uses_module_attributes:
return FuncOutputObsType.NONE
if op_type in add_and_mul_ops:
if (
len(seen_q_op_info.input_tensor_infos) > 0 and
seen_q_op_info.input_tensor_infos[0] is not None and
seen_q_op_info.input_tensor_infos[0].inf_dtype in (torch.int32, torch.int64)
):
# this is handling ops on dtypes such as torch.int
return FuncOutputObsType.NONE
elif (
len(seen_q_op_info.input_tensor_infos) > 1 and
seen_q_op_info.input_tensor_infos[1] is None
):
return FuncOutputObsType.REUSES_FIRST_INPUT_OBS
elif op_type in (torch.relu, F.relu):
return FuncOutputObsType.NONE
elif op_type == torch.cat:
if (
len(seen_q_op_info.input_tensor_infos) > 0 and
seen_q_op_info.input_tensor_infos[0] is not None and
seen_q_op_info.input_tensor_infos[0].inf_dtype in (torch.int32, torch.int64)
):
return FuncOutputObsType.NONE
elif op_type in (torch.nn.LSTM,):
return FuncOutputObsType.NONE
return FuncOutputObsType.NEW_OBS
def converted_func_needs_scale_zp(seen_q_op_info: SeenQOpInfo) -> bool:
op_type = seen_q_op_info.type
is_module = isinstance(op_type, type(torch.nn.Module))
if is_module:
return False
if seen_q_op_info.qconfig is None:
return False
if op_type in add_and_mul_ops:
# check if both arguments are tensors
inputs = seen_q_op_info.input_tensor_infos
both_args_tensors = len(inputs) == 2 and inputs[0] is not None and \
inputs[1] is not None
# disable quantization for torch.mul with int tensor arguments
first_dtype_is_not_int = len(inputs) > 0 and \
inputs[0] is not None and \
inputs[0].inf_dtype not in (torch.int32, torch.int64)
return both_args_tensors and first_dtype_is_not_int
elif op_type == torch.cat:
inputs = seen_q_op_info.input_tensor_infos
first_dtype_is_not_int = len(inputs) > 0 and \
inputs[0] is not None and \
inputs[0].inf_dtype not in (torch.int32, torch.int64)
return first_dtype_is_not_int
elif op_type in conv_ops or op_type == F.linear:
outputs = seen_q_op_info.output_tensor_infos
is_int8 = outputs[0].inf_dtype == torch.quint8
return is_int8
return False
class FuncOutputDTypeType(enum.Enum):
# for ops which are quantizeable and are configured by the qconfig,
# for example F.conv2d
DTYPE_DEPENDS_ON_QCONFIG = 0
# for ops which are quantizeable and take the dtype of the previous
# op, for example nn.Dropout
DTYPE_EQUALS_INPUT_DTYPE = 1
# for ops which may be quantizeable in some cases but are not
# quantizeable due to observed syntax (for example, F.conv2d with
# weights coming from another function).
DTYPE_DEFAULT_BC_UNSUPPORTED_SYNTAX = 2
def get_func_output_dtype_type(
seen_q_op_info: SeenQOpInfo,
) -> FuncOutputDTypeType:
if seen_q_op_info.type_is_module:
if seen_q_op_info.type in module_types_supported_by_quantization_preserves_dtype:
return FuncOutputDTypeType.DTYPE_EQUALS_INPUT_DTYPE
# check for ops which need packed weights but the weights are
# coming from another function
if not seen_q_op_info.op_packing_only_uses_module_attributes:
return FuncOutputDTypeType.DTYPE_DEFAULT_BC_UNSUPPORTED_SYNTAX
args = seen_q_op_info.input_tensor_infos
if seen_q_op_info.type in functions_supported_by_quantization_preserves_dtype:
return FuncOutputDTypeType.DTYPE_EQUALS_INPUT_DTYPE
elif seen_q_op_info.type in add_and_mul_ops and len(args) > 0 and \
args[0] is not None and \
args[0].orig_dtype in (torch.int32, torch.int64):
# binary ops with torch.int arguments do not support quantization
return FuncOutputDTypeType.DTYPE_EQUALS_INPUT_DTYPE
elif seen_q_op_info.type == torch.cat and len(args) > 0 and \
args[0] is not None and \
args[0].orig_dtype in (torch.int32, torch.int64):
# TODO(before land): do we still need this branch?
return FuncOutputDTypeType.DTYPE_EQUALS_INPUT_DTYPE
return FuncOutputDTypeType.DTYPE_DEPENDS_ON_QCONFIG
def get_weight_argument_info(op: Callable) -> Optional[Tuple[int, str]]:
if op == F.linear or op in conv_ops:
return (1, 'weight')
return None
def get_op_packing_only_uses_module_attributes(
op: Callable,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
module: torch.nn.Module,
) -> bool:
"""
Returns True if all arguments of this op which are weights are module
attributes on the root module, and False otherwise.
For example, for `F.linear(input, weight, bias)`, this would return
True if `weight` is stored directly on the parent module (the common case),
and False if `weight` was an output of a different op.
"""
# check for ops which need packed weights but the weights are
# coming from another function
info = get_weight_argument_info(op)
if info is not None:
idx, name = info
param_name = args[idx] if idx < len(args) else kwargs[name]
arg_name_in_root = get_param_name(module, param_name)
if arg_name_in_root is None:
return False
return True
def get_quantized_op(
seen_q_op_info: SeenQOpInfo,
idx_to_seen_q_op_infos: Dict[int, SeenQOpInfo],
) -> Optional[Callable]:
"""
Given a `seen_q_op_info`, returns the quantized version of the seen function.
If the `seen_q_op_info` corresponds to a module, returns `None`.
    If the function does not need quantizing, returns `None`.
"""
# if we are in a fusion, use the fusion replacement rules
if seen_q_op_info.fusion_info is not None:
return seen_q_op_info.fusion_info.replacement_type_this_element
op_type = seen_q_op_info.type
is_module = isinstance(op_type, type(torch.nn.Module))
if is_module:
return None
if seen_q_op_info.output_tensor_infos[0].inf_dtype != torch.quint8:
return None
if (
(op_type in add_and_mul_ops or op_type == torch.cat) and
seen_q_op_info.input_tensor_infos[0] is not None and
seen_q_op_info.input_tensor_infos[0].inf_dtype in (torch.int32, torch.int64)
):
# handle torch.mul with int tensor arguments
return None
elif op_type in fp32_to_int8_fun_mapping:
return fp32_to_int8_fun_mapping[op_type]
return None
def get_input_observed_arg_idxs(
op_type: Callable,
op_type_is_module: bool,
) -> Optional[List[int]]:
if op_type_is_module:
# TODO(future PR): handle RNNs
return [0]
elif op_type in conv_ops:
return [0, 1]
elif op_type == F.linear:
return [0, 1]
# None means "observe all Tensor args"
return None
def get_packable_tensor_arg_idxs(op: Callable) -> Optional[List[int]]:
"""
Returns tensor arg idxs which correspond to parameters which will need
to be packed.
"""
if op in conv_ops:
return [1, 2]
elif op == F.linear:
return [1, 2]
return None
def get_packable_tensor_kwarg_names(op: Callable) -> Optional[List[str]]:
"""
Returns tensor kwarg names which correspond to parameters which will
need to be packed.
"""
if op == F.linear or op in conv_ops:
return ['weight', 'bias']
return None
def get_param_name(module: torch.nn.Module, arg: Any) -> Optional[str]:
"""
Returns the name of arg with respect to the current module.
"""
for name, param in module.named_parameters():
if arg is param:
return name
return None
# raise AssertionError(f"arg {arg} not found in module {module}")
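# For example, for a module which stores its weight directly as a parameter:
#
#   mod = torch.nn.Linear(4, 4)
#   get_param_name(mod, mod.weight) # 'weight'
#   get_param_name(mod, torch.randn(4)) # None, not a parameter of `mod`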
def get_packable_nontensor_arg_idxs(op: Callable) -> Optional[List[int]]:
"""
Returns nontensor arg idxs which correspond to arguments which will need
to be packed.
"""
if op in conv_ops:
# stride, padding, dilation, groups
return [3, 4, 5, 6]
return None
def get_packable_arg_idxs(op: Callable) -> Optional[List[int]]:
if op in conv_ops:
# weight, bias, stride, padding, dilation, groups
return [1, 2, 3, 4, 5, 6]
elif op == F.linear:
# weight, bias
return [1, 2]
return None
def get_weight_arg_idx(op: Callable) -> Optional[int]:
if op in conv_ops:
return 1
elif op == F.linear:
return 1
return None
def iterate_and_apply(
args: Any,
flattened_tensor_infos: List[Optional[QTensorInfo]],
func: Callable,
flattened_tensor_infos_idx=None
) -> Any:
"""
Inputs:
`args`: arguments to a function, may contain nested types, for example:
([torch.Tensor, torch.Tensor], int, (int, int))
`flattened_tensor_infos`: tensor information containers for each tensor
in `args`, flattened, for example corresponding with above:
({...}, {...}, None, None, None)
`func`: function to apply to each tensor in `args` to create `new_args`
Returns `new_args`, where each tensor has been transformed by `func`.
"""
arg_idx = 0
if flattened_tensor_infos_idx is None:
flattened_tensor_infos_idx = [0]
if isinstance(args, tuple):
new_args = []
for arg in args:
new_arg = iterate_and_apply(
arg, flattened_tensor_infos, func, flattened_tensor_infos_idx)
new_args.append(new_arg)
return tuple(new_args)
elif isinstance(args, list):
for idx in range(len(args)):
new_arg = iterate_and_apply(
args[idx], flattened_tensor_infos, func, flattened_tensor_infos_idx)
args[idx] = new_arg
return args
else:
# individual element
cur_flattened_tensor_info = \
flattened_tensor_infos[flattened_tensor_infos_idx[0]]
flattened_tensor_infos_idx[0] += 1
if cur_flattened_tensor_info is not None:
return func(args, cur_flattened_tensor_info)
else:
return args
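# A small sketch of the expected behavior (`info0` and `info1` stand in for any
# non-None QTensorInfo objects): tensors with a corresponding entry are
# transformed by `func`, everything else passes through unchanged:
#
#   args = ([torch.randn(2), torch.randn(2)], 1)
#   new_args = iterate_and_apply(
#       args, [info0, info1, None], lambda t, info: t * 2)
#   # both tensors are doubled, the trailing int is returned as is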
def get_producer_of_seen_q_op_info(
idx_to_seen_q_op_info: Dict[int, SeenQOpInfo],
cur_seen_q_op_info: SeenQOpInfo,
) -> Optional[SeenQOpInfo]:
"""
Input: cur_seen_q_op_info, all seen ops
Output: the SeenQOpInfo which created the input to the current SeenQOpInfo
"""
if cur_seen_q_op_info.input_tensor_infos[0] is None:
return None
input_tensor_id = cur_seen_q_op_info.input_tensor_infos[0].id
for idx, seen_q_op_info in idx_to_seen_q_op_info.items():
for output_tensor_info in seen_q_op_info.output_tensor_infos:
if output_tensor_info is not None:
if input_tensor_id == output_tensor_info.id:
return seen_q_op_info
return None
def get_users_of_seen_q_op_info(
idx_to_seen_q_op_info: Dict[int, SeenQOpInfo],
cur_seen_q_op_info: SeenQOpInfo,
) -> List[SeenQOpInfo]:
"""
Input: cur_seen_q_op_info
    Output: list of all seen_q_op_infos which use the output of the cur_seen_q_op_info.
"""
if len(cur_seen_q_op_info.output_tensor_infos) != 1:
return []
output_tensor_id = cur_seen_q_op_info.output_tensor_infos[0].id
results = []
for idx, seen_q_op_info in idx_to_seen_q_op_info.items():
for input_tensor_info in seen_q_op_info.input_tensor_infos:
if input_tensor_info is not None:
if output_tensor_id == input_tensor_info.id:
results.append(seen_q_op_info)
return results
class HookType(enum.Enum):
"""
Describes the various types of function and module hooks that are used
to implement quantization syntax transforms.
"""
# Hooks which are run before, during and after a quantizeable op.
    # Usually used for op input and output observation, substituting
# quantized kernels, and dynamically looking up arguments to quantized
# kernels.
OP_HOOKS = 0
# Hooks which are run before or after a `torch.nn.Module` which
# is a non-leaf. Usually used for dtype transforms if the user requests
# that the inputs or outputs of a certain module are of some dtype.
MODULE_IO_HOOKS = 1
# Hooks which are run before a non-quantizeable op which requires
# `torch.float` inputs. Any inputs which are not floats are converted
# back to floats.
ARG_DEQUANTS = 2
# Everything else
NONE = 3
def get_torch_function_hook_type(
parent_module: Optional[torch.nn.Module],
func: Callable,
) -> HookType:
# the direct __dict__ accesses are for performance, because
# the default `torch.nn.Module.__getattr__` has overhead.
parent_module_has_qstate = parent_module is not None and \
'_auto_quant_state' in parent_module.__dict__
needs_op_hooks = parent_module_has_qstate and \
parent_module.__dict__['_auto_quant_state'].cur_op_needs_hooks(func) # type: ignore[union-attr, operator]
if needs_op_hooks:
return HookType.OP_HOOKS
elif (
parent_module_has_qstate and
# do not attempt to dequantize the args to dequantize, as that will
# lead to infinite recursion
func != torch.Tensor.dequantize
):
return HookType.ARG_DEQUANTS
else:
return HookType.NONE
def get_module_hook_type(
parent_module: Optional[torch.nn.Module],
cur_module: torch.nn.Module,
) -> HookType:
cached_hook_type = getattr(cur_module, '_auto_quant_module_hook_type', None)
if cached_hook_type is not None:
return cached_hook_type
parent_module_has_qstate = parent_module is not None and \
'_auto_quant_state' in parent_module.__dict__
needs_op_hooks = parent_module_has_qstate and \
parent_module.__dict__['_auto_quant_state'].cur_op_needs_hooks(cur_module) # type: ignore[union-attr, operator]
# We need IO hooks if
# * we are calling forward on a module (always True here)
# * that module has quant state
# * that module does not need op hooks for the parent
needs_io_hooks = (
'_auto_quant_state' in cur_module.__dict__ and
(not needs_op_hooks)
)
needs_arg_dequants = parent_module_has_qstate and not needs_op_hooks
if needs_op_hooks:
result = HookType.OP_HOOKS
elif needs_io_hooks:
result = HookType.MODULE_IO_HOOKS
elif needs_arg_dequants:
result = HookType.ARG_DEQUANTS
else:
result = HookType.NONE
cur_module._auto_quant_module_hook_type = result # type: ignore[assignment]
return result
def clone_detach_tensor_without_dispatch(x: torch.Tensor) -> torch.Tensor:
"""
Creates a detached clone of `x`, unwrapping x from any dispatched
type before performing the copy.
This is necessary to not leak dispatched types to debugging logic
such as numeric suite.
TODO(future PR): figure out why is_quantized returns False for
the dispatched types, even though the underlying tensor is quantized.
"""
old_class = x.__class__
x.__class__ = torch.Tensor
x_copy = x.clone().detach()
x.__class__ = old_class
return x_copy
def get_input_args_quant_dequant_info(
seen_q_op_info: SeenQOpInfo,
tensor_id_to_scale_zp: Dict[int, Tuple[torch.Tensor, torch.Tensor]],
) -> Tuple[List[Optional[Tuple[float, int, torch.dtype]]], List[bool], bool]:
"""
Returns a list of information about the tensor inputs to the current op.
Quant list:
For each tensor input:
* if the tensor input needs a quant, the list will contain
      (scale, zero_point, dtype)
* if the tensor input does not need a quant, the list will contain None
Dequant list:
For each tensor input:
* if the tensor input needs a dequant, True, otherwise, False
any_arg_quant_or_dequant_needed:
If True, at least one of quants or dequants is needed. If False,
there are no quants or dequants needed.
For example, if there are two tensor inputs to the current op, and the
first input needs a quant, this function will return
# quants
    [(scale0, zero_point0, dtype0), None],
# dequants
[False, False]
"""
quant_infos: List[Optional[Tuple[float, int, torch.dtype]]] = []
dequant_infos: List[bool] = []
# determine the expected output dtype
output_dtype = seen_q_op_info.output_tensor_infos[0].inf_dtype
packable_arg_idxs = get_packable_arg_idxs(seen_q_op_info.type)
any_arg_quant_or_dequant_needed = False
for input_arg_idx, input_arg in enumerate(seen_q_op_info.input_tensor_infos):
arg_will_be_packed = packable_arg_idxs is not None and \
input_arg_idx in packable_arg_idxs and \
seen_q_op_info.op_packing_only_uses_module_attributes
if input_arg is not None and not arg_will_be_packed:
tensor_id = input_arg.id
if input_arg.inf_dtype != output_dtype:
any_arg_quant_or_dequant_needed = True
if output_dtype in (torch.quint8, torch.qint32):
assert tensor_id in tensor_id_to_scale_zp
scale, zp = tensor_id_to_scale_zp[tensor_id]
# TODO: return this to the caller
quant_infos.append((scale, zp, output_dtype)) # type: ignore[arg-type]
if output_dtype == torch.qint32:
# For now, we treat all qint32 ops as reference, so
# we add a dequant before the op.
# TODO(future PR): extend this to more dtypes
# TODO(future PR): use is_reference flag instead of
# assuming
dequant_infos.append(True)
else:
dequant_infos.append(False)
else:
quant_infos.append(None)
dequant_infos.append(True)
else:
quant_infos.append(None)
dequant_infos.append(False)
else:
quant_infos.append(None)
dequant_infos.append(False)
return quant_infos, dequant_infos, any_arg_quant_or_dequant_needed
def get_cur_qconfig(
qconfig_mapping: QConfigMapping,
cur_fqn: str,
cur_op_type: Callable,
) -> Optional[QConfigAny]:
# precedence: global -> object_type -> module_name_regex -> module_name
# -> module_name_object_type_order
# (module_name_regex, module_name_object_type_order not implemented yet)
# global
global_qconfig = qconfig_mapping.global_qconfig
qconfig = maybe_adjust_qconfig_for_module_type_or_name(
qconfig_mapping, cur_op_type, cur_fqn, global_qconfig)
return qconfig
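# For example, with a mapping which sets a global qconfig and overrides it for
# torch.nn.Linear (`global_qconfig` and `linear_qconfig` are assumed to be
# QConfig objects defined elsewhere):
#
#   qconfig_mapping = QConfigMapping() \
#       .set_global(global_qconfig) \
#       .set_object_type(torch.nn.Linear, linear_qconfig)
#   get_cur_qconfig(qconfig_mapping, 'fc1', torch.nn.Linear) # linear_qconfig
#   get_cur_qconfig(qconfig_mapping, 'conv1', torch.nn.Conv2d) # global_qconfig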
# We store quantization state for all children on the top level module in a
# ModuleDict. In order to properly special case this module from other
# ModuleDict instances, we create a marker class for it.
class AutoQuantizationStateModuleDict(torch.nn.ModuleDict):
pass
def get_fqn_valid_for_module_dict_key(fqn: str) -> str:
"""
Modifies `fqn` to make it a valid key to a ModuleDict.
"""
if fqn == '':
fqn = ' '
return fqn.replace('.', ':')
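# For example, since '.' is not allowed in ModuleDict keys:
#   get_fqn_valid_for_module_dict_key('features.0.conv') # 'features:0:conv'
#   get_fqn_valid_for_module_dict_key('') # ' '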
| pytorch-master | torch/ao/quantization/_dbr/utils.py |
from typing import List
import torch
from .function_fusion import pattern_is_match
from .utils import (
get_users_of_seen_q_op_info,
)
from .mappings import (
known_module_fusion_patterns,
)
def get_module_fusion_fqns(
module: torch.nn.Module,
) -> List[List[str]]:
"""
Input: a module with auto quantization state
Walks the subgraphs and determines which modules should be
fused.
    Output: a list of lists of FQNs, one list per group of modules which should be fused.
"""
results = []
for _, child in module.named_modules():
if not hasattr(child, '_auto_quant_state'):
continue
qstate = child._auto_quant_state
# Walk the subgraphs and record the FQNs of all known module fusions.
# For now, this is brute forced for simplicity, can be optimized later if
# necessary.
# TODO(future PR): if a pattern is matched, add it to "seen" items
# and do not use it in future matching.
for idx, seen_q_op_info in qstate.idx_to_seen_q_op_infos.items():
for fusion_pattern in known_module_fusion_patterns:
is_match = pattern_is_match(
fusion_pattern, seen_q_op_info, qstate.idx_to_seen_q_op_infos)
if is_match:
cur_fqns = [seen_q_op_info.fqn]
cur_seen_q_op_info = seen_q_op_info
for _element in fusion_pattern[:-1]:
users = get_users_of_seen_q_op_info(
qstate.idx_to_seen_q_op_infos, cur_seen_q_op_info)
cur_seen_q_op_info = users[0]
cur_fqns.append(cur_seen_q_op_info.fqn)
# we check for existence to ensure the final fusion list
# is deduplicated, in case the same op is called multiple
# times in a single forward
if cur_fqns not in results:
results.append(cur_fqns)
return results
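# Illustrative sketch (`model` is assumed to have been traced so that
# `_auto_quant_state` is attached to its submodules); the returned FQN groups
# have the same shape that Eager mode fusion expects:
#
#   fusion_fqns = get_module_fusion_fqns(model)
#   if len(fusion_fqns):
#       model = torch.ao.quantization.fuse_modules(model, fusion_fqns)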
| pytorch-master | torch/ao/quantization/_dbr/fusion.py |
from typing import Callable, List, Tuple, Any, Optional, Dict
import torch
import torch.nn.functional as F
from .mappings import (
conv_ops,
ops_are_related,
)
from .utils import (
_raise_obs_not_found_error,
_raise_obs_op_mismatch,
op_needs_quantization,
SeenQOpInfo,
SeenNonQOpInfo,
QTensorInfo,
FuncOutputObsType,
get_func_output_obs_type,
converted_func_needs_scale_zp,
FuncOutputDTypeType,
get_func_output_dtype_type,
get_quantized_op,
get_input_observed_arg_idxs,
get_packable_tensor_arg_idxs,
get_param_name,
get_packable_nontensor_arg_idxs,
get_packable_arg_idxs,
get_weight_arg_idx,
iterate_and_apply,
get_op_packing_only_uses_module_attributes,
get_packable_tensor_kwarg_names,
clone_detach_tensor_without_dispatch,
get_input_args_quant_dequant_info,
get_cur_qconfig,
OpQuantizeabilityType,
)
from .function_fusion import (
match_fusion_patterns,
get_seen_q_op_info_of_start_of_fusion,
get_seen_q_op_info_of_end_of_fusion,
)
from ..qconfig_mapping import (
QConfigMapping,
)
from torch.ao.quantization.utils import (
activation_is_int32_quantized,
)
OpConvertInfo = Tuple[
# quantized equivalent of original op (None means keep original)
Optional[Callable],
# arg_quant_infos, each element is (scale, zp, dtype) for quantized and None otherwise
List[Optional[Tuple[float, int, torch.dtype]]],
# arg_dequant_infos, each element is True if this arg needs a dequant
List[bool],
# packed param name, if the op has a packed param
Optional[str],
# additional kwargs, such as output scale and zero_point
Dict[str, Any],
# any_arg_quant_or_dequant_needed, if False then we can skip looking at
# arg_quant_infos and arg_dequant_infos, for performance
bool,
# any_arg_kwarg_modification_needed, if False then we can return original
# args and kwargs, for performance
bool,
]
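# Purely illustrative example of the shape of this tuple for a quantized
# functional linear whose first input needs a quant (all names and values
# below are made up):
#   (torch.ops.quantized.linear, [(scale, zp, torch.quint8), None], [False, False],
#    'fc1_packed_weight', {'scale': out_scale, 'zero_point': out_zp}, True, True)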
# TODO(future PR): maybe better name
# TODO(future PR): add serialization support
class AutoQuantizationState(torch.nn.Module):
"""
Contains state necessary to perform auto quantization on the parent
`nn.Module` instance.
"""
idx : int
def __init__(
self,
qconfig_mapping: QConfigMapping,
fqn: str,
input_dtypes: Any = None,
output_dtypes: Any = None,
):
super().__init__()
self.idx = 0
self.qconfig_mapping = qconfig_mapping
self.fqn = fqn
# this is a ModuleDict in order to properly register observers
# to be within the module hierarchy.
self.tensor_id_to_observer = torch.nn.ModuleDict()
# TODO(future PR): include kwargs
# Note: seen quantizeable ops are recorded with an index,
# because we enforce order of execution. However, seen
# unquantizeable ops are recorded without an index, because
# we do not enforce order of execution.
self.idx_to_seen_q_op_infos: Dict[int, SeenQOpInfo] = {}
self.seen_nonq_op_infos: List[SeenNonQOpInfo] = []
# qtensor_info objects of tensor outputs of the module, specified
# in order of iteration through the output type. Non-tensor outputs
# are represented with `None`.
self.output_qtensor_infos: List[Optional[QTensorInfo]] = []
self.input_dtypes = input_dtypes
self.output_dtypes = output_dtypes
# key: idx of seen op
# value: name of packed weight
# note: this is filled out right before convert
self.idx_to_packed_weight_name: Dict[int, str] = {}
self.tensor_id_to_scale_zp: Dict[int, Tuple[torch.Tensor, torch.Tensor]] = {}
# Numeric Suite add_loggers functionality
# if this flag is True, op outputs will be saved for debugging
self.log_op_outputs = False
# data structure to save op outputs for debugging
# * outer list represents the different model forward call instances
# * inner list represents the different op forward call instances in a
# model forward
# TODO(future PR): handle types which are not torch.Tensor
# TODO(future PR): use the Logger class and allow user overrides of it
self.op_outputs: List[List[Tuple[
int, # global op idx
Optional[str], # fqn
Callable, # fp32 op type (TODO future PR: add quantized op type)
torch.Tensor, # value
]]] = []
# model name to use in logging results
        self.logging_model_name: Optional[str] = None
self.idx_to_op_convert_info: Dict[int, OpConvertInfo] = {}
# If this is True, module outputs will be checked and converted
# to the dtype specified by the user. If this is False, module outputs
# will be returned as is. This value can be precalculated and it is set
# to its final value after tracing.
self.needs_dtype_transform_on_outputs = True
def get_extra_state(self):
return {"tensor_id_to_scale_zp": self.tensor_id_to_scale_zp}
def set_extra_state(self, state):
self.tensor_id_to_scale_zp = state["tensor_id_to_scale_zp"]
for _, seen_q_op_info in self.idx_to_seen_q_op_infos.items():
self.idx_to_op_convert_info[seen_q_op_info.idx] = \
self.calculate_op_convert_info(seen_q_op_info)
def has_at_least_one_seen_q_op_info(self) -> bool:
return len(self.idx_to_seen_q_op_infos) > 0
def validate_is_at_last_seen_idx(self) -> None:
is_at_last_seen_idx = (
len(self.idx_to_seen_q_op_infos) == 0 or
self.idx == len(self.idx_to_seen_q_op_infos)
)
if not is_at_last_seen_idx:
raise AssertionError(
f"Cur idx: {self.idx}, expected idx: {len(self.idx_to_seen_q_op_infos)}")
def extra_repr(self) -> str:
s = ""
# idx_to_seen_q_op_infos
if len(self.idx_to_seen_q_op_infos):
s += "(seen_q_op_infos): {\n"
for k, v in self.idx_to_seen_q_op_infos.items():
s += f" {k}: {v}\n"
s += "}\n"
else:
s += "(seen_q_op_infos): {}\n"
if len(self.seen_nonq_op_infos):
s += "(seen_nonq_op_infos): {\n"
for n in self.seen_nonq_op_infos:
s += f" {n}\n"
s += "}\n"
else:
s += "(seen_nonq_op_infos): {}\n"
# output_qtensor_infos
s += "(output_qtensor_infos): ["
for i in self.output_qtensor_infos:
s += f"{i} "
s += "]\n"
# idx_to_packed_weight_name
if len(self.idx_to_packed_weight_name):
s += "(idx_to_packed_weight_name): {\n"
for k, v in self.idx_to_packed_weight_name.items(): # type: ignore[assignment]
s += f" {k}: {v}\n"
s += "}\n"
else:
s += "(idx_to_packed_weight_name): {}\n"
if len(self.tensor_id_to_scale_zp):
s += "(tensor_id_to_scale_zp): {\n"
for k, v in self.tensor_id_to_scale_zp.items(): # type: ignore[assignment]
s += f" {k}: {v}\n"
s += "}"
return s
def _get_cur_seen_q_op_info(self):
return self.idx_to_seen_q_op_infos[self.idx]
def get_cur_output_inf_dtype(self):
return self._get_cur_seen_q_op_info().output_tensor_infos[0].inf_dtype
def reset_to_new_call(self):
"""
Resets the internal op counter to start a new top level module call
"""
# torch.nn.Module __setattr__ has overhead,
# this code is the explicit fast path for `self.idx = 0`
object.__setattr__(self, 'idx', 0)
if self.log_op_outputs:
self.op_outputs.append([])
def cur_op_needs_hooks(self, cur_op: Callable) -> bool:
return op_needs_quantization(cur_op)
def validate_cur_op(self, cur_op: Callable) -> None:
"""
This function is expected to be called before any new function or
module call which needs hooks. It validates that the new function or
module is of the expected type based on the order of execution.
"""
try:
seen_q_op_info = self._get_cur_seen_q_op_info()
expected_op = seen_q_op_info.type
        except (IndexError, KeyError):
            # idx_to_seen_q_op_infos is a dict keyed on the op index, so a
            # missing entry surfaces as a KeyError
            _raise_obs_not_found_error(cur_op)
if not ops_are_related(cur_op, expected_op, seen_q_op_info.type_is_module):
_raise_obs_op_mismatch(cur_op, expected_op)
def mark_cur_op_complete(self, cur_op: Callable) -> None:
"""
This function is expected to be called after a function or module
processing is complete.
"""
# torch.nn.Module __setattr__ has overhead,
# this code is the explicit fast path for `self.idx += 1`
object.__setattr__(self, 'idx', self.idx + 1)
def first_call_outputs_prepare_hook(
self,
outputs: Any,
qtensor_id: List[int],
) -> Any:
"""
This function is expected to be called on the outputs of a prepared
module right before they are returned to the parent, during tracing.
"""
outputs = self._first_call_assign_qtensor_infos_to_mod_outputs(
outputs, qtensor_id)
return outputs
def outputs_prepare_hook(
self,
outputs: Any,
) -> Any:
"""
This function is expected to be called on the outputs of a prepared
module right before they are returned to the parent.
"""
return outputs
def outputs_convert_hook(
self,
outputs: Any,
) -> Any:
"""
This function is expected to be called on the outputs of a converted
module right before they are returned to the parent.
"""
outputs = self._maybe_mod_outputs_dtype_transform(outputs)
return outputs
def get_output_qtensor_infos(self) -> List[Optional[QTensorInfo]]:
"""
Used by the conversion to torch.jit.script.
"""
return self.output_qtensor_infos
def get_output_dtypes(self) -> Any:
"""
Used by the conversion to torch.jit.script.
"""
return self.output_dtypes
def first_call_op_prepare_before_hook(
self,
op: Callable,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
qtensor_id: List[int],
fqn: str,
root_module: torch.nn.Module,
op_quantizeability_type: OpQuantizeabilityType,
) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
"""
This function is expected to be called on args and kwargs of
`op` directly before `op` is executed, during tracing.
We record the type of `op`
and the IDs of its tensor inputs. Note: we add a placeholder for IDs
of tensor outputs, the placeholder will be filled out during the
`op_prepare_after_hook`.
The function returns modified `args` and `kwargs`.
"""
return self._first_call_op_prepare_before_hook_create_subgraphs(
op, args, kwargs, qtensor_id, fqn, root_module,
op_quantizeability_type)
def op_prepare_before_hook(
self,
op: Callable,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
"""
This function is expected to be called on args and kwargs of
`op` directly before `op` is executed.
We do the following:
* pass the inputs through observers, if needed
The function returns modified `args` and `kwargs`.
"""
seen_q_op_info = self._get_cur_seen_q_op_info()
def _maybe_observe(arg, tensor_info):
tensor_id = tensor_info.id
# TODO: do not run this twice on input and output
if str(tensor_id) in self.tensor_id_to_observer:
observer = self.tensor_id_to_observer[str(tensor_id)]
return observer(arg)
else:
return arg
args = iterate_and_apply(
args, seen_q_op_info.input_tensor_infos, _maybe_observe)
return args, kwargs
def first_call_op_prepare_after_hook(
self,
op: Callable,
output: Any,
args: Tuple[Any, ...],
qtensor_id: List[int],
op_quantizeability_type: OpQuantizeabilityType,
) -> Any:
"""
This function is called after an op call on a prepared model.
* create an observer for the output, if needed, and record it in
`tensor_id_to_observer`
* amend the current seen op with the tensor ID of the output
"""
self._first_call_op_prepare_after_hook_adjust_subgraphs(
op, output, args, qtensor_id, op_quantizeability_type)
return output
def op_prepare_after_hook(
self,
op: Callable,
output: Any,
args: Tuple[Any, ...],
global_op_idx: List[int],
) -> Any:
"""
This function is called after an op call on a prepared model.
* observe the output, if needed
"""
seen_q_op_info = self._get_cur_seen_q_op_info()
# if we are in a fusion, we only observe at the end of it
is_fusion = seen_q_op_info.fusion_info is not None
is_end_of_fusion = seen_q_op_info.fusion_info is not None and \
seen_q_op_info.fusion_info.is_last_element
if is_fusion:
if is_end_of_fusion:
                # observe at the end of the fusion, using the info
                # of the base op
seen_q_op_info_start = get_seen_q_op_info_of_start_of_fusion(
seen_q_op_info, self.idx_to_seen_q_op_infos)
# use the obs type from beginning of pattern
func_output_obs_type = get_func_output_obs_type(seen_q_op_info_start)
if func_output_obs_type != FuncOutputObsType.NONE:
# use the output tensor ID from the end of pattern
tensor_id = seen_q_op_info.output_tensor_infos[0].id
obs = self.tensor_id_to_observer[str(tensor_id)]
output = obs(output)
else:
# do not observe in the middle of fusions
pass
else:
            # not part of a fusion, observe as normal
func_output_obs_type = get_func_output_obs_type(seen_q_op_info)
# TODO(future PR): other output types
if func_output_obs_type != FuncOutputObsType.NONE:
tensor_id = seen_q_op_info.output_tensor_infos[0].id
obs = self.tensor_id_to_observer[str(tensor_id)]
output = obs(output)
if self.log_op_outputs:
output_clone = clone_detach_tensor_without_dispatch(output)
self.op_outputs[-1].append(
(global_op_idx[0], seen_q_op_info.fqn, seen_q_op_info.type, output_clone))
global_op_idx[0] += 1
return output
def op_convert_before_hook(
self,
op: Callable,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
root_module: torch.nn.Module,
) -> Tuple[Callable, Tuple[Any, ...], Dict[str, Any]]:
"""
This function is called before an op call in a converted model.
For each arg in `args`, quantizes it if necessary.
Returns potentially modified `op`, potentially modified `args`,
potentially modified `kwargs`.
"""
# TODO generalize this for more things
# currently:
# * can quantize args (via arg_quant_infos)
# * can add scale and zp (via additional kwargs)
# needed for F.conv2d
# F.conv2d(input, weight, bias, stride, padding, dilation, groups)
# to
# q.conv2d(input, packed_params, scale, zero_point)
orig_op = op
maybe_new_op, arg_quant_infos, arg_dequant_infos, packed_param_name, \
additional_kwargs, any_arg_quant_or_dequant_needed, \
any_arg_kwarg_modification_needed = self.get_op_convert_info(op)
if maybe_new_op is not None:
op = maybe_new_op
if not any_arg_kwarg_modification_needed:
return op, args, kwargs
# print(op, arg_quant_infos, packed_param_name, additional_kwargs)
# potentially quantize args, based on arg_quant_infos
new_args = []
if any_arg_quant_or_dequant_needed:
tensor_arg_idx = 0
# TODO: refactor this to use iterate_and_apply
if orig_op is torch.cat: # torch.cat variants
# input tensors
new_first_arg = []
for arg in args[0]:
# TODO: handle non-tensor inputs
quant_info = arg_quant_infos[tensor_arg_idx]
dequant_info = arg_dequant_infos[tensor_arg_idx]
if quant_info is not None:
scale, zp, dtype = quant_info
arg = torch.quantize_per_tensor(arg, scale, zp, dtype)
if dequant_info is True:
# Note: both quant and dequant paths are taken for
# reference ops.
arg = arg.dequantize()
new_first_arg.append(arg)
tensor_arg_idx += 1
new_args = [new_first_arg, *args[1:]]
else:
for arg in args:
# TODO: handle non-tensor inputs
                    # TODO: this does not handle non-tensor tuple args (for example,
                    # dilation in conv2d) correctly; it happens to work today but
                    # needs a proper fix.
quant_info = arg_quant_infos[tensor_arg_idx]
dequant_info = arg_dequant_infos[tensor_arg_idx]
if quant_info is not None:
scale, zp, dtype = quant_info
arg = torch.quantize_per_tensor(arg, scale, zp, dtype)
if dequant_info is True:
# Note: both quant and dequant paths are taken for
# reference ops.
arg = arg.dequantize()
new_args.append(arg)
tensor_arg_idx += 1
else:
new_args = [*args]
# if there is a packed param, replace the relevant args
if packed_param_name is not None:
new_args_with_packed = []
packable_arg_idxs = get_packable_arg_idxs(orig_op)
added_packed = False
for idx, arg in enumerate(new_args):
if packable_arg_idxs is not None and idx in packable_arg_idxs:
if not added_packed:
packed_param = getattr(root_module, packed_param_name)
new_args_with_packed.append(packed_param)
added_packed = True
else:
new_args_with_packed.append(arg)
new_args = new_args_with_packed
# potentially extend kwargs with scale and zero_point
# TODO move op-specific logic out of here
if len(additional_kwargs):
if orig_op not in conv_ops and orig_op != F.linear:
kwargs.update(**additional_kwargs)
else:
seen_q_op_info = self._get_cur_seen_q_op_info()
if seen_q_op_info.output_tensor_infos[0].inf_dtype == torch.quint8:
new_args.append(additional_kwargs['scale'])
new_args.append(additional_kwargs['zero_point'])
# TODO move op-specific logic out of here
if op is torch.ops.quantized.linear:
kwargs.pop('bias', None)
return op, tuple(new_args), kwargs
def op_convert_after_hook(
self,
op: Callable,
output,
global_op_idx: List[int],
) -> Any:
"""
This function is called after an op call in a converted model.
"""
# TODO(future PR): improve performance by moving this out of the
# path of non-reference ops
seen_q_op_info = self._get_cur_seen_q_op_info()
if seen_q_op_info.is_reference_op_at_inference:
# given the current reference module design,
# we need to quantize to the target dtype
output_tensor_info = seen_q_op_info.output_tensor_infos[0]
tensor_id, inf_dtype = \
output_tensor_info.id, output_tensor_info.inf_dtype
scale, zp = self.tensor_id_to_scale_zp[tensor_id]
output = torch.quantize_per_tensor(
output, scale, zp, inf_dtype)
if self.log_op_outputs:
output_clone = clone_detach_tensor_without_dispatch(output)
seen_q_op_info = self._get_cur_seen_q_op_info()
self.op_outputs[-1].append(
(global_op_idx[0], seen_q_op_info.fqn, seen_q_op_info.type, output_clone))
global_op_idx[0] += 1
return output
def get_op_convert_info(
self,
op: Callable,
) -> OpConvertInfo:
"""
Returns the information needed for convert time modifications to `op`.
"""
return self.idx_to_op_convert_info[self.idx]
def calculate_op_convert_info(
self,
seen_q_op_info: SeenQOpInfo,
) -> OpConvertInfo:
"""
This precalculates the information which will be returned by
`get_op_convert_info`.
"""
# calculate new op
maybe_new_op = get_quantized_op(
seen_q_op_info, self.idx_to_seen_q_op_infos)
# calculate quant infos
arg_quant_infos, arg_dequant_infos, any_arg_quant_or_dequant_needed = \
get_input_args_quant_dequant_info(
seen_q_op_info, self.tensor_id_to_scale_zp)
# get packed param name, if applicable
packed_param_name = self._get_packed_param_name(seen_q_op_info)
# calculate scale and zp for output
# TODO: instead of always doing this if there is an observer,
# calculate whether this is needed based on the op and dtypes
additional_kwargs = {}
needs_scale_zp = converted_func_needs_scale_zp(seen_q_op_info)
if needs_scale_zp:
cur_seen_q_op_info = seen_q_op_info
# if this is a start of a fusion pattern, get the observer
# from the end of the fusion
is_start_of_fusion = seen_q_op_info.fusion_info and \
seen_q_op_info.fusion_info.is_first_element
if is_start_of_fusion:
cur_seen_q_op_info = get_seen_q_op_info_of_end_of_fusion(
seen_q_op_info, self.idx_to_seen_q_op_infos)
output_tensor_infos = cur_seen_q_op_info.output_tensor_infos
tensor_id = output_tensor_infos[0].id
scale, zp = self.tensor_id_to_scale_zp[tensor_id]
additional_kwargs.update({'scale': scale, 'zero_point': zp})
any_arg_kwarg_modification_needed = bool(
any_arg_quant_or_dequant_needed or
packed_param_name is not None or
len(additional_kwargs)
) # the cast to bool is to make mypy recognize this as a bool
return maybe_new_op, arg_quant_infos, arg_dequant_infos, \
packed_param_name, additional_kwargs, any_arg_quant_or_dequant_needed, \
any_arg_kwarg_modification_needed
def _get_packed_param_name(self, seen_q_op_info: SeenQOpInfo) -> Optional[str]:
"""
If the op in seen_q_op_info has a quantized packed param, returns it.
Otherwise, returns None.
"""
return self.idx_to_packed_weight_name.get(seen_q_op_info.idx, None)
def _first_call_assign_qtensor_infos_to_mod_outputs_tensor(
self,
output: torch.Tensor,
qtensor_id: List[int],
) -> torch.Tensor:
"""
This is a helper function for _first_call_assign_qtensor_infos_to_mod_outputs
to handle iterables of tensors without code duplication.
"""
if not hasattr(output, '_qtensor_info'):
# TODO: use actual dtype instead of defaulting to float
output._qtensor_info = QTensorInfo( # type: ignore[attr-defined]
qtensor_id[0], output.dtype, torch.float)
qtensor_id[0] += 1
self.output_qtensor_infos.append(output._qtensor_info) # type: ignore[attr-defined]
# TODO(future PR): add an observer if needed
return output
def _first_call_assign_qtensor_infos_to_mod_outputs(
self,
outputs: Any,
qtensor_id: List[int],
) -> Any:
"""
Takes `outputs`, which are a set of values about to be returned from
the current module. If `_qtensor_info` attributes do not already exist
on any tensors in `outputs`, this function adds them, initializing the
dtype to `torch.float`. This allows us to reason about module output
dtypes even if the last op in the module is not quantizeable.
"""
# TODO: handle objects with deeper nested tensors
if isinstance(outputs, torch.Tensor):
self._first_call_assign_qtensor_infos_to_mod_outputs_tensor(outputs, qtensor_id)
elif isinstance(outputs, tuple):
# TODO: handle other tuple subclasses more generically
new_outputs = []
for output in outputs:
if isinstance(output, torch.Tensor):
new_outputs.append(self._first_call_assign_qtensor_infos_to_mod_outputs_tensor(
output, qtensor_id))
else:
new_outputs.append(output)
# hacky check for collections.namedtuple, TODO improve this
# https://stackoverflow.com/questions/2166818/how-to-check-if-an-object-is-an-instance-of-a-namedtuple
if hasattr(outputs, '_fields'):
outputs = outputs.__class__(*new_outputs)
else:
outputs = tuple(new_outputs)
else:
pass
return outputs
def set_needs_dtype_transform_on_outputs(self):
"""
Calculates whether a dtype transform on module outputs is needed
and stores it. This is used to skip the outputs hook if it is not
needed.
"""
self.needs_dtype_transform_on_outputs = False
if not len(self.output_qtensor_infos):
# if there are no tensor outputs, there is nothing to transform
return
qtensor_info = self.output_qtensor_infos[0]
if self.output_dtypes is not None:
assert qtensor_info is not None
# check the output dtype, and do the conversion if needed
output_dtype = self.output_dtypes[0]
if qtensor_info.inf_dtype != output_dtype:
assert output_dtype is torch.float, \
'non-float output dtypes not handled yet'
self.needs_dtype_transform_on_outputs = True
def _maybe_mod_outputs_dtype_transform(
self,
outputs: Any,
) -> Any:
"""
Takes `outputs` which are about to be returned from this module
to the caller. If this module has restrictions on the dtypes of
tensors it has to return, does the dtype conversion. Otherwise,
does nothing.
"""
if not self.needs_dtype_transform_on_outputs:
return outputs
if isinstance(outputs, torch.Tensor):
qtensor_info = self.output_qtensor_infos[0]
if self.output_dtypes is not None:
assert qtensor_info is not None
# check the output dtype, and do the conversion if needed
output_dtype = self.output_dtypes[0]
if qtensor_info.inf_dtype != output_dtype:
assert output_dtype is torch.float, \
'non-float output dtypes not handled yet'
outputs = outputs.dequantize()
else:
# if no output dtype was specified, do nothing
pass
return outputs
def _first_call_op_prepare_before_hook_create_subgraphs_tensor(
self,
op: Callable,
arg: Any,
arg_tensor_infos: List[Optional[QTensorInfo]],
qtensor_id: List[int],
) -> None:
"""
Runs the prepare hook during first_call for individual
tensors. If the input argument is a tensor, this function is
called directly. If the input argument is an iterable such
as a list or a tuple, this function is called on each element of
        the iterable.
"""
# TODO(next): fix this for torch.cat
if not isinstance(arg, torch.Tensor):
arg_tensor_infos.append(None)
return
# If a tensor does not have an ID, add it. This allows
# us to track inputs shared by multiple quantizeable modules.
if not hasattr(arg, '_qtensor_info'):
arg._qtensor_info = QTensorInfo( # type: ignore[attr-defined]
qtensor_id[0], arg.dtype, arg.dtype)
qtensor_id[0] += 1
arg_tensor_infos.append(arg._qtensor_info) # type: ignore[attr-defined]
def _first_call_op_prepare_before_hook_create_subgraphs(
self,
op: Callable,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
qtensor_id: List[int],
fqn: str,
root_module: torch.nn.Module,
op_quantizeability_type: OpQuantizeabilityType,
) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
"""
Given an op, args, kwargs about to be executed, records the subgraph
of this op in `self`.
"""
arg_tensor_infos: List[Optional[QTensorInfo]] = []
for arg in args:
if isinstance(arg, (list, tuple)):
for inner_arg in arg:
self._first_call_op_prepare_before_hook_create_subgraphs_tensor(
op, inner_arg, arg_tensor_infos, qtensor_id)
else:
self._first_call_op_prepare_before_hook_create_subgraphs_tensor(
op, arg, arg_tensor_infos, qtensor_id)
if op_quantizeability_type is OpQuantizeabilityType.NOT_QUANTIZEABLE:
op_type_is_module = isinstance(op, torch.nn.Module)
op_type : Callable = type(op) if op_type_is_module else op # type: ignore[assignment]
self.seen_nonq_op_infos.append(SeenNonQOpInfo(
op_type, arg_tensor_infos, []))
return args, kwargs
op_packing_only_uses_module_attributes = \
get_op_packing_only_uses_module_attributes(op, args, kwargs, root_module)
packable_tensor_idx_to_name = {}
packable_nontensor_idx_to_arg = {}
packable_tensor_kwarg_name_to_name = {}
if op_packing_only_uses_module_attributes:
packable_tensor_arg_idxs = get_packable_tensor_arg_idxs(op)
if packable_tensor_arg_idxs is not None:
for arg_idx in packable_tensor_arg_idxs:
if arg_idx >= len(args):
continue
arg = args[arg_idx]
param_name = get_param_name(root_module, arg)
packable_tensor_idx_to_name[arg_idx] = param_name
packable_nontensor_arg_idxs = get_packable_nontensor_arg_idxs(op)
if packable_nontensor_arg_idxs is not None:
for arg_idx in packable_nontensor_arg_idxs:
packable_nontensor_idx_to_arg[arg_idx] = args[arg_idx]
packable_tensor_kwarg_names = \
get_packable_tensor_kwarg_names(op)
if packable_tensor_kwarg_names is not None:
for kwarg_name in packable_tensor_kwarg_names:
if kwarg_name not in kwargs:
continue
kwarg = kwargs[kwarg_name]
kwarg_name_on_module = get_param_name(root_module, kwarg)
packable_tensor_kwarg_name_to_name[kwarg_name] = \
kwarg_name_on_module
if self.idx not in self.idx_to_seen_q_op_infos:
op_type_is_module = isinstance(op, torch.nn.Module)
op_type = type(op) if op_type_is_module else op # type: ignore[assignment]
qconfig = get_cur_qconfig(self.qconfig_mapping, fqn, op_type)
# TODO(future PR): use API flag instead of qconfig for is_reference
is_reference_op_at_inference = \
qconfig is not None and activation_is_int32_quantized(qconfig)
self.idx_to_seen_q_op_infos[self.idx] = SeenQOpInfo(
self.idx, op_type, op_type_is_module, fqn, arg_tensor_infos, [],
packable_tensor_idx_to_name, packable_nontensor_idx_to_arg,
packable_tensor_kwarg_name_to_name,
op_packing_only_uses_module_attributes, qconfig, None,
is_reference_op_at_inference)
return args, kwargs
def _first_call_op_prepare_after_hook_adjust_subgraphs(
self,
op: Callable,
output: Any,
args: Tuple[Any, ...],
qtensor_id: List[int],
op_quantizeability_type: OpQuantizeabilityType,
) -> None:
"""
After `op` was just executed, modifies the subgraph recorded
for this op with the information about the output. Note, this
has to be done in the "after" hook because the output of the op
does not exist in the "before" hook.
"""
# TODO(future PR): check if _qtensor_id needs to become an actual
# attribute of Tensor
# TODO(future PR): handle non-tensor outputs
if op_quantizeability_type is OpQuantizeabilityType.QUANTIZEABLE:
seen_q_op_info = self._get_cur_seen_q_op_info()
func_output_dtype_type = get_func_output_dtype_type(seen_q_op_info)
if func_output_dtype_type == FuncOutputDTypeType.DTYPE_DEPENDS_ON_QCONFIG:
qconfig = get_cur_qconfig(
self.qconfig_mapping, seen_q_op_info.fqn,
seen_q_op_info.type)
if qconfig is None:
dtype_to_use = torch.float
else:
dtype_to_use = qconfig.activation().dtype
elif func_output_dtype_type == FuncOutputDTypeType.DTYPE_DEFAULT_BC_UNSUPPORTED_SYNTAX:
dtype_to_use = torch.float
else:
# TODO(future PR): respect qconfig for torch.cat
if isinstance(args[0], (tuple, list)): # for torch.cat
unique_arg_dtypes = [
arg._qtensor_info.inf_dtype for arg in args[0]]
assert len(set(unique_arg_dtypes)) == 1, \
'an iterable with arguments with different inference ' + \
'dtypes is not supported yet'
dtype_to_use = args[0][0]._qtensor_info.inf_dtype
else:
dtype_to_use = args[0]._qtensor_info.inf_dtype
else:
dtype_to_use = None # type: ignore[assignment]
def _add_output_qtensor_info(output, dtype_to_use):
if dtype_to_use is None:
dtype_to_use = output.dtype
output._qtensor_info = QTensorInfo(
qtensor_id[0], output.dtype, dtype_to_use) # type: ignore[arg-type]
if op_quantizeability_type is OpQuantizeabilityType.QUANTIZEABLE:
target = self.idx_to_seen_q_op_infos[self.idx].output_tensor_infos
else:
target = self.seen_nonq_op_infos[-1].output_tensor_infos
target.append(output._qtensor_info)
qtensor_id[0] += 1
if isinstance(output, torch.Tensor):
_add_output_qtensor_info(output, dtype_to_use)
elif isinstance(output, tuple):
for element in output:
if isinstance(element, torch.Tensor):
_add_output_qtensor_info(element, dtype_to_use)
def match_fusion_patterns(self):
match_fusion_patterns(self.idx_to_seen_q_op_infos)
def _maybe_insert_input_observers(self, seen_q_op_info: SeenQOpInfo):
func_output_dtype_type = get_func_output_dtype_type(seen_q_op_info)
input_observed_arg_idxs = get_input_observed_arg_idxs(
seen_q_op_info.type, seen_q_op_info.type_is_module)
if func_output_dtype_type == FuncOutputDTypeType.DTYPE_DEPENDS_ON_QCONFIG:
for idx, tensor_info in enumerate(seen_q_op_info.input_tensor_infos):
if tensor_info is None:
continue
if input_observed_arg_idxs is not None and \
idx not in input_observed_arg_idxs:
continue
qconfig = get_cur_qconfig(
self.qconfig_mapping, seen_q_op_info.fqn, seen_q_op_info.type)
if qconfig is None:
# If qconfig is None, we do not need any input observers
continue
elif tensor_info.inf_dtype != torch.quint8:
# TODO(future PR): this assumes current dtype is quint8,
# this is not always true
# TODO(future PR): currently this only handles float32 and
# quint8, we need to extend it to other dtypes
tensor_id = tensor_info.id # type: ignore[attr-defined]
weight_arg_idx = get_weight_arg_idx(seen_q_op_info.type)
obs = qconfig.weight() if idx == weight_arg_idx else \
qconfig.activation()
self.tensor_id_to_observer[str(tensor_id)] = obs
def _maybe_insert_output_observers(
self,
seen_q_op_info: SeenQOpInfo,
root_module: torch.nn.Module,
):
if seen_q_op_info.fusion_info is not None:
if not seen_q_op_info.fusion_info.is_first_element:
# if we are in a fusion but not at the start, do not insert observer
return
else:
# if we are in a fusion and at the start, insert observer for its end
# get the output of the end of the fusion
cur_seen_q_op_info = get_seen_q_op_info_of_end_of_fusion(
seen_q_op_info, self.idx_to_seen_q_op_infos)
output_tensor_id = cur_seen_q_op_info.output_tensor_infos[0].id
else:
output_tensor_id = seen_q_op_info.output_tensor_infos[0].id
func_output_obs_type = get_func_output_obs_type(seen_q_op_info)
if func_output_obs_type == FuncOutputObsType.NEW_OBS:
# TODO(future PR): check qconfig is None
qconfig = get_cur_qconfig(
self.qconfig_mapping, seen_q_op_info.fqn, seen_q_op_info.type)
assert qconfig is not None
self.tensor_id_to_observer[str(output_tensor_id)] = \
qconfig.activation()
elif func_output_obs_type == FuncOutputObsType.REUSES_FIRST_INPUT_OBS:
assert seen_q_op_info.input_tensor_infos[0] is not None
first_input_tensor_id = seen_q_op_info.input_tensor_infos[0].id
first_input_obs = \
self.tensor_id_to_observer[str(first_input_tensor_id)]
self.tensor_id_to_observer[str(output_tensor_id)] = first_input_obs
def insert_observers(self, root_module: torch.nn.Module):
for idx, seen_q_op_info in self.idx_to_seen_q_op_infos.items():
self._maybe_insert_input_observers(seen_q_op_info)
self._maybe_insert_output_observers(seen_q_op_info, root_module)
def get_output_observer_from_fqn(self, fqn: str) -> Optional[torch.nn.Module]:
for idx, seen_q_op_info in self.idx_to_seen_q_op_infos.items():
if seen_q_op_info.fqn != fqn:
continue
output_tensor_id = seen_q_op_info.output_tensor_infos[0].id
if str(output_tensor_id) in self.tensor_id_to_observer:
return self.tensor_id_to_observer[str(output_tensor_id)]
return None
# This is a hack to enable nn.Sequential to properly work with
# this class.
# TODO(future): remove the hack
def forward(self, x):
raise NotImplementedError('Calling AutoQuantizationState.forward is not supported')
# return x
| pytorch-master | torch/ao/quantization/_dbr/quantization_state.py |
import torch
from typing import Callable, Dict
from ..qconfig_mapping import QConfigMapping
TYPE_TO_REPLACEMENT_TYPE: Dict[Callable, Callable] = {
torch.add: torch.Tensor.add,
torch.Tensor.add_: torch.Tensor.add,
torch.mul: torch.Tensor.mul,
torch.Tensor.mul_: torch.Tensor.mul,
}
def normalize_object_types(qconfig_mapping: QConfigMapping) -> None:
"""
This function looks for entries in `qconfig_mapping.object_type_qconfigs`
corresponding to PyTorch overrides of Python math functions
such as `torch.add` and `torch.mul`. If any of these functions are found,
it changes the type to the tensor variant of these functions.
This is needed because the tensor variant is what is expected
within the framework.
"""
for object_type, qconfig in list(qconfig_mapping.object_type_qconfigs.items()):
replacement_type = TYPE_TO_REPLACEMENT_TYPE.get(object_type, None) # type: ignore[arg-type]
if replacement_type is not None:
del qconfig_mapping.object_type_qconfigs[object_type]
qconfig_mapping.object_type_qconfigs[replacement_type] = qconfig
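# Illustrative usage sketch (hypothetical qconfig; not part of the original file):
# >>> qconfig_mapping = QConfigMapping().set_object_type(torch.add, some_qconfig)
# >>> normalize_object_types(qconfig_mapping)
# >>> torch.Tensor.add in qconfig_mapping.object_type_qconfigs
# True
# >>> torch.add in qconfig_mapping.object_type_qconfigs
# False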
| pytorch-master | torch/ao/quantization/_dbr/qconfig_mapping_utils.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.quantized as nnq
toq = torch.ops.quantized
from torch.ao.quantization.quantization_mappings import (
DEFAULT_STATIC_QUANT_MODULE_MAPPINGS,
DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS,
DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS,
)
import operator
from typing import Callable
# TODO(future PR): reuse all of these with existing quantization mappings
fp32_to_int8_fun_mapping = {
torch.Tensor.add: torch.ops.quantized.add,
torch.Tensor.add_: torch.ops.quantized.add,
torch.add: torch.ops.quantized.add,
operator.add: torch.ops.quantized.add,
operator.iadd: torch.ops.quantized.add,
torch.Tensor.mul: torch.ops.quantized.mul,
torch.mul: torch.ops.quantized.mul,
operator.mul: torch.ops.quantized.mul,
torch.cat: torch.ops.quantized.cat,
F.conv1d: torch.ops.quantized.conv1d,
F.conv2d: torch.ops.quantized.conv2d,
F.conv3d: torch.ops.quantized.conv3d,
F.linear: toq.linear,
}
# TODO: enforce that both the keys and the values of fp32_to_int8_fun_mapping
# are in functions_supported_by_quantization
functions_supported_by_quantization = set([
torch.Tensor.add,
torch.Tensor.add_,
torch.Tensor.mul,
torch.add,
torch.mul,
torch.cat,
# adding for MobileNetV2, will need a better place for these
torch.nn.functional.adaptive_avg_pool2d,
F.hardsigmoid,
torch.flatten,
toq.add,
toq.mul,
toq.cat,
F.conv1d,
F.conv2d,
F.conv3d,
toq.conv1d,
toq.conv2d,
toq.conv3d,
F.dropout,
torch.relu,
F.relu,
F.linear,
toq.linear,
])
module_types_supported_by_quantization = set()
module_types_supported_by_quantization |= \
set(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS.keys())
module_types_supported_by_quantization |= \
set(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS.values())
module_types_supported_by_quantization |= \
set(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS.keys())
module_types_supported_by_quantization |= \
set(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS.values())
module_types_supported_by_quantization |= \
set(DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS.keys())
module_types_supported_by_quantization |= \
set(DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS.values())
module_types_supported_by_quantization |= set([
# these are quantizeable modules which do not need swaps
nn.ReLU,
nn.Dropout,
nn.Identity,
])
module_types_supported_by_quantization -= set([
# TODO(future PR): enable DBR quantization for embeddings
nn.Embedding,
nnq.Embedding,
nn.EmbeddingBag,
nnq.EmbeddingBag,
])
# These can work in either fp32 or quint8, without the need for observation
# TODO: better name
module_types_supported_by_quantization_preserves_dtype = set([
nn.Identity,
nn.Dropout,
])
functions_supported_by_quantization_preserves_dtype = set([
F.dropout,
])
add_and_mul_ops = set([
torch.add,
torch.Tensor.add,
torch.Tensor.add_,
torch.mul,
torch.Tensor.mul,
])
# TODO(future): reuse global mapping
known_module_fusion_patterns = [
(torch.nn.Conv2d, torch.nn.ReLU),
(torch.nn.Conv2d, torch.nn.BatchNorm2d),
]
# TODO(future): reuse global mapping
known_function_fusion_patterns_and_replacements = {
(torch.Tensor.add, torch.relu): toq.add_relu,
}
binary_related_ops = (
(torch.add, torch.Tensor.add),
(torch.add, torch.Tensor.add_),
(torch.Tensor.add, torch.Tensor.add_),
(torch.mul, torch.Tensor.mul),
(torch.mul, torch.Tensor.mul_),
(torch.Tensor.mul, torch.Tensor.mul_),
)
conv_ops = set([
F.conv1d,
F.conv2d,
F.conv3d,
])
conv_prepack_fns = {
F.conv1d: toq.conv1d_prepack,
F.conv2d: toq.conv2d_prepack,
F.conv3d: toq.conv3d_prepack,
}
# TODO(future PR): reuse global mapping
a_related_to_b = set()
for a, b in binary_related_ops:
a_related_to_b.add((a, b))
a_related_to_b.add((b, a))
for a, b in DEFAULT_STATIC_QUANT_MODULE_MAPPINGS.items():
a_related_to_b.add((a, b))
a_related_to_b.add((b, a))
for a, b in DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS.items():
a_related_to_b.add((a, b))
a_related_to_b.add((b, a))
for a, b in DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS.items():
a_related_to_b.add((a, b))
a_related_to_b.add((b, a))
for a, b in fp32_to_int8_fun_mapping.items():
a_related_to_b.add((a, b))
a_related_to_b.add((b, a))
def ops_are_related(
cur_op: Callable,
expected_op_type: Callable,
type_is_module: bool,
) -> bool:
# if isinstance(cur_op, torch.nn.Module):
if type_is_module:
cur_op = type(cur_op)
return cur_op == expected_op_type or \
(cur_op, expected_op_type) in a_related_to_b
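# Illustrative examples (based on the relatedness mappings built above):
# >>> ops_are_related(torch.Tensor.add_, torch.Tensor.add, type_is_module=False)
# True
# >>> ops_are_related(nn.Conv2d(1, 1, 1), nn.Conv2d, type_is_module=True)
# True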
# validity checks
# TODO: move these out
for m in module_types_supported_by_quantization_preserves_dtype:
assert m in module_types_supported_by_quantization, \
f"{m} needs to be added to module_types_supported_by_quantization"
for f in functions_supported_by_quantization_preserves_dtype:
assert f in functions_supported_by_quantization, \
f"{f} needs to be added to functions_supported_by_quantization"
| pytorch-master | torch/ao/quantization/_dbr/mappings.py |
import builtins
import collections
import math
import operator
import warnings
from collections.abc import Iterable
from enum import Enum
from functools import partial, reduce, wraps
from typing import Callable, List, Optional, overload, Sequence, Tuple, Union
import torch
import torch._prims as prims
import torch._prims_common as utils
from torch._prims_common import (
check,
DeviceLikeType,
DimsSequenceType,
DimsType,
dtype_to_type,
ELEMENTWISE_TYPE_PROMOTION_KIND,
is_weakly_lesser_type,
Number,
NumberType,
REDUCTION_OUTPUT_TYPE_KIND,
ShapeType,
StrideType,
TensorLike,
TensorLikeType,
TensorOrNumberLikeType,
TensorSequenceType,
)
from torch._prims_common.wrappers import (
_maybe_convert_to_dtype,
_maybe_resize_out,
_safe_copy_out,
elementwise_type_promotion_wrapper,
elementwise_unary_scalar_wrapper,
out_wrapper,
)
# Experimental module containing prototype Python references for existing
# PyTorch operations.
__all__ = [
#
# Elementwise Unary References
#
"abs",
"acos",
"acosh",
"asinh",
"asin",
"atan",
"atanh",
"bitwise_not",
# "cbrt", # No corresponding torch operation
"ceil",
"conj_physical",
"cos",
"cosh",
"digamma",
"erf",
"erfinv",
"erfc",
"exp",
"expm1",
"exp2",
"fill",
"floor",
"frac",
"isfinite",
"isinf",
"isnan",
"i0",
"lgamma",
"log",
"log1p",
"log2",
"log10",
"nan_to_num",
"neg",
"positive",
"reciprocal",
"round", # TODO: model kwargs
"sigmoid",
"sign",
"signbit",
"sin",
"sinh",
"sqrt",
"square",
"tan",
"tanh",
"trace",
"trunc",
#
# Elementwise Binary References
#
"add",
"atan2",
"bitwise_and",
"bitwise_left_shift",
"bitwise_or",
"bitwise_right_shift",
"bitwise_xor",
# "complex",
"copysign",
"div",
"eq",
"float_power",
"floor_divide",
"fmax",
"fmin",
"fmod",
"gcd",
"ge",
"gt",
"heaviside",
"hypot",
"igamma",
"igammac",
"imag",
"isclose",
"lcm",
# 'ldexp',
"le",
"logical_and",
"logical_not",
"logical_or",
"logical_xor",
"lt",
# 'max', # implement with reductions
"maximum",
# 'min', # implement with reductions
"minimum",
"mul",
"ne",
"nextafter",
# 'polar', # abs, cos, sin
"pow",
"real",
"remainder",
"rsub",
# # special.xlog1py
# # special.zeta
"sub",
"true_divide",
"trunc_divide",
# 'xlogy', # where?, log, mul
#
# Elementwise Ternary References
#
"clamp",
#
# Conditional references
#
"masked_fill",
"where",
#
# Data conversion and movement references
#
"clone",
"copy_to", # TODO: add OpInfo (or implement .to)
"item", # TODO: add OpInfo
#
# Reduction ops
#
"all",
"amax",
"amin",
"any",
"mean",
"std_mean",
"var_mean",
"sum",
"prod",
"var",
#
# Linear algebra ops
#
"addr",
#
# View & Shape Ops
#
"atleast_1d",
"atleast_2d",
"atleast_3d",
"as_strided",
"broadcast_shapes",
"broadcast_tensors",
"broadcast_to",
"cat",
"chunk",
"column_stack",
"conj",
"constant_pad_nd",
"contiguous",
"dsplit",
"dstack",
"expand",
"flatten",
"flip",
"fliplr",
"flipud",
"hsplit",
"hstack",
"meshgrid",
"narrow",
"native_layer_norm",
"permute",
"ravel",
"reshape",
"roll",
"rot90",
"rsqrt",
"stack",
"swap_axes", # alias for transpose
"squeeze",
"t",
"tensor_split",
"transpose",
"unsqueeze",
"view",
"vsplit",
"vstack",
"unflatten",
"unbind",
#
# Tensor Creation
#
"empty",
"empty_like",
"empty_strided",
"full",
"full_like",
"ones",
"ones_like",
"scalar_tensor",
"zeros",
"zeros_like",
"arange",
"linspace",
"logspace",
#
# Randomness References
#
"uniform", # TODO: add OpInfo -- and testing for randomness?
#
# Test-related functions
#
"allclose",
"equal", # TODO: add OpInfo
]
Tensor = torch.Tensor
def _broadcast_shapes(*_shapes):
shapes = tuple(
(x,) if isinstance(x, int) else x
for x in filter(lambda x: x is not None, _shapes)
)
# Short-circuits on no input
if len(shapes) == 0:
return None
# Type checking
# TODO: make common validations available as utils
for shape in shapes:
assert isinstance(shape, Sequence)
# Computes common shape
common_shape = [
1,
] * reduce(max, (len(shape) for shape in shapes))
for shape in shapes:
for idx in range(-1, -1 - len(shape), -1):
if common_shape[idx] == 1:
if shape[idx] < 0:
raise ValueError(
"Attempting to broadcast a dimension with negative length!"
)
common_shape[idx] = shape[idx]
elif shape[idx] != 1:
if common_shape[idx] != shape[idx]:
                    raise RuntimeError(
                        f"Attempting to broadcast a dimension of length {shape[idx]}!"
                    )
return common_shape
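# Illustrative examples of the broadcast rule implemented above:
# >>> _broadcast_shapes((3, 1), (4,))
# [3, 4]
# >>> _broadcast_shapes((2, 1, 5), (3, 1))
# [2, 3, 5]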
def _maybe_broadcast(*args, preserve_cpu_scalar_tensors=True):
# Computes common shape
common_shape = _broadcast_shapes(
*map(lambda t: t.shape if isinstance(t, TensorLike) else None, args)
)
def __maybe_broadcast(x, shape):
if x is None:
return None
elif isinstance(x, Number):
return x
elif isinstance(x, TensorLike):
if preserve_cpu_scalar_tensors and utils.is_cpu_scalar_tensor(x):
return x
if not utils.same_shape(x.shape, common_shape):
common_rank = len(common_shape) + 1
start = common_rank - (len(x.shape) + 1)
dims = tuple(range(start, len(x.shape) + start))
return prims.broadcast_in_dim(x, common_shape, dims)
return x
else:
raise RuntimeError(
"Unexpected type when broadcasting: " + str(type(x)) + "!"
)
return tuple(__maybe_broadcast(x, common_shape) for x in args)
# Utilities should come BEFORE this import
from torch._decomp import register_decomposition
#
# Elementwise unary references
#
infer_aten_op = object()
# TODO: add type promotion support
def _make_elementwise_unary_reference(
type_promotion_kind,
*,
aten_op=infer_aten_op,
disable_meta=False,
extra_meta=None,
) -> Callable:
def inner(prim: Callable):
nonlocal aten_op
@wraps(prim)
@out_wrapper()
@elementwise_unary_scalar_wrapper
@elementwise_type_promotion_wrapper(
type_promoting_args=("a",),
type_promotion_kind=type_promotion_kind,
)
def _ref(a: TensorLikeType) -> TensorLikeType:
if not isinstance(a, TensorLike):
raise RuntimeError(
"Expected a tensor input for an elementwise unary operation!"
)
if extra_meta is not None:
extra_meta(a)
return prim(a)
if aten_op is infer_aten_op:
aten_op = getattr(torch.ops.aten, prim.__name__)
if aten_op is not None:
register_decomposition(aten_op, disable_meta=disable_meta)(_ref)
return _ref
return inner
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT)
def abs(a):
return prims.abs(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def acos(a):
return prims.acos(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def acosh(a):
return prims.acosh(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def asin(a):
return prims.asin(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def asinh(a):
return prims.asinh(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def atan(a):
return prims.atan(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def atanh(a):
return prims.atanh(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)
def bitwise_not(a):
return prims.bitwise_not(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)
def ceil(a):
return prims.ceil(a)
@register_decomposition(torch.ops.aten.conj_physical)
@out_wrapper()
def conj_physical(input: TensorLikeType):
if not input.dtype.is_complex:
return input
return prims.conj_physical(input)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def cos(a):
return prims.cos(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def cosh(a):
return prims.cosh(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def digamma(a):
return prims.digamma(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def erf(a):
return prims.erf(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def erfinv(a):
return prims.erf_inv(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def erfc(a):
return prims.erfc(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def exp(a):
return prims.exp(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def expm1(a):
return prims.expm1(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def exp2(a):
return prims.exp2(a)
# Fill has its own implementation because it has a value parameter
# CompositeImplicitAutograd - don't register decomp
@out_wrapper()
@elementwise_type_promotion_wrapper(
type_promoting_args=("a,"),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH,
)
def fill(a: TensorLikeType, value: NumberType) -> TensorLikeType:
assert isinstance(a, TensorLike)
assert isinstance(value, Number)
python_type = utils.dtype_to_type(a.dtype)
if not utils.is_weakly_lesser_type(type(value), python_type):
msg = "value argument of type {0} cannot be safely cast to type {1}!".format(
type(value), python_type
)
raise ValueError(msg)
return prims.fill(a, value)
def fill_(a: TensorLikeType, value: NumberType) -> TensorLikeType:
r = prims.fill(a, value)
prims.copy_to(a, r)
return a
def zero_(a: TensorLikeType) -> TensorLikeType:
r = prims.fill(a, 0)
prims.copy_to(a, r)
return a
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)
def floor(a):
return prims.floor(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)
def frac(x: TensorLikeType) -> TensorLikeType:
trunc_x = mul(floor(abs(x)), sign(x))
return sub(x, trunc_x)
# imag does not use _make_elementwise_unary_reference because it does not support out
def imag(a: TensorLikeType) -> TensorLikeType:
assert isinstance(a, TensorLike)
utils.check(
utils.is_complex_dtype(a.dtype), lambda: "imag only supports complex tensors."
)
return prims.imag(a)
@_make_elementwise_unary_reference(
ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
aten_op=None, # CompositeImplicitAutograd
)
def isfinite(a: TensorLikeType) -> TensorLikeType:
if utils.is_float_dtype(a.dtype) or utils.is_complex_dtype(a.dtype):
return prims.isfinite(a)
return ones_like(a, dtype=torch.bool)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL)
def isinf(a: TensorLikeType) -> TensorLikeType:
if utils.is_complex_dtype(a.dtype):
return logical_or(isinf(real(a)), isinf(imag(a)))
return logical_not(logical_or(isnan(a), isfinite(a)))
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL)
def isposinf(a: TensorLikeType) -> TensorLikeType:
utils.check(
not utils.is_complex_dtype(a.dtype),
lambda: f"Complex dtype is not supported for isposinf, got dtype {a.dtype}",
)
if utils.is_float_dtype(a.dtype):
return eq(a, float("inf"))
return zeros_like(a, dtype=torch.bool)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL)
def isneginf(a: TensorLikeType) -> TensorLikeType:
utils.check(
not utils.is_complex_dtype(a.dtype),
lambda: f"Complex dtype is not supported for isneginf, got dtype {a.dtype}",
)
if utils.is_float_dtype(a.dtype):
return eq(a, float("-inf"))
return zeros_like(a, dtype=torch.bool)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL)
def isnan(a: TensorLikeType) -> TensorLikeType:
return prims.ne(a, a)
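# Illustrative example: NaN is the only value that compares unequal to itself,
# which is what the prims.ne(a, a) formulation above relies on.
# >>> isnan(torch.tensor([1.0, float('nan')]))
# tensor([False,  True])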
# TODO: if this is special maybe it should be defined there and imported here?
@_make_elementwise_unary_reference(
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, aten_op=torch.ops.aten.special_i0
)
def i0(a):
return prims.bessel_i0(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def lgamma(a):
return prims.lgamma(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def log(a):
return prims.log(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def log1p(a):
return prims.log1p(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def log2(a):
return prims.log2(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def log10(a):
return prims.log10(a)
@out_wrapper()
def log_softmax(
a: TensorLikeType,
dim: int,
*,
dtype: Optional[torch.dtype] = None,
) -> TensorLikeType:
result_dtype = dtype or a.dtype
computation_dtype = utils.get_computation_dtype(a.dtype)
a_ = _maybe_convert_to_dtype(a, computation_dtype)
return _maybe_convert_to_dtype(a_ - logsumexp(a_, dim, keepdim=True), result_dtype) # type: ignore[return-value]
@out_wrapper()
def logsumexp(
a: TensorLikeType,
dim: DimsType,
keepdim: bool = False,
) -> TensorLikeType:
dim = utils.canonicalize_dims(a.ndim, dim)
# ATen specifies int[1] type dims which expands integers to tuples of length 1
if not isinstance(dim, Iterable):
dim = (dim,)
if utils.is_float_dtype(a.dtype) or utils.is_complex_dtype(a.dtype):
# For float and complex dtypes, we shift input to exp by a constant to avoid overflow
a_max = amax(a, dim, keepdim=True)
a_max = where(abs(a_max) == float("inf"), 0.0, a_max)
a_max_squeezed = prims.squeeze(a_max, dim) if not keepdim else a_max
result = log(sum(exp(a - a_max), dim, keepdim=keepdim)) + a_max_squeezed
else:
# This case covers boolean and integer dtypes and we use non-stabilized computation
result = log(sum(exp(a), dim, keepdim=keepdim))
return result
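# Illustrative example of the max-shift stabilization above: a naive
# log(sum(exp(a))) would overflow to inf for large inputs.
# >>> logsumexp(torch.tensor([1000.0, 1000.0]), dim=0)
# tensor(1000.6931)  # approximately 1000 + log(2)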
@register_decomposition(torch.ops.aten.nan_to_num)
@out_wrapper()
def nan_to_num(
a: TensorLikeType,
nan: Optional[NumberType] = 0.0,
posinf: Optional[NumberType] = None,
neginf: Optional[NumberType] = None,
) -> TensorLikeType:
assert isinstance(a, TensorLike)
if utils.is_boolean_dtype(a.dtype) or utils.is_integer_dtype(a.dtype):
return clone(a)
if nan is None:
nan = 0.0
if posinf is None:
posinf = prims.maximum_value(a.dtype)
if neginf is None:
neginf = prims.minimum_value(a.dtype)
result = where(isnan(a), nan, a)
is_neg = signbit(a)
is_neginf = bitwise_and(isinf(a), is_neg)
result = where(is_neginf, neginf, result)
is_posinf = bitwise_and(isinf(a), bitwise_not(is_neg))
result = where(is_posinf, posinf, result)
return result
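# Illustrative example with the default replacements (nan -> 0,
# +/-inf -> the dtype's extreme finite values), approximate output:
# >>> nan_to_num(torch.tensor([float('nan'), float('inf'), -float('inf'), 1.0]))
# tensor([ 0.0000e+00,  3.4028e+38, -3.4028e+38,  1.0000e+00])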
def _neg_meta(a: TensorLikeType):
if a.dtype is torch.bool:
msg = "neg is not supported on bool tensors."
raise RuntimeError(msg)
@_make_elementwise_unary_reference(
ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, extra_meta=_neg_meta
)
def neg(a):
return prims.neg(a)
# positive does not use _make_elementwise_unary_reference because it does not support out
# CompositeImplicitAutograd - don't register decomp
def positive(a: TensorLikeType) -> TensorLikeType:
assert isinstance(a, TensorLike)
if a.dtype is torch.bool:
msg = "positive does not support bool tensors."
raise RuntimeError(msg)
return a
# real does not use _make_elementwise_unary_reference because it does not support out
def real(a: TensorLikeType) -> TensorLikeType:
assert isinstance(a, TensorLike)
if utils.is_complex_dtype(a.dtype):
return prims.real(a)
return a
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def reciprocal(a):
return prims.reciprocal(a)
# TODO: round takes additional kwargs
@_make_elementwise_unary_reference(
ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=None, # TODO: this does need a decomp, but kwarg handling is needed
)
def round(a):
return prims.round(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def rsqrt(a):
return prims.rsqrt(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def sigmoid(a: TensorLikeType) -> TensorLikeType:
return true_divide(1, add(1, exp(neg(a))))
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)
def sign(a):
return prims.sign(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL)
def signbit(a):
return prims.signbit(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def sin(a):
return prims.sin(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def sinh(a):
return prims.sinh(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def sqrt(a):
return prims.sqrt(a)
@_make_elementwise_unary_reference(
ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG,
aten_op=None, # CompositeImplicitAutograd,
)
def square(a: TensorLikeType) -> TensorLikeType:
return mul(a, a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def tan(a):
return prims.tan(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def tanh(a):
return prims.tanh(a)
@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)
def trunc(a):
return prims.trunc(a)
def _make_elementwise_binary_reference(
prim: Callable,
*,
type_promotion_kind,
aten_op=infer_aten_op,
has_out=True,
supports_lhs_python_scalar=True,
supports_rhs_python_scalar=True,
disable_meta=False,
) -> Callable:
@elementwise_type_promotion_wrapper(
type_promoting_args=("a", "b"),
type_promotion_kind=type_promotion_kind,
)
def _ref(
a: Union[Tensor, NumberType],
b: Union[Tensor, NumberType],
) -> Tensor:
if not supports_lhs_python_scalar and isinstance(a, Number):
raise ValueError(
"Received a lhs Python scalar to an elementwise binary operation that does not accept lhs scalars!"
)
if not supports_rhs_python_scalar and isinstance(b, Number):
raise ValueError(
"Received a rhs Python scalar to an elementwise binary operation that does not accept rhs scalars!"
)
# TODO: enable this for operations that support it, like add
if isinstance(a, Number) and isinstance(b, Number):
raise ValueError(
"Receive two Number inputs to an elementwise binary operation!"
)
a, b = _maybe_broadcast(a, b)
return prim(a, b)
if has_out:
_ref = out_wrapper()(_ref)
if aten_op is infer_aten_op:
aten_op = getattr(torch.ops.aten, prim.__name__.split(".")[0])
if aten_op is not None:
register_decomposition(aten_op, disable_meta=disable_meta)(_ref)
return _ref
# Add has its own implementation because it has an alpha argument
@register_decomposition(torch.ops.aten.add)
@out_wrapper()
@elementwise_type_promotion_wrapper(
type_promoting_args=("a", "b"),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def add(
a: Union[TensorLikeType, NumberType],
b: Union[TensorLikeType, NumberType],
*,
alpha: Optional[NumberType] = None,
):
"""
Reference implementation of torch.add
"""
if isinstance(a, Number) and isinstance(b, Number):
raise ValueError(
"Receive two Number inputs to an elementwise binary operation!"
)
a, b = _maybe_broadcast(a, b)
if alpha is not None:
dtype = a.dtype if isinstance(a, TensorLike) else b.dtype # type: ignore[union-attr]
python_type = utils.dtype_to_type(dtype)
if not utils.is_weakly_lesser_type(type(alpha), python_type):
msg = (
"alpha argument of type {0} cannot be safely cast to type {1}!".format(
type(alpha), python_type
)
)
raise ValueError(msg)
b = prims.mul(b, alpha)
return prims.add(a, b)
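# Illustrative example of the alpha handling above (b is scaled before the add):
# >>> add(torch.ones(2), torch.ones(2), alpha=3)
# tensor([4., 4.])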
# TODO: add docstring
atan2 = _make_elementwise_binary_reference(
prims.atan2,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
supports_lhs_python_scalar=False,
supports_rhs_python_scalar=False,
)
# TODO: add docstring
bitwise_and = _make_elementwise_binary_reference(
prims.bitwise_and,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
# TODO: add docstring
bitwise_left_shift = _make_elementwise_binary_reference(
prims.shift_left,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=torch.ops.aten.bitwise_left_shift, # prim/aten name mismatch
)
# TODO: add docstring
bitwise_or = _make_elementwise_binary_reference(
prims.bitwise_or,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
# TODO: add docstring
bitwise_right_shift = _make_elementwise_binary_reference(
prims.shift_right_arithmetic,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=torch.ops.aten.bitwise_right_shift, # prim/aten name mismatch
)
# TODO: add docstring
bitwise_xor = _make_elementwise_binary_reference(
prims.bitwise_xor,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def _copysign(
a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]
):
if isinstance(b, Number) and isinstance(a, Tensor):
b = scalar_tensor(b, dtype=a.dtype, device=a.device)
elif isinstance(a, Tensor) and isinstance(b, Tensor) and a.device != b.device:
msg = "Expected divisor (b) to be on the same device ({0}) as dividend (a), but it is found on {1}!".format(
a.device, b.device
)
raise RuntimeError(msg)
return where(signbit(b), neg(abs(a)), abs(a))
# TODO: add docstring
copysign = _make_elementwise_binary_reference(
_copysign,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
supports_lhs_python_scalar=False,
aten_op=torch.ops.aten.copysign,
)
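# Illustrative example: the magnitude comes from the first argument and the
# sign from the second.
# >>> copysign(torch.tensor([1., 2.]), torch.tensor([-1., 1.]))
# tensor([-1.,  2.])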
# TODO: add docstring
# complex = _make_elementwise_binary_reference(prims.complex, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)
@register_decomposition(torch.ops.aten.div)
@out_wrapper()
def div(
a: Union[TensorLikeType, NumberType],
b: Union[TensorLikeType, NumberType],
*,
rounding_mode: Optional[str] = None,
):
"""
Reference implementation of torch.div
"""
if rounding_mode is None:
return true_divide(a, b)
elif rounding_mode == "trunc":
return trunc_divide(a, b)
elif rounding_mode == "floor":
return floor_divide(a, b)
else:
msg = (
"div expected rounding_mode to be one of None, 'trunc', or 'floor' "
"but found {0}.".format(rounding_mode)
)
raise ValueError(msg)
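# Illustrative examples of the rounding modes dispatched above:
# >>> div(torch.tensor(-7.), torch.tensor(2.))
# tensor(-3.5000)
# >>> div(torch.tensor(-7.), torch.tensor(2.), rounding_mode='trunc')
# tensor(-3.)
# >>> div(torch.tensor(-7.), torch.tensor(2.), rounding_mode='floor')
# tensor(-4.)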
# TODO: add docstring
eq = _make_elementwise_binary_reference(
prims.eq,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
supports_lhs_python_scalar=False,
)
def _pow(
a: Union[TensorLikeType, NumberType],
b: Union[TensorLikeType, NumberType],
) -> TensorLikeType:
assert isinstance(a, TensorLikeType) or isinstance(b, TensorLikeType)
if isinstance(b, Number):
if b == 1.0:
return a.clone() # type: ignore[return-value,union-attr]
elif b == 2.0:
return a * a # type: ignore[return-value]
elif b == 0.5:
return torch.sqrt(a) # type: ignore[arg-type]
return prims.pow(a, b)
# TODO: add docstring
pow = _make_elementwise_binary_reference(
_pow,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG,
aten_op=torch.ops.aten.pow,
)
# TODO: add docstring
# Float power has its own implementation because it has unique type promotion.
# NB: aten_op not registered because CompositeExplicitAutograd
@out_wrapper()
def float_power(
a: Union[TensorLikeType, NumberType],
b: Union[TensorLikeType, NumberType],
) -> Tensor:
if isinstance(a, Number) and isinstance(b, Number):
raise ValueError(
"Receive two Number inputs to an elementwise binary operation!"
)
# Handles type promotion
dtype = utils.get_higher_dtype(a, b)
assert dtype is not None
if utils.is_complex_dtype(dtype):
dtype = torch.complex128
else:
dtype = torch.float64
# Float power has the following contiguous cast behavior to be
# consistent with its C++ impl
if isinstance(a, TensorLike) and a.dtype != dtype:
a = prims.to_dtype(a, dtype)
if isinstance(b, TensorLike) and b.dtype != dtype:
b = prims.to_dtype(b, dtype)
a, b = _maybe_broadcast(a, b)
return pow(a, b)
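# Illustrative example of float_power's promotion to double precision
# (complex inputs would promote to complex128 instead):
# >>> float_power(torch.tensor(2), 3).dtype
# torch.float64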
# >>> a = torch.tensor(-0.2500, dtype=torch.float64)
# tensor(-0.250000000000000, dtype=torch.float64)
#
# >>> b = torch.tensor(-0.0010, dtype=torch.float64)
# tensor(-0.001000000000000, dtype=torch.float64)
#
# Note: In this case, casting float to double will expand the float mantissa with zeros,
# while creating a double generates a distinct mantissa.
# >>> torch.tensor(-0.001).to(dtype=torch.float64)
# tensor(-0.001000000047497, dtype=torch.float64)
#
# Floor Division
# The difference is caused because torch.remainder(a, b) = -0.001.
#
# >>> torch.floor(torch.true_divide(a, b))
# tensor(250., dtype=torch.float64)
#
# >>> torch.div(a, b, rounding_mode='floor')
# tensor(249., dtype=torch.float64)
#
# Definition: a // b = (a - remainder(a, b)) / b
# >>> torch.true_divide(torch.sub(a, torch.remainder(a, b)), b)
# tensor(249., dtype=torch.float64)
#
# For reference, see CPython's implementation:
# https://github.com/python/cpython/blob/ace008c531dd685a30c1dd68f9b5ba35f20171cf/Objects/floatobject.c#L636
def _floor_divide(
a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]
):
# Wrap scalars because some references only accept tensor arguments.
if isinstance(a, Number) and isinstance(b, Number):
a = scalar_tensor(a)
b = scalar_tensor(b)
elif isinstance(b, Number) and isinstance(a, Tensor):
b = scalar_tensor(b, dtype=a.dtype, device=a.device)
elif isinstance(a, Number) and isinstance(b, Tensor):
a = scalar_tensor(a, dtype=b.dtype, device=b.device)
elif isinstance(a, Tensor) and isinstance(b, Tensor) and a.device != b.device:
if a.device == torch.device("cpu"):
msg = "Expected divisor (b) to be on the same device ({0}) as dividend (a), but it is found on {1}!".format(
a.device, b.device
)
raise RuntimeError(msg)
else:
b = prims.device_put(b, device=a.device)
assert isinstance(a, Tensor) and isinstance(b, Tensor)
dtype = a.dtype
if utils.is_float_dtype(dtype):
return _floor_divide_float(a, b)
elif utils.is_integer_dtype(dtype):
return _floor_divide_integer(a, b)
else:
check(False, lambda: f"{dtype} not supported for floor_divide")
def _floor_divide_integer(a: Tensor, b: Tensor) -> Tensor:
a, b = _maybe_broadcast(a, b)
if not a.dtype.is_signed:
return prims.div(a, b)
# Convert truncation to flooring:
offset = (torch.signbit(a) != torch.signbit(b)).logical_and(torch.fmod(a, b) != 0)
return prims.div(a, b) - prims.convert_element_type(offset, a.dtype)
def _floor_divide_float(a: Tensor, b: Tensor) -> Tensor:
mod = fmod(a, b)
div = true_divide(sub(a, mod), b)
# Ensure that the remainder has the same sign as denominator
different_signed_inputs = bitwise_xor(lt(a, 0), lt(b, 0))
non_zero_remainder = ne(mod, 0)
mask = bitwise_and(non_zero_remainder, different_signed_inputs)
div = where(mask, sub(div, 1), div)
# Map quotient to nearest integer value
floor_div = floor(div)
mask = gt(sub(div, floor_div), 0.5)
floor_div = where(mask, add(floor_div, 1), floor_div)
basic_div = true_divide(a, b)
zero_tensor = scalar_tensor(0, dtype=basic_div.dtype, device=basic_div.device)
# If quotient is zero, copy signbit from true_divide quotient
floor_div = where(ne(div, 0), floor_div, copysign(zero_tensor, basic_div))
# If denominator is zero, then follow true_divide behavior
return where(ne(b, 0), floor_div, basic_div)
# TODO: add docstring
floor_divide = _make_elementwise_binary_reference(
_floor_divide,
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=torch.ops.aten.floor_divide,
)
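# Illustrative example of the flooring (rather than truncating) behavior
# implemented above (not from the original source):
# >>> torch.floor_divide(torch.tensor([7.0, -7.0]), 2)
# tensor([ 3., -4.])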
# TODO: add docstring
fmax = _make_elementwise_binary_reference(
prims.fmax,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=torch.ops.aten.fmax,
supports_lhs_python_scalar=False,
supports_rhs_python_scalar=False,
)
# TODO: add docstring
fmin = _make_elementwise_binary_reference(
prims.fmin,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=torch.ops.aten.fmin,
supports_lhs_python_scalar=False,
supports_rhs_python_scalar=False,
)
# TODO: add docstring
fmod = _make_elementwise_binary_reference(
prims.fmod,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=torch.ops.aten.fmod,
supports_lhs_python_scalar=False,
supports_rhs_python_scalar=True,
)
# TODO: add docstring
gcd = _make_elementwise_binary_reference(
prims.gcd,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=torch.ops.aten.gcd,
supports_lhs_python_scalar=False,
supports_rhs_python_scalar=False,
)
# TODO: add docstring
ge = _make_elementwise_binary_reference(
prims.ge,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
supports_lhs_python_scalar=False,
)
# TODO: add docstring
gt = _make_elementwise_binary_reference(
prims.gt,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
supports_lhs_python_scalar=False,
)
def _heaviside(input: TensorLikeType, values: TensorLikeType) -> TensorLikeType:
input_eq_zero = eq(input, 0)
input_lt_zero = logical_or(lt(input, 0), isnan(input))
zeros_and_ones = where(input_lt_zero, 0, 1)
output = where(input_eq_zero, values, zeros_and_ones)
return output
heaviside = _make_elementwise_binary_reference(
_heaviside,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH,
supports_lhs_python_scalar=False,
supports_rhs_python_scalar=False,
aten_op=torch.ops.aten.heaviside,
)
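# Illustrative example (not from the original source; assumes standard
# eager-mode torch.heaviside semantics):
# >>> torch.heaviside(torch.tensor([-1.5, 0.0, 2.0]), torch.tensor(0.5))
# tensor([0.0000, 0.5000, 1.0000])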
hypot = _make_elementwise_binary_reference(
prims.hypot,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
supports_lhs_python_scalar=False,
supports_rhs_python_scalar=False,
)
igamma = _make_elementwise_binary_reference(
prims.igamma,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
supports_lhs_python_scalar=False,
supports_rhs_python_scalar=False,
)
igammac = _make_elementwise_binary_reference(
prims.igammac,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
supports_lhs_python_scalar=False,
supports_rhs_python_scalar=False,
)
def _check_close_args(
name: str,
a: TensorLikeType,
b: TensorLikeType,
rtol: float,
atol: float,
) -> None:
check(
a.dtype == b.dtype,
lambda: "{0}: Attempting to compare tensors of different dtypes {1} and {2}!".format(
name, a.dtype, b.dtype
),
ValueError,
)
check(
rtol >= 0,
lambda: "{0}: rtol must be greater than or equal to zero, but got {1}!".format(
name, rtol
),
)
check(
atol >= 0,
lambda: "{0}: atol must be greater than or equal to zero, but got {1}!".format(
name, atol
),
)
# CompositeImplicitAutograd - don't register decomp
def isclose(
a: TensorLikeType,
b: TensorLikeType,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
) -> TensorLikeType:
_check_close_args(name="torch.isclose", a=a, b=b, rtol=rtol, atol=atol)
close = eq(a, b)
if equal_nan and (utils.is_float_dtype(a.dtype) or utils.is_complex_dtype(a.dtype)):
close = logical_or(close, logical_and(isnan(a), isnan(b)))
# Note: In case of zero tolerances the closeness inequality degenerates to an equality check.
# In this case, the short-circuit prevents false positives as detailed in the paragraph below.
if atol == 0 and rtol == 0:
return close
# Note [closeness error computation]
# atol and rtol are provided as doubles, so the computation
# rtol * other will produce a float or complex tensor.
# When the difference (self - other) is compared to it then the
# tensor representing the difference will also be cast to float or complex.
# However, since (self - other) in uint8 is very likely to produce a
# negative value, this moves the cast forward so the difference is
# always computed in a float or complex type.
# If the values of the integer tensors cannot be exactly represented
# by the default scalar type then this may cause an incorrect result.
if not utils.is_float_dtype(a.dtype) and not utils.is_complex_dtype(a.dtype):
a = prims.convert_element_type(a, torch.get_default_dtype())
b = prims.convert_element_type(b, torch.get_default_dtype())
allowed_error = add(atol, abs(mul(b, rtol)))
actual_error = abs(sub(a, b))
# Computes finite closeness
result = logical_or(
close, logical_and(isfinite(actual_error), le(actual_error, allowed_error))
)
return result
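# Illustrative example (not from the original source; assumes standard
# eager-mode torch.isclose semantics with default rtol=1e-05, atol=1e-08):
# >>> a = torch.tensor([1.0, float("nan")])
# >>> b = torch.tensor([1.0 + 1e-7, float("nan")])
# >>> torch.isclose(a, b)
# tensor([ True, False])
# >>> torch.isclose(a, b, equal_nan=True)
# tensor([True, True])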
def _lcm(a: TensorLikeType, b: TensorLikeType):
g = gcd(a, b)
return where(eq(g, 0), 0, abs(mul(true_divide(a, g), b)))
# TODO: add docstring
lcm = _make_elementwise_binary_reference(
_lcm,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=torch.ops.aten.lcm,
supports_lhs_python_scalar=False,
supports_rhs_python_scalar=False,
)
# TODO: add docstring
le = _make_elementwise_binary_reference(
prims.le,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
supports_lhs_python_scalar=False,
)
def _logical_and(a: TensorLikeType, b: TensorLikeType):
if not utils.is_boolean_dtype(a.dtype):
a = a != 0
if not utils.is_boolean_dtype(b.dtype):
b = b != 0
return a & b
logical_and = _make_elementwise_binary_reference(
_logical_and,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
aten_op=torch.ops.aten.logical_and,
)
@_make_elementwise_unary_reference(
ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, aten_op=torch.ops.aten.logical_not
)
def logical_not(a: TensorLikeType):
if not utils.is_boolean_dtype(a.dtype):
return a == 0
return ~a
def _logical_or(a: TensorLikeType, b: TensorLikeType):
if not utils.is_boolean_dtype(a.dtype):
a = a != 0
if not utils.is_boolean_dtype(b.dtype):
b = b != 0
return bitwise_or(a, b)
logical_or = _make_elementwise_binary_reference(
_logical_or,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
aten_op=torch.ops.aten.logical_or,
)
def _logical_xor(a: TensorLikeType, b: TensorLikeType):
if not utils.is_boolean_dtype(a.dtype):
a = a != 0
if not utils.is_boolean_dtype(b.dtype):
b = b != 0
return a ^ b
# TODO: skip unnecessary conversion of long to float
logical_xor = _make_elementwise_binary_reference(
_logical_xor,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
aten_op=torch.ops.aten.logical_xor,
)
# TODO: add docstring
lt = _make_elementwise_binary_reference(
prims.lt,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
supports_lhs_python_scalar=False,
)
# TODO: add docstring
maximum = _make_elementwise_binary_reference(
prims.maximum,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
# TODO: add docstring
minimum = _make_elementwise_binary_reference(
prims.minimum,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
# TODO: add docstring
mul = _make_elementwise_binary_reference(
prims.mul,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
# TODO: add docstring
ne = _make_elementwise_binary_reference(
prims.ne,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
supports_lhs_python_scalar=False,
)
# TODO: add docstring
nextafter = _make_elementwise_binary_reference(
prims.nextafter,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH,
supports_lhs_python_scalar=False,
supports_rhs_python_scalar=False,
)
# TODO: add docstring
remainder = _make_elementwise_binary_reference(
prims.remainder,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=torch.ops.aten.remainder,
)
# reverse sub
def rsub(
a: Union[TensorLikeType, NumberType],
b: Union[TensorLikeType, NumberType],
*,
alpha: Optional[NumberType] = None,
):
if isinstance(a, Number):
msg = "Received a Number for the first argument, but expected a Tensor"
raise ValueError(msg)
return sub(b, a, alpha=alpha)
# TODO: add docstring
# TODO: consider refactoring this with add impl
# sub has its own implementation because it has an alpha argument
@register_decomposition(torch.ops.aten.sub)
@out_wrapper()
@elementwise_type_promotion_wrapper(
type_promoting_args=("a", "b"),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def sub(
a: Union[TensorLikeType, NumberType],
b: Union[TensorLikeType, NumberType],
*,
alpha: Optional[NumberType] = None,
):
"""
Reference implementation of torch.sub
"""
if isinstance(a, Number) and isinstance(b, Number):
raise ValueError(
"Receive two Number inputs to an elementwise binary operation!"
)
a, b = _maybe_broadcast(a, b)
if alpha is not None:
dtype = a.dtype if isinstance(a, TensorLike) else b.dtype # type: ignore[union-attr]
python_type = utils.dtype_to_type(dtype)
if not utils.is_weakly_lesser_type(type(alpha), python_type):
msg = (
"alpha argument of type {0} cannot be safely cast to type {1}!".format(
type(alpha), python_type
)
)
raise ValueError(msg)
b = prims.mul(b, alpha)
return prims.sub(a, b)
# TODO: add docstring
true_divide = _make_elementwise_binary_reference(
prims.div,
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
aten_op=None, # CompositeImplicitAutograd
)
def _trunc_divide(
a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]
):
dtype = utils.get_dtype(a)
if utils.is_integer_dtype(dtype):
return prims.div(a, b)
return trunc(prims.div(a, b))
# TODO: add docstring
trunc_divide = _make_elementwise_binary_reference(
_trunc_divide,
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
aten_op=None, # CompositeImplicitAutograd
)
#
# Elementwise Ternary References
#
@register_decomposition(torch.ops.aten.clamp)
@out_wrapper()
@elementwise_type_promotion_wrapper(
type_promoting_args=("a", "min", "max"),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def clamp(
a: TensorLikeType,
min: Optional[TensorOrNumberLikeType] = None,
max: Optional[TensorOrNumberLikeType] = None,
) -> TensorLikeType:
# NOTE: grad behavior with implementation `where` is not consistent on `nan`
if min is None and max is None:
msg = "clamp called but both min and max are none!"
raise ValueError(msg)
if min is not None:
a_isnan = torch.isnan(a)
condition = torch.bitwise_or(torch.ge(a, min), a_isnan) # type: ignore[arg-type]
        # we should also propagate `nan` values coming from the boundaries; however,
        # that's not necessary since `ge` already returns `False` when either operand
        # is `nan`, so the line below would be redundant:
        #   `condition = bitwise_and(condition, bitwise_not(isnan(min)))`
a = torch.where(condition, a, min) # type: ignore[arg-type]
if max is not None:
a_isnan = torch.isnan(a)
# same as above, no need to adjust `nan` from `max`
condition = torch.bitwise_or(torch.le(a, max), a_isnan) # type: ignore[arg-type]
a = torch.where(condition, a, max) # type: ignore[arg-type]
return a
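# Illustrative example (not from the original source; assumes standard
# eager-mode torch.clamp semantics). NaN entries in `a` propagate to the output:
# >>> torch.clamp(torch.tensor([-1.5, 0.5, 2.5]), min=0.0, max=1.0)
# tensor([0.0000, 0.5000, 1.0000])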
@register_decomposition(torch.ops.aten.clamp_min)
@out_wrapper()
def clamp_min(
self: TensorLikeType,
min: TensorOrNumberLikeType = None,
) -> TensorLikeType:
return torch.clamp(self, min=min) # type: ignore[arg-type]
@register_decomposition(torch.ops.aten.clamp_max)
@out_wrapper()
def clamp_max(
self: TensorLikeType,
max: TensorOrNumberLikeType = None,
) -> TensorLikeType:
return torch.clamp(self, max=max) # type: ignore[arg-type]
#
# Conditional references
#
# https://pytorch.org/docs/stable/generated/torch.where.html
# TODO: implement alternate where
@register_decomposition(torch.ops.aten.where)
@out_wrapper()
@elementwise_type_promotion_wrapper(
type_promoting_args=("a", "b"),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH,
)
def where(
pred: Tensor,
a: Optional[TensorOrNumberLikeType] = None,
b: Optional[TensorOrNumberLikeType] = None,
):
""" """
if a is None or b is None:
raise NotImplementedError
utils.check_same_device(pred, a, b, allow_cpu_scalar_tensors=True)
check(
pred.dtype is torch.bool,
lambda: f"expected predicate to be bool, got {pred.dtype}",
)
pred, a, b = _maybe_broadcast(pred, a, b)
return prims.where(pred, a, b)
#
# Data Movement References
#
def clone(
a: TensorLikeType, *, memory_format: torch.memory_format = torch.preserve_format
) -> TensorLikeType:
result = torch.empty_like(
a, requires_grad=a.requires_grad, memory_format=memory_format
)
copy_to(result, a)
return result
def copy_to(a: Tensor, b: Tensor, *, allow_cross_device=True):
if not allow_cross_device and a.device != b.device:
msg = "Attempting to copy from device {0} to device {1}, but cross-device copies are not allowed!".format(
b.device, a.device
)
raise RuntimeError(msg)
return prims.copy_to(a, b)
@register_decomposition(torch.ops.aten.item)
def item(a: TensorLikeType) -> NumberType:
if a.numel() != 1:
msg = f"Can't convert a tensor with {a.numel()} elements to a number!"
raise ValueError(msg)
# NOTE: explicit conversion is necessary for bool!
# See https://github.com/pytorch/pytorch/issues/78071
number_type = utils.dtype_to_type(a.dtype)
return number_type(prims.item(a))
#
# Reduction references
#
def _reduction(
a: TensorLikeType,
prim: Callable,
*,
has_identity: bool = True,
accepts_dim_tuple: bool = True, # to handle min/argmin that accept single dim only
dims: Optional[DimsType] = None,
keepdims: bool = False,
dtype: Optional[torch.dtype] = None, # should be specified for ops that support it
out: Optional[Tensor] = None,
output_dtype_kind: REDUCTION_OUTPUT_TYPE_KIND,
) -> TensorLikeType: # it is usually SAME, but I want
# ref writers to actually think about what to put here
assert isinstance(a, TensorLike)
if a.ndim > 64:
raise RuntimeError(
"Received a tensor with {0} dimensions, but only tensors with up to 64 dims are supported!".format(
a.ndim
)
)
if out is not None:
assert isinstance(out, TensorLike)
if dtype is not None:
# TODO - this is true for eager mode currently, but it's wrong behavior for complex norms
if dtype != out.dtype:
raise RuntimeError(
"dtype argument and out dtype must match in reduction"
)
if not accepts_dim_tuple:
assert dims is None or isinstance(dims, int)
if isinstance(dims, int):
dims = (dims,) # type: ignore[assignment]
dims = utils.reduction_dims(a.shape, dims)
if not has_identity:
valid_shape = a.ndim == 0 or py_all(a.shape[i] for i in dims)
if not valid_shape:
raise RuntimeError(
"reducing over zero-size dimension for reduction operation without identity"
)
computation_dtype, result_dtype = utils.reduction_dtypes(
a, output_dtype_kind, dtype
)
a_converted = prims.convert_element_type(a, computation_dtype)
result = prim(a_converted, dims)
if keepdims:
output_shape = [a.shape[i] if i not in dims else 1 for i in range(a.ndim)]
broadcast_dims = [i for i in range(a.ndim) if i not in dims]
result = prims.broadcast_in_dim(result, output_shape, broadcast_dims)
if out is not None:
assert result_dtype is not None
if dtype is not None and result_dtype != out.dtype:
raise RuntimeError(
"Expected the dtype of reduction result and out to match"
)
out = _maybe_resize_out(out, result.shape)
return _safe_copy_out(copy_from=result, copy_to=out) # type: ignore[arg-type]
if result.dtype != result_dtype and result_dtype is not None:
result = prims.convert_element_type(result, result_dtype)
return result
# Saves Python all
py_all = all
@register_decomposition(torch.ops.aten.all)
@out_wrapper()
def all(
a: TensorLikeType,
dim: Optional[DimsType] = None,
keepdim: bool = False,
) -> TensorLikeType:
# Computes nelem
if isinstance(dim, int):
dim = (dim,) # type: ignore[assignment]
dims = utils.reduction_dims(a.shape, dim) # type: ignore[arg-type]
nelem = 1 if a.ndim == 0 else reduce(operator.mul, (a.shape[i] for i in dims), 1)
a_ = _maybe_convert_to_dtype(a, torch.bool)
result = eq(sum(a_, dim=dim, keepdim=keepdim), nelem) # type: ignore[arg-type]
# Preserves uint8 -- probably a legacy mask thing
if a.dtype is torch.uint8:
return prims.convert_element_type(result, torch.uint8)
return result
# Saves Python any
py_any = any
@register_decomposition(torch.ops.aten.any)
@out_wrapper()
def any(
a: TensorLikeType,
dim: Optional[DimsType] = None,
keepdim: bool = False,
) -> TensorLikeType:
a_ = _maybe_convert_to_dtype(a, torch.bool)
result = ne(sum(a_, dim=dim, keepdim=keepdim), False) # type: ignore[arg-type]
# Preserves uint8 -- probably a legacy mask thing
if a.dtype is torch.uint8:
return prims.convert_element_type(result, torch.uint8)
return result
@register_decomposition(torch.ops.aten.sum)
def sum(
a: TensorLikeType,
dim: Union[Optional[int], Optional[List[int]]] = None,
keepdim: bool = False,
*,
dtype=None,
out: Optional[Tensor] = None,
) -> TensorLikeType:
if dtype is None:
if utils.is_boolean_dtype(a.dtype) or utils.is_integer_dtype(a.dtype):
dtype = torch.int64
else:
dtype = a.dtype
# reduces over all dimensions if dim=() is passed
if dim == () or dim == []:
dim = None
return _reduction(
a,
prims.sum,
dims=dim,
keepdims=keepdim,
dtype=dtype,
out=out,
output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME,
)
@register_decomposition(torch.ops.aten.prod)
def prod(
a: TensorLikeType,
dim: Union[Optional[int], Optional[List[int]]] = None,
keepdim: bool = False,
*,
dtype=None,
out: Optional[Tensor] = None,
) -> TensorLikeType:
if dtype is None:
if utils.is_boolean_dtype(a.dtype) or utils.is_integer_dtype(a.dtype):
dtype = torch.int64
else:
dtype = a.dtype
# reduces over all dimensions if dim=() is passed
if dim == () or dim == []:
dim = None
return _reduction(
a,
prims.prod,
dims=dim,
keepdims=keepdim,
dtype=dtype,
out=out,
output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME,
)
@register_decomposition(torch.ops.aten.amin)
def amin(
a: TensorLikeType,
dim: Union[Optional[int], Optional[List[int]]] = None,
keepdim: bool = False,
*,
out: Optional[Tensor] = None,
) -> TensorLikeType:
# reduces over all dimensions if dim=() is passed
if dim == () or dim == []:
dim = None
return _reduction(
a,
prims.amin,
dims=dim,
keepdims=keepdim,
dtype=None,
out=out,
has_identity=False,
output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME,
)
@register_decomposition(torch.ops.aten.amax)
def amax(
a: TensorLikeType,
dim: Optional[DimsType] = None,
keepdim: bool = False,
*,
out: Optional[Tensor] = None,
) -> TensorLikeType:
# reduces over all dimensions if dim=() is passed
if dim == () or dim == []:
dim = None
return _reduction(
a,
prims.amax,
dims=dim,
keepdims=keepdim,
dtype=None,
out=out,
has_identity=False,
output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME,
)
def _set_correction(
unbiased: Optional[bool] = None,
correction: Optional[int] = None,
):
if correction is not None and unbiased is not None:
raise RuntimeError("cannot specify both correction and unbiased arguments")
elif correction is None and unbiased is None:
correction = 1
elif correction is None and unbiased is not None:
correction = 0 if unbiased is False else 1
if not isinstance(correction, int):
raise ValueError("correction argument should be integer")
if correction < 0:
raise ValueError("correction argument should be non-negative")
return correction
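# Illustrative example of the unbiased/correction mapping (not from the
# original source; assumes standard eager-mode torch.var semantics):
# >>> x = torch.tensor([1.0, 2.0, 3.0, 4.0])
# >>> torch.var(x)                  # correction defaults to 1 (Bessel's correction)
# tensor(1.6667)
# >>> torch.var(x, unbiased=False)  # equivalent to correction=0
# tensor(1.2500)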
@out_wrapper()
def var(
a: TensorLikeType,
dim: Optional[DimsType] = None,
unbiased: Optional[bool] = None,
keepdim: bool = False,
*,
correction: Optional[int] = None,
) -> TensorLikeType:
correction = _set_correction(unbiased, correction)
# reduces over all dimensions if dim=() is passed
if dim == () or dim == []:
dim = None
result = _reduction(
a,
partial(prims.var, correction=correction),
dims=dim,
keepdims=keepdim,
dtype=None,
out=None,
has_identity=True,
output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT,
)
return result
@out_wrapper()
def std(
a: TensorLikeType,
dim: Union[Optional[int], Optional[List[int]]] = None,
unbiased: Optional[bool] = None,
keepdim: bool = False,
*,
correction: Optional[int] = None,
) -> TensorLikeType:
correction = _set_correction(unbiased, correction)
# reduces over all dimensions if dim=() is passed
if dim == () or dim == []:
dim = None
opmath_dtype, dtype = utils.reduction_dtypes(
a, REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT
)
result = _reduction(
a,
partial(prims.var, correction=correction),
dims=dim,
keepdims=keepdim,
dtype=opmath_dtype,
out=None,
has_identity=True,
output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT,
)
result = sqrt(result)
return _maybe_convert_to_dtype(result, dtype) # type: ignore[return-value,arg-type]
@register_decomposition(torch.ops.aten.mean)
def mean(
a: TensorLikeType,
dim: Optional[DimsType] = None,
keepdim: bool = False,
*,
dtype=None,
out=None,
) -> TensorLikeType:
# reduces over all dimensions if dim=() is passed
if dim == () or dim == []:
dim = None
if dtype is None:
dtype = a.dtype
# can't use out wrapper because of this argument
if out is not None and out.dtype != dtype:
raise RuntimeError("expected out dtype and dtype to match")
result = _reduction(
a,
prims.sum,
dims=dim,
keepdims=keepdim,
dtype=dtype,
out=None,
output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.KEEP_PROMOTED_TYPE,
)
if utils.is_integer_dtype(dtype):
raise RuntimeError("result type should be floating point or complex")
if isinstance(dim, int):
dim = (dim,) # type: ignore[assignment]
dims = utils.reduction_dims(a.shape, dim) # type: ignore[arg-type]
nelem = 1 if a.ndim == 0 else reduce(operator.mul, (a.shape[i] for i in dims), 1)
result = true_divide(result, nelem)
result_dtype = a.dtype if dtype is None else dtype
result = _maybe_convert_to_dtype(result, result_dtype) # type: ignore[assignment]
if out is not None:
assert isinstance(out, TensorLike)
out = _maybe_resize_out(out, result.shape)
return _safe_copy_out(copy_from=result, copy_to=out) # type: ignore[arg-type]
return result
@register_decomposition(torch.ops.aten.std_mean.correction)
def std_mean(
a: TensorLikeType,
dim: Union[Optional[int], Optional[List[int]]] = None,
*,
unbiased: Optional[bool] = None,
keepdim: bool = False,
correction: Optional[int] = None,
):
s = std(a, dim, unbiased, keepdim, correction=correction)
m = mean(a, dim, keepdim)
return s, m
@register_decomposition(torch.ops.aten.var_mean)
def var_mean(
a: TensorLikeType,
dim: Optional[DimsType] = None,
unbiased: Optional[bool] = None,
keepdim: bool = False,
*,
correction: Optional[int] = None,
):
v = var(a, dim, unbiased, keepdim, correction=correction)
m = mean(a, dim, keepdim)
return v, m
@register_decomposition(torch.ops.aten.addr)
@out_wrapper()
@elementwise_type_promotion_wrapper(
type_promoting_args=("self", "vec1", "vec2"),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def addr(
self: TensorLikeType,
vec1: TensorLikeType,
vec2: TensorLikeType,
*,
beta: NumberType = 1,
alpha: NumberType = 1,
) -> TensorLikeType:
check(
vec1.ndim == 1,
lambda: f"addr: Expected 1-D argument vec1, but got {vec1.ndim}-D",
)
check(
vec2.ndim == 1,
lambda: f"addr: Expected 1-D argument vec2, but got {vec2.ndim}-D",
)
self = self.expand(vec1.shape[0], vec2.shape[0])
if utils.is_boolean_dtype(self.dtype):
# Integers are accepted for booleans
check(
is_weakly_lesser_type(type(beta), int),
lambda: f"expected bool/int beta but got {type(beta)}",
)
check(
is_weakly_lesser_type(type(alpha), int),
lambda: f"expected bool/int alpha but got {type(beta)}",
)
if not beta:
return torch.outer(vec1, vec2) if alpha else torch.full_like(self, False)
else:
return torch.logical_or(
self,
torch.outer(vec1, vec2) if alpha else torch.full_like(self, False),
)
else:
check(
is_weakly_lesser_type(type(beta), dtype_to_type(self.dtype)),
lambda: f"cannot safely convert {type(beta)} to {self.dtype}",
)
check(
is_weakly_lesser_type(type(alpha), dtype_to_type(self.dtype)),
lambda: f"cannot safely convert {type(alpha)} to {self.dtype}",
)
if beta == 0:
# This means NaNs from self are dropped if beta is zero
return alpha * torch.outer(vec1, vec2)
else:
return beta * self + alpha * torch.outer(vec1, vec2)
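# Illustrative example (not from the original source; assumes standard
# eager-mode torch.addr semantics). With beta=0 the input is dropped and only
# the scaled outer product remains:
# >>> vec1 = torch.tensor([1.0, 2.0])
# >>> vec2 = torch.tensor([1.0, 10.0])
# >>> torch.addr(torch.zeros(2, 2), vec1, vec2, beta=0, alpha=2)
# tensor([[ 2., 20.],
#         [ 4., 40.]])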
# CompositeImplicitAutograd - don't register decomp
def atleast_1d(
arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType
) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]:
"""Reference implementation of :func:`torch.atleast_1d`."""
if not args and isinstance(arg, collections.abc.Sequence):
args_ = arg
else:
assert not isinstance(arg, collections.abc.Sequence)
args_ = (arg,) + args
res = tuple(a if a.ndim >= 1 else unsqueeze(a, 0) for a in args_)
return res if len(res) > 1 else res[0]
# Helper function with assert to avoid MyPy error
# of incompatible type passed to unsqueeze
def _unsqueeze_atleast(
at_least_fn: Callable, dim: int, arg: TensorLikeType
) -> TensorLikeType:
arg_ = at_least_fn(arg)
assert isinstance(arg_, TensorLike)
return unsqueeze(arg_, dim)
# CompositeImplicitAutograd - don't register decomp
def atleast_2d(
arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType
) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]:
"""Reference implementation of :func:`torch.atleast_2d`."""
if not args and isinstance(arg, collections.abc.Sequence):
args_ = arg
else:
assert not isinstance(arg, collections.abc.Sequence)
args_ = (arg,) + args
unsqueeze_atleast_1d = partial(_unsqueeze_atleast, atleast_1d, 0)
res = tuple(a if a.ndim >= 2 else unsqueeze_atleast_1d(a) for a in args_)
return res if len(res) > 1 else res[0]
# CompositeImplicitAutograd - don't register decomp
def atleast_3d(
arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType
) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]:
"""Reference implementation of :func:`torch.atleast_3d`."""
if not args and isinstance(arg, collections.abc.Sequence):
args_ = arg
else:
assert not isinstance(arg, collections.abc.Sequence)
args_ = (arg,) + args
unsqueeze_atleast_2d = partial(_unsqueeze_atleast, atleast_2d, -1)
res = tuple(a if a.ndim >= 3 else unsqueeze_atleast_2d(a) for a in args_)
return res if len(res) > 1 else res[0]
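# Illustrative examples (not from the original source; assume standard
# eager-mode torch semantics):
# >>> torch.atleast_2d(torch.tensor(1.0)).shape
# torch.Size([1, 1])
# >>> torch.atleast_3d(torch.tensor([1.0, 2.0])).shape
# torch.Size([1, 2, 1])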
def as_strided(
a: TensorLikeType, size: ShapeType, stride: StrideType, storage_offset: int = 0
) -> TensorLikeType:
return prims.as_strided(a, size, stride, storage_offset)
def broadcast_shapes(*shapes) -> ShapeType:
return torch.Size(_broadcast_shapes(*shapes))
def broadcast_tensors(*tensors) -> List[TensorLikeType]:
return list(_maybe_broadcast(*tensors, preserve_cpu_scalar_tensors=False))
# CompositeImplicitAutograd - don't register decomp
def broadcast_to(a: TensorLikeType, size: ShapeType) -> TensorLikeType:
start = len(size) - len(a.shape)
dims = tuple(range(start, len(a.shape) + start))
return prims.broadcast_in_dim(a, size, dims)
@register_decomposition(torch.ops.aten.cat)
@out_wrapper()
@elementwise_type_promotion_wrapper(
type_promoting_args=("tensors",),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH,
)
def cat(tensors: TensorSequenceType, dim: int = 0) -> TensorLikeType:
if len(tensors) == 0:
msg = "cat expects at least one tensor, but received zero!"
raise ValueError(msg)
for tensor in tensors:
assert isinstance(tensor, TensorLike)
utils.check_same_device(*tensors, allow_cpu_scalar_tensors=False)
dim = utils.canonicalize_dim(tensors[0].ndim, dim)
utils.validate_idx(tensors[0].ndim, dim)
# Filters tensors with one dimension of length zero
filtered = tuple(x for x in tensors if not (x.ndim == 1 and x.numel() == 0))
if len(filtered) == 0:
t = tensors[0]
# TODO: fix this to work with meta tensors
try:
requires_grad = any(x.requires_grad for x in tensors)
except Exception:
requires_grad = False
return empty((0,), dtype=t.dtype, device=t.device, requires_grad=requires_grad)
return prims.cat(filtered, dim)
# CompositeImplicitAutograd - don't register decomp
@out_wrapper()
def column_stack(tensors: TensorSequenceType) -> TensorLikeType:
aligned_tensors = tuple(
x if x.ndim > 1 else prims.expand_dims(x, list(range(x.ndim, 2)))
for x in tensors
)
return cat(aligned_tensors, 1)
def conj(input: TensorLikeType) -> TensorLikeType:
if not input.dtype.is_complex:
return input
if input.is_sparse:
return torch.conj_physical(input)
return prims.conj(input)
# This replicates at::constant_pad_nd, defined in ATen/native/PadNd.cpp
@register_decomposition(torch.ops.aten.constant_pad_nd)
def constant_pad_nd(
input: TensorLikeType, pad: List[int], value: NumberType = 0
) -> TensorLikeType:
check(
len(pad) % 2 == 0,
lambda: f"Length of pad must be even but instead it equals {len(pad)}",
)
input_sizes = input.shape
l_inp = len(input_sizes)
l_pad = len(pad) // 2
l_diff = l_inp - l_pad
check(
l_inp >= l_pad,
lambda: "Length of pad should be no more than twice the number of "
f"dimensions of the input. Pad length is {len(pad)} while the input has "
f"{l_inp} dimensions.",
)
c_input = input
for i in range(l_diff, l_inp):
pad_idx = 2 * (l_inp - i - 1)
if pad[pad_idx] < 0:
c_input = c_input.narrow(i, -pad[pad_idx], c_input.shape[i] + pad[pad_idx])
if pad[pad_idx + 1] < 0:
c_input = c_input.narrow(i, 0, c_input.shape[i] + pad[pad_idx + 1])
# if none of the pads are positive we can just return the result
if builtins.all(p <= 0 for p in pad):
return c_input.clone()
new_shape = list(input_sizes[:l_diff])
for i in range(l_pad):
pad_idx = len(pad) - ((i + 1) * 2)
new_dim = input_sizes[l_diff + i] + pad[pad_idx] + pad[pad_idx + 1]
check(
new_dim > 0,
lambda: f"The input size {input_sizes[l_diff + i]}, plus negative padding "
f"{pad[pad_idx]} and {pad[pad_idx + 1]} resulted in a negative output size, "
f"which is invalid. Check dimension {l_diff + i} of your input.",
)
new_shape.append(new_dim)
memory_format = utils.suggest_memory_format(input)
output = torch.empty(
new_shape,
dtype=input.dtype,
device=input.device,
requires_grad=input.requires_grad,
memory_format=memory_format,
)
if value == 0 and input.dtype == torch.bool:
value = False
# torch.fill isn't typed to allow complex values
output = torch.fill(output, value) # type: ignore[arg-type]
c_output = output
for i in range(l_diff, l_inp):
pad_idx = 2 * (l_inp - i - 1)
if pad[pad_idx] > 0:
c_output = c_output.narrow(
i, pad[pad_idx], c_output.shape[i] - pad[pad_idx]
)
if pad[pad_idx + 1] > 0:
c_output = c_output.narrow(i, 0, c_output.shape[i] - pad[pad_idx + 1])
prims.copy_to(c_output, c_input)
return output
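# Illustrative example via torch.nn.functional.pad, which uses constant_pad_nd
# for constant-mode padding (not from the original source; assumes standard
# eager-mode semantics). pad is given as (left, right, top, bottom):
# >>> torch.nn.functional.pad(torch.ones(2, 2), (1, 0, 0, 1), value=9.0)
# tensor([[9., 1., 1.],
#         [9., 1., 1.],
#         [9., 9., 9.]])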
def contiguous(
a: Tensor, *, memory_format: torch.memory_format = torch.contiguous_format
) -> Tensor:
check(
memory_format != torch.preserve_format,
lambda: "preserve memory format is unsupported by the contiguous operator",
)
if utils.is_contiguous_for_memory_format(a, memory_format=memory_format):
return a
return torch.clone(a, memory_format=memory_format)
@out_wrapper()
def dstack(tensors: TensorSequenceType) -> TensorLikeType:
check(len(tensors) > 0, lambda: "dstack expects a non-empty TensorList")
aligned_tensors = atleast_3d(*tensors)
return cat(aligned_tensors, 2)
@register_decomposition(torch.ops.aten.expand, disable_meta=True)
def expand(a: Tensor, *shape) -> Tensor:
# NOTE: cannot use utils.extract_shape_from_varargs here
# because that also validates the shape, but the shape
# given to expand may be "invalid"
if len(shape) == 1 and isinstance(shape[0], Sequence):
shape = tuple(shape[0])
check(
len(shape) >= len(a.shape),
lambda: "expand: the requested shape has too few dimensions!",
)
offset = len(shape) - len(a.shape)
shape_ = list(shape)
for idx, x in enumerate(a.shape):
offset_idx = idx + offset
requested_length = shape[offset_idx]
check(
requested_length == x or x == 1 or requested_length == -1,
lambda: f"expand: attempting to expand a dimension of length {x}!",
)
shape_[offset_idx] = requested_length if requested_length != -1 else x
# At this point shape must be valid
utils.validate_shape(shape_)
return prims.broadcast_in_dim(
a, shape_, tuple(range(offset, len(a.shape) + offset))
)
def chunk(a: TensorLikeType, chunks: int, dim: int = 0) -> Tuple[TensorLikeType, ...]:
if chunks <= 0:
msg = "Expected at least one chunk, but got {0}!".format(chunks)
raise ValueError(msg)
dim = utils.canonicalize_dim(a.ndim, dim)
length = a.shape[dim]
chunk_size = math.ceil(length / chunks)
full_chunks = math.floor(length / chunk_size)
tail_chunk_size = length % chunk_size
result = []
for i in range(full_chunks):
result.append(narrow(a, dim, i * chunk_size, chunk_size))
if tail_chunk_size != 0:
result.append(narrow(a, dim, full_chunks * chunk_size, tail_chunk_size))
return tuple(result)
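# Illustrative example (not from the original source; assumes standard
# eager-mode torch.chunk semantics). The last chunk may be smaller when the
# dimension is not evenly divisible:
# >>> torch.chunk(torch.arange(5), 3)
# (tensor([0, 1]), tensor([2, 3]), tensor([4]))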
# Note: flatten, unlike prim.collapse and prim.collapse_view has an inclusive end_dim
# Note: flatten, unlike other shape operators, returns the input tensor on a no-op (unless
# a 0D tensor is flattened, in which case it's returned in 1D)
# CompositeImplicitAutograd - don't register decomp
def flatten(a: TensorLikeType, start_dim: int = 0, end_dim: int = -1) -> TensorLikeType:
start_dim = utils.canonicalize_dim(a.ndim, start_dim)
end_dim = utils.canonicalize_dim(a.ndim, end_dim)
# Short-circuits on no-op
if start_dim == end_dim and a.ndim != 0:
return a
# Tries to take a view
# TODO: we could look at directing collapse_view to skip its meta function here (unsafe_collapse_view)
new_shape, new_strides = prims._collapse_view_helper(a, start_dim, end_dim + 1)
if new_shape is not None:
return prims.collapse_view(a, start_dim, end_dim + 1)
# Makes a copy if it can't make a view
return prims.collapse(a, start_dim, end_dim + 1)
@register_decomposition(torch.ops.aten.flip)
def flip(a: TensorLikeType, dims: DimsSequenceType) -> TensorLikeType:
if not isinstance(dims, tuple) and not isinstance(dims, list):
raise ValueError("dims has to be a sequence of ints")
dims = utils.canonicalize_dims(a.ndim, dims) # type: ignore[assignment]
utils.validate_no_repeating_dims(dims)
return prims.rev(a, dims)
# CompositeImplicitAutograd - don't register decomp
def fliplr(a: TensorLikeType) -> TensorLikeType:
if a.ndim < 2:
raise RuntimeError("Input must be >= 2-d.")
return flip(a, (1,))
# CompositeImplicitAutograd - don't register decomp
def flipud(a: TensorLikeType) -> TensorLikeType:
if a.ndim < 1:
raise RuntimeError("Input must be >= 1-d.")
return flip(a, (0,))
# CompositeImplicitAutograd - don't register decomp
def narrow(a: TensorLikeType, dim: int, start: int, length: int) -> TensorLikeType:
dim = utils.canonicalize_dim(a.ndim, dim)
return prims.slice_in_dim(a, start, start + length, axis=dim)
def _normalize(
a: Tensor, norm_dims: DimsType, eps: float
) -> Tuple[Tensor, Tensor, Tensor]:
"""Computes mean and 1/std of a tensor along norm_dims.
Used as a helper function for normalization layers.
Args:
a (Tensor): input tensor
norm_dims (DimsType): dimensions to normalize over
eps (float): epsilon for numerical stability
Returns:
out (Tensor): normalized tensor.
mean (Tensor): mean of the tensor along norm_dims.
rstd (Tensor): 1/std of the tensor along norm_dims.
"""
computation_dtype = utils.get_computation_dtype(a.dtype)
a_acc = _maybe_convert_to_dtype(a, computation_dtype)
assert isinstance(a_acc, TensorLike) # to avoid mypy error for var_mean
biased_var, mean = var_mean(a_acc, dim=norm_dims, unbiased=False, keepdim=True)
rstd = torch.rsqrt(biased_var + eps)
out = (a - mean) * rstd
return out, mean, rstd
@register_decomposition(torch.ops.aten.native_layer_norm)
def native_layer_norm(
input: Tensor,
normalized_shape: ShapeType,
weight: Optional[Tensor],
bias: Optional[Tensor],
eps: float,
) -> Tuple[Tensor, Tensor, Tensor]:
normalized_ndim = len(normalized_shape)
utils.check(
normalized_ndim >= 1,
lambda: "Expected normalized_shape to be at least 1-dimensional, i.e., "
+ "containing at least one element, but got normalized_shape = "
+ str(normalized_shape),
)
# torch.Size([1, 2, 3]) == [1, 2, 3] evaluates to False
# while torch.Size([1, 2, 3]) == (1, 2, 3) is True
# therefore we use tuple(normalized_shape)
utils.check(
weight is None or weight.shape == tuple(normalized_shape),
lambda: "Expected weight to be of same shape as normalized_shape, but got "
+ "weight of shape "
+ str(weight.shape) # type: ignore[union-attr]
+ " and normalized_shape = "
+ str(normalized_shape),
)
utils.check(
bias is None or bias.shape == tuple(normalized_shape),
lambda: "Expected bias to be of same shape as normalized_shape, but got "
+ "bias of shape "
+ str(bias.shape) # type: ignore[union-attr]
+ " and normalized_shape = "
+ str(normalized_shape),
)
utils.check(
input.ndim >= normalized_ndim
and input.shape[(input.ndim - normalized_ndim) :] == tuple(normalized_shape),
lambda: "Given normalized_shape="
+ str(normalized_shape)
+ ", expected input with shape "
+ str(normalized_shape)
+ ", but got input of size "
+ str(input.shape),
)
axis = input.ndim - normalized_ndim
reduction_dims = list(range(axis, input.ndim))
out, mean, rstd = _normalize(input, reduction_dims, eps)
if weight is None and bias is not None:
out = out + bias
elif weight is not None and bias is None:
out = out * weight
elif weight is not None and bias is not None:
out = out * weight + bias
out = prims.convert_element_type(out, input.dtype)
if input.device.type == "cpu":
mean = prims.convert_element_type(mean, input.dtype)
rstd = prims.convert_element_type(rstd, input.dtype)
return (out, mean, rstd)
# TODO: Adding this as a meta function causes functorch tests to fail when compiled with debug mode.
# test/test_eager_transforms.py::TestFunctionalizeCPU::test_functionalize_fx_transpose_simple_cpu
@register_decomposition(torch.ops.aten.permute, disable_meta=True)
def permute(a: TensorLikeType, dims: DimsSequenceType) -> TensorLikeType:
_permutation = utils.canonicalize_dims(a.ndim, dims)
return prims.transpose(a, _permutation)
def _reshape_view_helper(a: TensorLikeType, *shape, allow_copy: bool) -> TensorLikeType:
# NOTE: Reshape may be given a shape with a -1 length
# This indicates that the dimension's length should be inferred
# Creates a valid shape
shape = utils.extract_shape_from_varargs(shape, validate=False)
for idx in range(len(shape)):
if shape[idx] == -1:
# Verifies there's only one dimension of length -1 in the shape
if shape.count(-1) > 1:
msg = "Can only infer the length of one dimension, but got shape {0}!".format(
str(shape)
)
raise ValueError(msg)
# TODO: improve error message
if a.numel() > 0:
length = reduce(
operator.floordiv, (x for x in shape if x != -1), a.numel()
)
else:
msg = "Cannot reshape a tensor of zero elements into shape {0} because the unspecified length is ambiguous!".format(
str(shape)
)
raise ValueError(msg)
shape = list(shape) # type: ignore[assignment]
shape[idx] = length # type: ignore[index]
break
# Short-circuits if shape is the same
utils.validate_shape(shape)
if tuple(a.shape) == tuple(shape):
return prims.view_of(a)
numel = reduce(operator.mul, shape) if len(shape) > 0 else 1
if a.numel() != numel:
msg = "Attempting to reshape a tensor with shape {0} and {1} elements to a shape {2} with {3} elements!".format(
str(a.shape), a.numel(), str(shape), numel
)
raise ValueError(msg)
# Special-cases tensors with no elements
if a.numel() == 0:
return as_strided(a, shape, utils.make_contiguous_strides_for(shape))
# Special-cases reshaping zero dim tensors
if a.ndim == 0:
_a = a
for length in shape:
assert length == 1
_a = unsqueeze(_a, -1)
return _a
# Special-cases reshaping to zero dim tensors
if len(shape) == 0:
_a = a
for length in a.shape:
assert length == 1
_a = squeeze(_a, -1)
return _a
# Handles general case: a 1+D tensor reshaped into a distinct 1+D shape
# NOTE [Reshape Algorithm]
# This algorithm works by attempting to greedily construct the desired dimensions in
# the output shape, left to right. It does this by, conceptually, accumulating
# dimensions of the original tensor, also left to right, until the dimension
# can be constructed using prims.split_dim.
    # The algorithm also has special handling for tail squeezes/unsqueezes, like
    # a reshape from (5, 5) to (5, 5, 1) or vice versa.
#
# This algorithm does not flatten the original tensor and then split dims as appropriate
# because that would create copies more often than this algorithm. flatten is the only
# operation below which can create a view or a copy, and while it prefers creating
# views it may sometimes create a copy if the tensor's strides do not permit a view.
# As a result, this algorithm tries to minimize flattening.
#
# Note that a better version of this algorithm may exist. Regions which could be
# flattened without creating a copy can be identified in advance, and that might
# allow fewer flatten calls or faster short-circuiting to make a copy.
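    #
    # Illustrative walkthrough (for exposition only): reshaping a contiguous
    # (2, 3, 4) tensor to (6, 4) starts with idx=0 and length=6. Since
    # a_.shape[0] == 2 != 6, dimensions are accumulated (2 * 3 == 6), dims 0..1
    # are flattened (a view here, because the tensor is contiguous), and the
    # resulting length-6 dimension already matches, so no split_dim is needed.
    # The trailing length-4 dimension then matches directly and is skipped.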
idx = 0
a_ = a
for length in shape:
# Handles tail unsqueezes
if idx >= a_.ndim:
assert length == 1
last_dim = a_.ndim - 1
# NOTE: using split_dim instead of unsqueeze may seem silly here,
# but it's necessary to get the strides correct
a_ = prims.split_dim(a_, last_dim, a_.shape[last_dim])
idx = idx + 1
continue
# Skips dimensions that are already the correct length
if length == a_.shape[idx]:
idx = idx + 1
continue
# Gathers enough original dimensions such that this new dimension can be created
# Note that this accumulation will terminate because we've verified a and the shape
# specify the same number of elements above
accum = a_.shape[idx]
end = idx
while accum % length != 0:
end = end + 1
accum = accum * a_.shape[end]
if end != idx:
            # NOTE: in this case multiple dimensions must be flattened to create the desired dimension
# This flattening is why reshape sometimes creates a copy -- because flattening
# may return a view of a copy
# Checks if collapse can be a view and short-circuits to copying reshape if it can't
new_shape, new_strides = prims._collapse_view_helper(a_, idx, end + 1)
if new_shape is None:
if allow_copy:
return prims.reshape(a, shape)
msg = "Cannot view a tensor with shape {0} and strides {1} as a tensor with shape {2}!".format(
a.shape, a.stride(), shape
)
raise ValueError(msg)
a_ = flatten(a_, idx, end)
# Splits the (possibly flattened) dimension to create the desired dim length
if accum != length:
a_ = prims.split_dim(a_, idx, length)
idx = idx + 1
# Squeezes tail
while idx < a_.ndim:
assert a_.shape[idx] == 1
a_ = squeeze(a_, idx)
return a_
# TODO: Turn this into a decomposition (currently fails on reshape meta tests)
# CompositeImplicitAutograd - don't register decomp
# NOTE: shape is a vararg because Tensor.reshape can be called as
# Tensor.reshape(a, b, c) or Tensor.reshape((a, b, c)). The function call
# torch.reshape doesn't support unpacked shapes.
def reshape(a: TensorLikeType, *shape: ShapeType) -> TensorLikeType:
return _reshape_view_helper(a, *shape, allow_copy=True)
@register_decomposition(torch.ops.aten.roll)
def roll(
a: TensorLikeType, shifts: DimsType, dims: DimsType = tuple()
) -> TensorLikeType:
"""Reference implementation of :func:`torch.roll`."""
dims = utils.canonicalize_dims(a.ndim, dims)
# ATen specifies int[1] type for shifts and dims which expands integers to tuples of length 1
if not isinstance(shifts, Iterable):
shifts = (shifts,)
if not isinstance(dims, Iterable):
dims = (dims,)
# Avoid modulo by zero
if a.numel() == 0:
# Keeping this as ref for now as FakeTensor runs into some issues with complex tensors
return clone(a)
len_shifts = len(shifts)
len_dims = len(dims)
if len_shifts != 1 or len_dims != 1:
if len_shifts == 0:
raise RuntimeError("`shifts` required")
# Takes care of the case when dims is not specified (default)
# By default, the tensor is flattened before shifting, after which the original shape is restored
if len_dims == 0 and len_shifts == 1:
return torch.roll(torch.flatten(a), shifts, 0).view(a.shape)
if len_shifts != len_dims:
raise RuntimeError(
f"shifts and dimensions must align. shifts: {len_shifts}, dims: {len_dims}"
)
assert len_dims > 1
tail_shifts = shifts[1:]
tail_dims = dims[1:]
first_dim_rolled = torch.roll(a, shifts[0], dims[0])
return torch.roll(first_dim_rolled, tail_shifts, tail_dims)
# This path is taken when only one dimension is rolled
# For example to get `first_dim_rolled` above
dim = dims[0]
size = a.shape[dim]
start = (size - shifts[0]) % size
t0 = torch.narrow(a, dim, start, size - start)
t1 = torch.narrow(a, dim, 0, start)
return torch.cat((t0, t1), dim)
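# Illustrative example (not from the original source; assumes standard
# eager-mode torch.roll semantics):
# >>> torch.roll(torch.arange(5), shifts=2)
# tensor([3, 4, 0, 1, 2])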
@register_decomposition(torch.ops.aten.rot90)
def rot90(
a: TensorLikeType, k: int = 1, dims: DimsSequenceType = (0, 1)
) -> TensorLikeType:
"""Reference implementation of :func:`torch.rot90`."""
dims_ = utils.canonicalize_dims(a.ndim, dims)
# Required to silence MyPy errors
assert isinstance(dims_, (tuple, list))
dims = dims_
if len(dims) != 2:
raise RuntimeError(
f"expected total rotation dims == 2, but got dims = {len(dims)}"
)
if a.ndim < 2:
raise RuntimeError(f"expected total dims >= 2, but got total dims = {a.ndim}")
if dims[0] == dims[1]:
raise RuntimeError(
f"expected rotation dims to be different, but got dim0 = {dims[0]} and dim1 = {dims[1]}"
)
k = k % 4 # Rotation direction is from the second towards the first axis for k < 0
if k == 1:
return torch.transpose(torch.flip(a, (dims[1],)), dims[0], dims[1])
elif k == 2:
return torch.flip(a, dims)
elif k == 3:
return torch.transpose(torch.flip(a, (dims[0],)), dims[0], dims[1])
else:
return clone(a)
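# Illustrative example (not from the original source; assumes standard
# eager-mode torch.rot90 semantics):
# >>> x = torch.arange(4).reshape(2, 2)
# >>> x
# tensor([[0, 1],
#         [2, 3]])
# >>> torch.rot90(x, 1, (0, 1))
# tensor([[1, 3],
#         [0, 2]])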
def _check_stack_inputs(tensors: TensorSequenceType) -> None:
entry_shape = tensors[0].shape
for i in range(1, len(tensors)):
assert tensors[i].shape == entry_shape, (
f"stack expects each tensor to be equal size, but got {entry_shape} at entry 0"
f"and {tensors[i].shape} at entry {i}"
)
@register_decomposition(torch.ops.aten.stack)
@out_wrapper()
def stack(tensors: TensorSequenceType, dim: int = 0) -> TensorLikeType:
assert len(tensors) > 0, "stack expects a non-empty TensorList"
wrapped_dim = utils.canonicalize_dim(tensors[0].ndim + 1, dim)
# Refs need sparse support to check other condition
if wrapped_dim < tensors[0].ndim: # and not tensors[0].is_sparse:
_check_stack_inputs(tensors)
result_sizes = list(tensors[0].shape)
result_sizes.insert(wrapped_dim, len(tensors))
out = torch.cat(tensors, wrapped_dim)
return out.view(result_sizes)
# If dim == tensors[0].ndim, view cannot efficiently handle it
return torch.cat([t.unsqueeze(wrapped_dim) for t in tensors], dim)
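# Illustrative example (not from the original source; assumes standard
# eager-mode torch.stack semantics):
# >>> a = torch.zeros(2, 3)
# >>> torch.stack([a, a], dim=1).shape
# torch.Size([2, 2, 3])
# >>> torch.stack([a, a], dim=2).shape  # dim == ndim falls back to unsqueeze + cat
# torch.Size([2, 3, 2])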
@out_wrapper()
def softmax(
a: TensorLikeType,
dim: int,
*,
dtype: Optional[torch.dtype] = None,
) -> TensorLikeType:
result_dtype = dtype or a.dtype
computation_dtype = utils.get_computation_dtype(a.dtype)
a_ = _maybe_convert_to_dtype(a, computation_dtype)
assert isinstance(a_, TensorLike) # to avoid MyPy error for amax
a_max = amax(a_, dim, keepdim=True)
a_exp = exp(a_ - a_max)
return _maybe_convert_to_dtype(
true_divide(a_exp, sum(a_exp, dim, keepdim=True)), result_dtype
) # type: ignore[return-value]
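# Illustrative example (not from the original source; assumes standard
# eager-mode torch.softmax semantics):
# >>> torch.softmax(torch.tensor([1.0, 2.0, 3.0]), dim=0)
# tensor([0.0900, 0.2447, 0.6652])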
# CompositeImplicitAutograd - don't register decomp
@out_wrapper()
def hstack(tensors: TensorSequenceType) -> TensorLikeType:
check(len(tensors) > 0, lambda: "hstack expects a non-empty TensorList")
aligned_tensors = atleast_1d(*tensors)
if aligned_tensors[0].ndim == 1:
return cat(aligned_tensors, 0)
return cat(aligned_tensors, 1)
# CompositeImplicitAutograd - don't register decomp
@out_wrapper()
def vstack(tensors: TensorSequenceType) -> TensorLikeType:
check(len(tensors) > 0, lambda: "vstack expects a non-empty TensorList")
aligned_tensors = atleast_2d(*tensors)
return cat(aligned_tensors, 0)
# CompositeImplicitAutograd - don't register decomp
def unflatten(a: TensorLikeType, dim: int, sizes: ShapeType) -> TensorLikeType:
dim = utils.canonicalize_dim(a.ndim, dim)
if not sizes:
raise RuntimeError("unflatten: sizes must be non-empty")
if -1 not in sizes and utils.prod(sizes) != a.shape[dim]:
raise RuntimeError(
f"unflatten: Provided sizes {sizes} don't multiply up to the size of dim {dim} ({a.shape[dim]}) in the input tensor"
)
out_shape = tuple(a.shape[:dim]) + tuple(sizes) + tuple(a.shape[dim + 1 :])
return torch.reshape(a, out_shape)
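# Illustrative example (not from the original source; assumes standard
# eager-mode Tensor.unflatten semantics). A -1 entry in sizes is inferred:
# >>> torch.arange(6).unflatten(0, (2, 3)).shape
# torch.Size([2, 3])
# >>> torch.arange(6).unflatten(0, (2, -1)).shape
# torch.Size([2, 3])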
def unbind(t: TensorLikeType, dim: int = 0) -> TensorSequenceType:
dim = utils.canonicalize_dim(t.ndim, dim)
check(
len(t.shape) > 0,
lambda: "dimension specified as 0 but tensor has no dimensions",
IndexError,
)
return tuple(
torch.squeeze(s, dim) for s in torch.tensor_split(t, t.shape[dim], dim)
)
# Note: although squeeze is documented as having the out= kwarg it doesn't
@register_decomposition(torch.ops.aten.squeeze, disable_meta=True)
def squeeze(a: TensorLikeType, dim: Optional[int] = None) -> TensorLikeType:
if dim is not None:
dim = utils.canonicalize_dim(a.ndim, dim)
# Short-circuits if the tensor has no dimensions
if len(a.shape) == 0:
assert dim == 0
return prims.view_of(a)
# Note: squeeze does not modify tensors when the given dim is not a dimension of length 1
if a.shape[dim] != 1:
return prims.view_of(a)
return prims.squeeze(a, (dim,))
dims = tuple(idx for idx in range(len(a.shape)) if a.shape[idx] == 1)
return prims.squeeze(a, dims)
# Note: does not work with TensorMetas because of data-dependent control-flow
# CompositeImplicitAutograd - don't register decomp
def tensor_split(
a: TensorLikeType,
indices_or_sections: Union[Tensor, DimsType],
dim: int = 0,
) -> Tuple[TensorLikeType, ...]:
_dim = utils.canonicalize_dim(a.ndim, dim)
if a.ndim == 0:
msg = "tensor_split: received a rank zero tensor, but expected a tensor of rank one or greater!"
raise ValueError(msg)
# If indices_or_sections is a tensor, it must be a CPU Long tensor
if isinstance(indices_or_sections, TensorLike):
if not indices_or_sections.device.type == "cpu":
msg = "tensor_split: if indices_or_sections is a tensor it must be on the CPU, but received one on {0}".format(
indices_or_sections.device
)
raise ValueError(msg)
if indices_or_sections.dtype != torch.long:
msg = "tensor_split: if indices_or_sections is a tensor it must have long dtype, "
" but received one with dtype {0}".format(indices_or_sections.dtype)
raise ValueError(msg)
# Case 0 -- indices_or_sections is an integer or a scalar tensor n and a is split along dim into n parts of equal-ish length
if isinstance(indices_or_sections, int) or (
isinstance(indices_or_sections, TensorLike) and indices_or_sections.ndim == 0
):
sections: int = (
indices_or_sections # type: ignore[assignment]
if isinstance(indices_or_sections, Number)
else indices_or_sections.item()
)
if sections <= 0:
msg = "tensor_split: number of sections must be greater than 0, but was {0}".format(
sections
)
raise ValueError(msg)
splits = []
dim_size = a.shape[_dim]
min_split_size = math.floor(dim_size / sections)
num_splits_one_extra = dim_size % sections
start_idx = 0
for split_idx in range(sections):
split_size = (
min_split_size + 1
if (split_idx < num_splits_one_extra)
else min_split_size
)
s = prims.slice_in_dim(a, start_idx, start_idx + split_size, axis=_dim)
splits.append(s)
start_idx = start_idx + split_size
return tuple(splits)
# Case 1 -- indices_or_sections is a sequence of integers or a 1D tensor describing the splits
else:
indices = indices_or_sections
if isinstance(indices_or_sections, TensorLike):
if indices_or_sections.ndim != 1:
msg = "tensor_split: non-scalar indices_or_sections tensors must have only one dimension, "
"but received a tensor with {0} dimensions".format(
indices_or_sections.ndim
)
raise ValueError(msg)
indices = indices_or_sections.tolist()
splits = []
start_idx = 0
for x in indices:
splits.append(prims.slice_in_dim(a, start_idx, x, axis=_dim))
start_idx = x
splits.append(prims.slice_in_dim(a, start_idx, a.shape[_dim], axis=_dim))
return tuple(splits)
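# Illustrative examples of both cases (not from the original source; assume
# standard eager-mode torch.tensor_split semantics):
# >>> x = torch.arange(7)
# >>> torch.tensor_split(x, 3)
# (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
# >>> torch.tensor_split(x, (1, 5))
# (tensor([0]), tensor([1, 2, 3, 4]), tensor([5, 6]))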
# CompositeImplicitAutograd - don't register decomp
def hsplit(
a: TensorLikeType, indices_or_sections: DimsType
) -> Tuple[TensorLikeType, ...]:
check(
a.ndim >= 1,
lambda: (
"torch.hsplit requires a tensor with at least 1 dimension, but got a tensor with "
+ str(a.ndim)
+ " dimensions!"
),
)
dim = 0 if a.ndim == 1 else 1
if isinstance(indices_or_sections, int):
split_size = indices_or_sections
check(
(split_size != 0 and a.shape[dim] % split_size == 0),
lambda: (
"torch.hsplit attempted to split along dimension "
+ str(dim)
+ ", but the size of the dimension "
+ str(a.shape[dim])
+ " is not divisible by the split_size "
+ str(split_size)
+ "!"
),
)
return tensor_split(a, split_size, dim)
check(
isinstance(indices_or_sections, (list, tuple)),
lambda: (
"hsplit(): received an invalid combination of arguments. "
"Expected indices_or_sections to be of type int, list of ints or tuple of ints "
f"but got type {type(indices_or_sections)}"
),
exc_type=TypeError,
)
split_sizes = indices_or_sections
return tensor_split(a, split_sizes, dim)
# CompositeImplicitAutograd - don't register decomp
def vsplit(
a: TensorLikeType, indices_or_sections: DimsType
) -> Tuple[TensorLikeType, ...]:
check(
a.ndim >= 2,
lambda: (
"torch.vsplit requires a tensor with at least 2 dimension, but got a tensor with "
+ str(a.ndim)
+ " dimensions!"
),
)
if isinstance(indices_or_sections, int):
split_size = indices_or_sections
check(
(split_size != 0 and a.shape[0] % split_size == 0),
lambda: (
"torch.vsplit attempted to split along dimension 0 "
+ ", but the size of the dimension "
+ str(a.shape[0])
+ " is not divisible by the split_size "
+ str(split_size)
+ "!"
),
)
return tensor_split(a, split_size, 0)
check(
isinstance(indices_or_sections, (list, tuple)),
lambda: (
"vsplit(): received an invalid combination of arguments. "
"Expected indices_or_sections to be of type int, list of ints or tuple of ints "
f"but got type {type(indices_or_sections)}"
),
exc_type=TypeError,
)
split_sizes = indices_or_sections
return tensor_split(a, split_sizes, 0)
# CompositeImplicitAutograd - don't register decomp
def dsplit(a: TensorLikeType, sections: DimsType) -> TensorSequenceType:
if a.ndim < 3:
raise RuntimeError(
f"torch.dsplit requires a tensor with at least 3 dimension, but got a tensor with {a.ndim} dimensions!"
)
if isinstance(sections, int) and (sections == 0 or a.shape[2] % sections != 0):
raise RuntimeError(
"torch._refs.dsplit attempted to split along dimension 2, "
+ f"but the size of the dimension {a.shape[2]} is not divisible by the split_size {sections}!"
)
return tensor_split(a, sections, 2)
@register_decomposition(torch.ops.aten.t.default, disable_meta=True)
def t(a: TensorLikeType):
# TODO: Add sparse support
# if a.is_sparse:
# sparse_dim = a.sparse_dim()
# dense_dim = a.dense_dim()
# if not (sparse_dim <= 2 and dense_dim == 0):
# raise RuntimeError(
# f"t() expects a tensor with <= 2 sparse and 0 dense dimensions, but got {sparse_dim} sparse and"
# f"{dense_dim} dense dimensions"
# )
if a.ndim > 2:
raise RuntimeError(
f"t() expects a tensor with <= 2 dimensions, but self is {a.ndim}D"
)
return torch.transpose(a, 0, 0 if a.ndim < 2 else 1)
def transpose(a: TensorLikeType, dim0: int, dim1: int) -> TensorLikeType:
_dim0, _dim1 = utils.canonicalize_dims(a.ndim, (dim0, dim1)) # type: ignore[misc]
if a.ndim <= 1 or dim0 == dim1:
return prims.view_of(a)
_permutation = list(range(0, a.ndim))
_permutation[_dim0] = _dim1
_permutation[_dim1] = _dim0
return prims.transpose(a, _permutation)
# Aliases for transpose
swap_axes = transpose
@register_decomposition(torch.ops.aten.unsqueeze, disable_meta=True)
def unsqueeze(a: TensorLikeType, dim: int) -> TensorLikeType:
# Note that unsqueeze canonicalizes with rank + 1 because it allows
# a new innermost dimension to be specified
dim = utils.canonicalize_dim(a.ndim + 1, dim)
return prims.expand_dims(a, (dim,))
# NOTE: shape is a vararg because Tensor.view can be called either as
# Tensor.view(a, b, c) or Tensor.view((a, b, c)). The function-call form torch.view
# doesn't support unpacked shapes
# TODO: Turn this into a decomposition (currently fails on reshape meta tests)
@register_decomposition(torch.ops.aten.view, disable_meta=True)
def view(a: TensorLikeType, *shape: ShapeType) -> TensorLikeType:
return _reshape_view_helper(a, *shape, allow_copy=False)
# CompositeImplicitAutograd - don't register decomp
def ravel(a: TensorLikeType) -> TensorLikeType:
return reshape(a, (-1,))
@out_wrapper()
def empty(
*shape,
dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = None,
layout: Optional[torch.layout] = None,
requires_grad: bool = False,
pin_memory: bool = False,
memory_format: torch.memory_format = torch.contiguous_format,
) -> TensorLikeType:
check(
memory_format != torch.preserve_format,
lambda: "torch.empty: the Preserve memory format is not supported",
)
shape = utils.extract_shape_from_varargs(shape)
if memory_format == torch.contiguous_format:
strides = utils.make_contiguous_strides_for(shape)
elif memory_format == torch.channels_last_3d:
strides = utils.make_channels_last_3d_strides_for(shape)
else: # memory_format == torch.channels_last
check(
memory_format == torch.channels_last,
lambda: f"torch.empty: received an unknown memory format {memory_format}!",
)
strides = utils.make_channels_last_2d_strides_for(shape)
return torch.empty_strided(
shape,
strides,
dtype=dtype,
layout=layout,
device=device,
pin_memory=pin_memory,
requires_grad=requires_grad,
)
@register_decomposition(torch.ops.aten.new_empty)
def new_empty(
a: TensorLikeType,
size: ShapeType,
*,
dtype: Optional[torch.dtype] = None,
layout: Optional[torch.layout] = None,
device: Optional[torch.device] = None,
pin_memory: bool = False,
) -> TensorLikeType:
dtype = a.dtype if dtype is None else dtype
device = a.device if device is None else device
return torch.empty(
size,
dtype=dtype,
device=device,
pin_memory=pin_memory,
layout=layout,
)
@register_decomposition(torch.ops.aten.new_zeros)
def new_zeros(
a: TensorLikeType,
size: ShapeType,
*,
dtype: Optional[torch.dtype] = None,
layout: Optional[torch.layout] = None,
device: Optional[torch.device] = None,
pin_memory: bool = False,
) -> TensorLikeType:
r = a.new_empty(
size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
)
r.zero_()
return r
@register_decomposition(torch.ops.aten.new_ones)
def new_ones(
a: TensorLikeType,
size: ShapeType,
*,
dtype: Optional[torch.dtype] = None,
layout: Optional[torch.layout] = None,
device: Optional[torch.device] = None,
pin_memory: bool = False,
) -> TensorLikeType:
r = a.new_empty(
size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
)
r.fill_(1)
return r
@register_decomposition(torch.ops.aten.new_full)
def new_full(
a: TensorLikeType,
size: ShapeType,
fill_value: NumberType,
*,
dtype: Optional[torch.dtype] = None,
layout: Optional[torch.layout] = None,
device: Optional[torch.device] = None,
pin_memory: bool = False,
) -> TensorLikeType:
r = a.new_empty(
size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
)
r.fill_(fill_value) # type: ignore[arg-type]
return r
def empty_like(
a: TensorLikeType,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = None,
layout: Optional[torch.layout] = None,
requires_grad: bool = False,
pin_memory: bool = False,
memory_format: torch.memory_format = torch.preserve_format,
) -> TensorLikeType:
dtype = a.dtype if dtype is None else dtype
device = a.device if device is None else device
strides: Tuple[int, ...]
if memory_format != torch.preserve_format:
return torch.empty(
a.shape,
dtype=dtype,
layout=layout,
device=device,
requires_grad=requires_grad,
pin_memory=pin_memory,
memory_format=memory_format,
)
# memory_format == torch.preserve_format
strides = utils.compute_elementwise_output_strides(a)
return torch.empty_strided(
a.shape,
strides,
dtype=dtype,
layout=layout,
device=device,
pin_memory=pin_memory,
requires_grad=requires_grad,
)
@overload
def arange(
end: NumberType,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = None,
layout: torch.layout = torch.strided,
pin_memory: bool = False,
requires_grad: bool = False,
) -> TensorLikeType:
pass
@overload
def arange(
start: NumberType,
end: NumberType,
step: NumberType = 1,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = None,
layout: torch.layout = torch.strided,
pin_memory: bool = False,
requires_grad: bool = False,
) -> TensorLikeType:
pass
# See https://github.com/pytorch/pytorch/issues/82364
# @register_decomposition(torch.ops.aten.arange)
# @out_wrapper()
@register_decomposition(
[
torch.ops.aten.arange.default,
torch.ops.aten.arange.start,
torch.ops.aten.arange.start_step,
]
)
def arange(
a: Optional[NumberType] = None,
b: Optional[NumberType] = None,
step: NumberType = 1,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = None,
layout: torch.layout = torch.strided,
pin_memory: bool = False,
requires_grad: bool = False,
) -> TensorLikeType:
assert (a is not None and b is not None) or (a is not None and b is None)
if a is not None and b is not None:
return prims.arange(
a,
b,
step,
dtype=dtype,
device=device,
# layout=layout,
# pin_memory=pin_memory,
requires_grad=requires_grad,
)
elif a is not None and b is None:
return prims.arange(
0,
a,
step,
dtype=dtype,
device=device,
# layout=layout,
# pin_memory=pin_memory,
requires_grad=requires_grad,
)
else:
raise AssertionError()
@register_decomposition(torch.ops.aten.linspace)
@out_wrapper()
def linspace(
start: NumberType,
end: NumberType,
steps: NumberType,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = None,
layout: torch.layout = torch.strided,
pin_memory: bool = False,
requires_grad: bool = False,
) -> TensorLikeType:
if dtype is None:
dtype = torch.get_default_dtype()
# NB: NumPy actually doesn't do this cast, but for this ref, I'd rather have this
# cast than not, because it allows us to always go into the precise path
# if dtype is integral and not worry about whether start/end are float
if prims.utils.is_integer_dtype(dtype):
if isinstance(start, float):
start = int(start)
if isinstance(end, float):
end = int(end)
if py_any(isinstance(arg, complex) for arg in (start, end, steps)):
raise NotImplementedError
assert not isinstance(start, complex) and not isinstance(end, complex) # for mypy
check(
isinstance(steps, int),
lambda: "steps must be int, not float",
exc_type=TypeError,
)
assert isinstance(steps, int) # for mypy
check(steps >= 0, lambda: "number of steps must be non-negative")
factory_kwargs = {
"device": device,
# "layout":layout,
# "pin_memory":pin_memory,
"requires_grad": requires_grad,
}
if steps == 0:
ret = torch.full((0,), 0, dtype=dtype, **factory_kwargs) # type: ignore[call-overload]
elif steps == 1:
ret = torch.full((1,), start, dtype=dtype, **factory_kwargs) # type: ignore[call-overload]
elif start == end:
ret = torch.full((steps,), start, dtype=dtype, **factory_kwargs) # type: ignore[call-overload]
else:
if prims.utils.is_integer_dtype(dtype):
# We need to cast to int, so to avoid off-by-one issues
# do the entire computation with ints when we can
assert isinstance(start, int) and isinstance(end, int)
step_size_x_denom = end - start
eps = 1 if end > start else -1
denom = steps - 1
ret = prims.to_dtype(
torch.arange(
start * denom,
end * denom + eps,
step_size_x_denom,
dtype=torch.int64,
**factory_kwargs, # type: ignore[arg-type]
)
/ denom,
dtype,
)
else:
step_size = (end - start) / (steps - 1)
eps = step_size / 2
ret = prims.to_dtype(
torch.arange( # type: ignore[call-overload]
start, end + eps, step_size, dtype=torch.float64, **factory_kwargs
),
dtype,
)
return ret
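# Worked example for the integer path of linspace above (added for clarity, not
# part of the original source): for linspace(0, 10, steps=5) with an integer
# dtype, denom = 4 and step_size_x_denom = 10, so the intermediate arange is
# [0, 10, 20, 30, 40]; dividing by denom gives [0.0, 2.5, 5.0, 7.5, 10.0] before
# the final cast, keeping the step computation exact in integer arithmetic.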
@register_decomposition(torch.ops.aten.logspace)
@out_wrapper()
def logspace(
start: NumberType,
end: NumberType,
steps: NumberType,
base: NumberType = 10,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = None,
layout: torch.layout = torch.strided,
pin_memory: bool = False,
requires_grad: bool = False,
) -> TensorLikeType:
if dtype is None:
dtype = torch.get_default_dtype()
# NB: NumPy doesn't have this cast
if prims.utils.is_integer_dtype(dtype):
if isinstance(start, float):
start = int(start)
if isinstance(end, float):
end = int(end)
assert not isinstance(base, complex) # for mypy
if base < 0:
raise NotImplementedError
ret = torch.linspace(
start,
end,
steps,
dtype=torch.float64,
device=device,
layout=layout,
pin_memory=pin_memory,
requires_grad=requires_grad,
)
return prims.to_dtype(torch.pow(base, ret), dtype)
@overload
def meshgrid(tensors: Sequence[TensorLikeType], indexing: str):
pass
@overload
def meshgrid(*tensors: TensorLikeType, indexing: str):
pass
@register_decomposition(torch.ops.aten.meshgrid)
def meshgrid(
*tensors: Union[TensorLikeType, List[TensorLikeType], Tuple[TensorLikeType]],
indexing: str,
) -> List[TensorLikeType]:
# This ref simultaneously handles two overloads (see stubs above)
# The `indexing` argument is currently optional for torch.meshgrid, but we
# plan to make the argument required: https://github.com/pytorch/pytorch/issues/50276
if isinstance(tensors[0], list) or isinstance(tensors[0], tuple):
assert len(tensors) == 1
tensors = tuple(tensors[0])
check(
py_all(isinstance(a, TensorLike) for a in tensors),
lambda: "meshgrid expects its inputs to be tensors",
)
check(len(tensors) > 0, lambda: "meshgrid expects a non-empty TensorList")
for i in range(len(tensors) - 1):
check(
tensors[i].dtype == tensors[i + 1].dtype, # type: ignore[union-attr]
lambda: "meshgrid expects all tensors to have the same dtype",
)
check(
tensors[i].device == tensors[i + 1].device, # type: ignore[union-attr]
lambda: "meshgrid expects all tensors to have the same device",
)
swap_first_and_second_tensors = False
if indexing == "xy":
swap_first_and_second_tensors = len(tensors) >= 2
if swap_first_and_second_tensors:
tensors = (tensors[1], tensors[0], *tensors[2:])
else:
check(
indexing == "ij",
lambda: (
'torch.meshgrid: indexing must be one of "xy" or "ij", '
f"but received: {indexing}"
),
)
result_shape: List[int] = []
for t in tensors:
assert isinstance(t, TensorLike) # mypy
check(
t.ndim == 0 or t.ndim == 1,
lambda: f"torch.meshgrid: Expected 0D or 1D tensor in the tensor list but got: {t}",
)
result_shape.append(t.numel())
grids: List[TensorLikeType] = []
for i, t in enumerate(tensors):
assert isinstance(t, TensorLike) # mypy
if t.ndim == 0:
t = t.view((1,))
grids.append(prims.broadcast_in_dim(t, result_shape, (i,)))
if swap_first_and_second_tensors:
# Swap outputs if we originally swapped at the beginning
grids[0], grids[1] = grids[1], grids[0]
return grids
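# Illustrative example for meshgrid above (added, not part of the original
# source): with "ij" indexing the i-th output varies along dimension i.
#   >>> x, y = torch.meshgrid(torch.tensor([1, 2]), torch.tensor([3, 4, 5]), indexing="ij")
#   >>> x.shape, y.shape
#   (torch.Size([2, 3]), torch.Size([2, 3]))
# With indexing="xy" the first two inputs (and outputs) are swapped, so the
# grids would instead have shape (3, 2).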
# NOTE: for convenience, shape can be a tuple of ints or a tuple containing a tuple of ints
@register_decomposition(torch.ops.aten.empty_strided)
def empty_strided(
shape: Union[ShapeType, Tuple[ShapeType]],
strides: StrideType,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = None,
layout: Optional[torch.layout] = None,
requires_grad: bool = False,
pin_memory: bool = False,
) -> TensorLikeType:
if pin_memory:
raise NotImplementedError("PrimTorch doesn't support pinned memory")
if layout is not None and layout is not torch.strided:
raise NotImplementedError(f"PrimTorch doesn't support layout={layout}")
shape = utils.extract_shape_from_varargs(shape)
dtype = torch.get_default_dtype() if dtype is None else dtype
device = torch.device("cpu") if device is None else device
return prims.empty_strided(
shape,
strides,
dtype=dtype,
device=device,
requires_grad=requires_grad,
)
# TODO: missing kwargs (e.g. layout)
@out_wrapper()
def full(
shape: ShapeType,
fill_value: NumberType,
*,
dtype: torch.dtype,
device: torch.device,
requires_grad: bool,
) -> TensorLikeType:
e = empty(shape, dtype=dtype, device=device, requires_grad=requires_grad)
return fill(e, fill_value)
def full_like(
a: TensorLikeType,
fill_value: NumberType,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = None,
requires_grad: bool = False,
) -> TensorLikeType:
e = empty_like(a, dtype=dtype, device=device, requires_grad=requires_grad)
return fill(e, fill_value)
ones = partial(full, fill_value=True)
ones_like = partial(full_like, fill_value=True)
# TODO: missing kwargs (e.g. layout)
def scalar_tensor(
a: NumberType,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[torch.device] = None,
) -> TensorLikeType:
dtype = dtype if dtype is not None else utils.type_to_dtype(type(a))
device = device if device is not None else torch.device("cpu")
return prims.scalar_tensor(a, dtype=dtype, device=device)
zeros = partial(full, fill_value=False)
zeros_like = partial(full_like, fill_value=False)
@register_decomposition(torch.ops.aten.uniform)
def uniform(
shape: ShapeType,
low: Union[bool, int, float] = 0.0,
high: Union[bool, int, float] = 1.0,
*,
dtype: torch.dtype,
device: DeviceLikeType,
) -> TensorLikeType:
utils.validate_shape(shape)
assert isinstance(low, (bool, int, float))
assert isinstance(high, (bool, int, float))
low = float(low)
high = float(high)
assert isinstance(dtype, torch.dtype)
device = utils.canonicalize_device(device)
return prims.uniform(shape, low=low, high=high, dtype=dtype, device=device)
@register_decomposition(
[torch.ops.aten.masked_fill.Scalar, torch.ops.aten.masked_fill.Tensor]
)
def masked_fill(a: TensorLikeType, mask: TensorLikeType, value: TensorOrNumberLikeType):
python_type = utils.dtype_to_type(a.dtype)
if isinstance(value, Number):
value_type = type(value)
else:
# NOTE: Could not use value = item(value) as it resulted in
# RuntimeError: Cannot cast FakeTensor(cpu) to number
value_ndim = value.ndim
check(
value_ndim == 0,
lambda: f"only supports a 0-dimensional value tensor, but got tensor with {value_ndim} dimension",
)
# `masked_fill` allows cpu scalar to be moved to cuda but not otherwise.
check(
a.device.type == "cuda" or value.device == a.device,
lambda: "Expected `value` to be on same device as `a`",
)
value_type = utils.dtype_to_type(value.dtype)
if utils.is_cpu_scalar_tensor(value):
value = value.item()
if value_type is complex:
# only downcasting from complex to lower type is not allowed.
# We allow casting `value` to lower type for other case
# Eg. float -> int.
# Ref: https://github.com/pytorch/pytorch/issues/79195
check(
utils.is_weakly_lesser_type(value_type, python_type),
lambda: f"could not convert to type {python_type} without overflow",
)
# Since `where` allows type-promotion,
# cast value to correct type before passing to `where`
if isinstance(value, Number):
return torch.where(mask, python_type(value), a)
assert isinstance(value, TensorLike)
return torch.where(mask, prims.to_dtype(value, a.dtype), a)
# CompositeImplicitAutograd - don't register decomp
def allclose(
a: TensorLikeType,
b: TensorLikeType,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
) -> bool:
"""
Reference implementation of torch.allclose
"""
_check_close_args(name="torch.allclose", a=a, b=b, rtol=rtol, atol=atol)
return bool(
torch.all(torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)).item()
)
# TODO: add OpInfo for torch.equal and refs.equal
def equal(a: TensorLikeType, b: TensorLikeType) -> bool:
utils.check_same_device(a, b, allow_cpu_scalar_tensors=False)
utils.check_same_dtype(a, b)
# Shape check
if a.ndim != b.ndim:
return False
for x, y in zip(a.shape, b.shape):
if x != y:
return False
# Short-circuits if there are no elements to validate
if a.numel() == 0:
return True
return item(all(eq(a, b))) # type: ignore[return-value]
@out_wrapper(exact_dtype=True)
def norm(
input: TensorLikeType,
p: Optional[Union[float, str]] = "fro",
dim: Optional[DimsType] = None,
keepdim: bool = False,
*,
dtype: Optional[torch.dtype] = None,
) -> TensorLikeType:
# In these cases we compute the "Frobenius norm"
if (
p == "fro" and (dim is None or isinstance(dim, int) or len(dim) <= 2)
) or p is None:
p = 2
if isinstance(dim, int):
dim = [dim]
if isinstance(p, str):
# Here we either call the nuclear norm, or we call matrix_norm with some arguments
# that will throw an error
if dim is None:
dim = tuple(range(input.ndim))
return torch.linalg.matrix_norm(input, p, dim, keepdim, dtype=dtype)
else:
return torch.linalg.vector_norm(input, p, dim, keepdim, dtype=dtype)
@register_decomposition(torch.ops.aten.trace)
def trace(self: TensorLikeType) -> TensorLikeType:
utils.check(
self.ndim == 2, lambda: f"expected a matrix, but got tensor with dim {self.ndim}"
)
return torch.sum(torch.diag(self, 0))
import torch._refs.fft
import torch._refs.linalg
import torch._refs.nn.functional
import torch._refs.special
| pytorch-master | torch/_refs/__init__.py |
import math
from typing import Iterable, List, NamedTuple, Optional, Sequence, Tuple, Union
from typing_extensions import Literal
import torch
import torch._prims as prims
import torch._prims_common as utils
from torch._decomp import register_decomposition
from torch._prims_common import check, DimsType, ShapeType, TensorLikeType
from torch._prims_common.wrappers import out_wrapper
__all__ = [
# Transforms
"fft",
"fft2",
"fftn",
"hfft",
"hfft2",
"hfftn",
"rfft",
"rfft2",
"rfftn",
"ifft",
"ifft2",
"ifftn",
"ihfft",
"ihfft2",
"ihfftn",
"irfft",
"irfft2",
"irfftn",
# Helpers
"fftshift",
"ifftshift",
]
NormType = Union[None, Literal["forward"], Literal["backward"], Literal["ortho"]]
_NORM_VALUES = {None, "forward", "backward", "ortho"}
def _apply_norm(
x: TensorLikeType, norm: NormType, signal_numel: int, forward: bool
) -> TensorLikeType:
"""Apply normalization to the un-normalized FFT result"""
check(norm in _NORM_VALUES, lambda: f"Invalid normalization mode: {norm}")
if norm == "ortho":
return x * (1 / math.sqrt(signal_numel))
normalize = (not forward and (norm is None or norm == "backward")) or (
forward and norm == "forward"
)
return x * (1 / signal_numel) if normalize else x
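# Worked example for _apply_norm above (added for clarity, not part of the
# original source): for a length-8 transform, norm=None/"backward" leaves the
# forward result unscaled and scales the inverse by 1/8, norm="ortho" scales
# both directions by 1/sqrt(8), and norm="forward" scales the forward pass by
# 1/8, so a round trip such as ifft(fft(x)) always recovers x.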
def _promote_type_fft(dtype: torch.dtype, require_complex: bool) -> torch.dtype:
"""Helper to promote a dtype to one supported by the FFT primitives"""
if dtype.is_complex:
return dtype
# Promote integral to default float type
if not dtype.is_floating_point:
dtype = torch.get_default_dtype()
if require_complex:
dtype = utils.corresponding_complex_dtype(dtype)
return dtype
def _maybe_promote_tensor_fft(
t: TensorLikeType, require_complex: bool = False
) -> TensorLikeType:
"""Helper to promote a tensor to a dtype supported by the FFT primitives"""
cur_type = t.dtype
new_type = _promote_type_fft(cur_type, require_complex)
if cur_type == new_type:
return t
return prims.convert_element_type(t, new_type)
def _resize_fft_input(
x: TensorLikeType, dims: Tuple[int, ...], sizes: Tuple[int, ...]
) -> TensorLikeType:
"""
Fixes the shape of x such that x.size(dims[i]) == sizes[i],
either by zero-padding, or by slicing x starting from 0.
"""
assert len(dims) == len(sizes)
must_copy = False
x_sizes = x.shape
pad_amount = [0] * len(x_sizes) * 2
for i in range(len(dims)):
if sizes[i] == -1:
continue
if x_sizes[dims[i]] < sizes[i]:
must_copy = True
pad_idx = len(pad_amount) - 2 * dims[i] - 1
pad_amount[pad_idx] = sizes[i] - x_sizes[dims[i]]
if x_sizes[dims[i]] > sizes[i]:
x = x.narrow(dims[i], 0, sizes[i])
return torch.constant_pad_nd(x, pad_amount) if must_copy else x
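# Illustrative note for _resize_fft_input above (added, not part of the
# original source): for x of shape (4, 10) with dims=(1,) and sizes=(6,) the
# input is sliced to x[:, :6]; with sizes=(12,) it is instead zero-padded at
# the end of dim 1 up to length 12, and a size of -1 leaves that dim untouched.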
def _fft_c2r(
func_name: str,
input: TensorLikeType,
n: Optional[int],
dim: int,
norm: NormType,
forward: bool,
) -> TensorLikeType:
"""Common code for performing any complex to real FFT (irfft or hfft)"""
input = _maybe_promote_tensor_fft(input, require_complex=True)
dims = (utils.canonicalize_dim(input.ndim, dim),)
last_dim_size = n if n is not None else 2 * (input.shape[dim] - 1)
check(last_dim_size >= 1, lambda: f"Invalid number of data points ({n}) specified")
if n is not None:
input = _resize_fft_input(input, dims=dims, sizes=(last_dim_size // 2 + 1,))
if forward:
input = torch.conj(input)
output = prims.fft_c2r(input, dim=dims, last_dim_size=last_dim_size)
return _apply_norm(output, norm=norm, signal_numel=last_dim_size, forward=forward)
def _fft_r2c(
func_name: str,
input: TensorLikeType,
n: Optional[int],
dim: int,
norm: NormType,
forward: bool,
onesided: bool,
) -> TensorLikeType:
"""Common code for performing any real to complex FFT (rfft or ihfft)"""
check(
not input.dtype.is_complex,
lambda: f"{func_name} expects a floating point input tensor, but got {input.dtype}",
)
input = _maybe_promote_tensor_fft(input)
dims = (utils.canonicalize_dim(input.ndim, dim),)
if n is not None:
input = _resize_fft_input(input, dims, (n,))
ret = prims.fft_r2c(input, dim=dims, onesided=onesided)
ret = _apply_norm(ret, norm, input.shape[dim], forward)
return ret if forward else torch.conj(ret)
def _fft_c2c(
func_name: str,
input: TensorLikeType,
n: Optional[int],
dim: int,
norm: NormType,
forward: bool,
) -> TensorLikeType:
"""Common code for performing any complex to complex FFT (fft or ifft)"""
check(
input.dtype.is_complex,
lambda: f"{func_name} expects a complex input tensor, but got {input.dtype}",
)
dims = (utils.canonicalize_dim(input.ndim, dim),)
if n is not None:
input = _resize_fft_input(input, dims, (n,))
ret = prims.fft_c2c(input, dim=dims, forward=forward)
return _apply_norm(ret, norm, input.shape[dim], forward)
@register_decomposition(torch.ops.aten.fft_fft)
@out_wrapper()
def fft(
input: TensorLikeType,
n: Optional[int] = None,
dim: int = -1,
norm: NormType = None,
) -> TensorLikeType:
if input.dtype.is_complex:
return _fft_c2c("fft", input, n, dim, norm, forward=True)
else:
return _fft_r2c("fft", input, n, dim, norm, forward=True, onesided=False)
@register_decomposition(torch.ops.aten.fft_ifft)
@out_wrapper()
def ifft(
input: TensorLikeType,
n: Optional[int] = None,
dim: int = -1,
norm: NormType = None,
) -> TensorLikeType:
if input.dtype.is_complex:
return _fft_c2c("ifft", input, n, dim, norm, forward=False)
else:
return _fft_r2c("ifft", input, n, dim, norm, forward=False, onesided=False)
@register_decomposition(torch.ops.aten.fft_rfft)
@out_wrapper()
def rfft(
input: TensorLikeType,
n: Optional[int] = None,
dim: int = -1,
norm: NormType = None,
) -> TensorLikeType:
return _fft_r2c("rfft", input, n, dim, norm, forward=True, onesided=True)
@register_decomposition(torch.ops.aten.fft_irfft)
@out_wrapper()
def irfft(
input: TensorLikeType,
n: Optional[int] = None,
dim: int = -1,
norm: NormType = None,
) -> TensorLikeType:
return _fft_c2r("irfft", input, n, dim, norm, forward=False)
@register_decomposition(torch.ops.aten.fft_hfft)
@out_wrapper()
def hfft(
input: TensorLikeType,
n: Optional[int] = None,
dim: int = -1,
norm: NormType = None,
) -> TensorLikeType:
return _fft_c2r("hfft", input, n, dim, norm, forward=True)
@register_decomposition(torch.ops.aten.fft_ihfft)
@out_wrapper()
def ihfft(
input: TensorLikeType,
n: Optional[int] = None,
dim: int = -1,
norm: NormType = None,
) -> TensorLikeType:
return _fft_r2c("ihfft", input, n, dim, norm, forward=False, onesided=True)
class _ShapeAndDims(NamedTuple):
shape: Tuple[int, ...]
dims: Tuple[int, ...]
def _canonicalize_fft_shape_and_dim_args(
input: TensorLikeType, shape: Optional[ShapeType], dim: Optional[DimsType]
) -> _ShapeAndDims:
"""Convert the shape and dim arguments into a canonical form where neither are optional"""
input_dim = input.ndim
input_sizes = input.shape
if dim is not None:
if not isinstance(dim, Sequence):
dim = (dim,)
ret_dims = utils.canonicalize_dims(input_dim, dim)
# Check dims are unique
check(len(set(dim)) == len(dim), lambda: "FFT dims must be unique")
if shape is not None:
if not isinstance(shape, Sequence):
shape = (shape,)
# Has shape, might have dim
check(
dim is None or len(dim) == len(shape),
lambda: "When given, dim and shape arguments must have the same length",
)
transform_ndim = len(shape)
check(
transform_ndim <= input_dim,
lambda: f"Got shape with {transform_ndim} values but input tensor "
f"only has {input_dim} dimensions.",
)
# If shape is given, dims defaults to the last len(shape) dimensions
if dim is None:
ret_dims = tuple(range(input_dim - transform_ndim, input_dim))
# Translate any -1 values in shape to the default length
ret_shape = tuple(
s if s != -1 else input_sizes[d] for (s, d) in zip(shape, ret_dims)
)
elif dim is None:
# No shape, no dim
ret_dims = tuple(range(input_dim))
ret_shape = tuple(input_sizes)
else:
# No shape, has dim
ret_shape = tuple(input_sizes[d] for d in ret_dims)
for n in ret_shape:
check(n > 0, lambda: f"Invalid number of data points ({n}) specified")
return _ShapeAndDims(shape=ret_shape, dims=ret_dims)
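# Illustrative example for the canonicalization above (added, not part of the
# original source): for an input of shape (4, 5, 6) with shape=(8, -1) and
# dim=None, the transform dims default to the last two dimensions (1, 2) and
# the returned shape is (8, 6), the -1 entry being replaced by the existing size.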
def _prod(xs: Iterable[int]) -> int:
"""Compute product of a list"""
prod = 1
for x in xs:
prod *= x
return prod
def _fftn_c2c(
function_name: str,
input: TensorLikeType,
shape: Tuple[int, ...],
dim: Tuple[int, ...],
norm: NormType,
forward: bool,
) -> TensorLikeType:
"""Common code for n-dimensional complex to complex FFTs (fftn or ifftn)"""
check(
input.dtype.is_complex,
lambda: f"{function_name} expects a complex input tensor, "
f"but got {input.dtype}",
)
x = _resize_fft_input(input, dim, shape)
output = prims.fft_c2c(x, dim=dim, forward=forward)
return _apply_norm(output, norm=norm, signal_numel=_prod(shape), forward=forward)
@register_decomposition(torch.ops.aten.fft_fftn)
@out_wrapper()
def fftn(
input: TensorLikeType,
s: Optional[ShapeType] = None,
dim: Optional[DimsType] = None,
norm: NormType = None,
) -> TensorLikeType:
(shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
x = _maybe_promote_tensor_fft(input, require_complex=True)
return _fftn_c2c("fftn", x, shape, dim, norm, forward=True)
@register_decomposition(torch.ops.aten.fft_ifftn)
@out_wrapper()
def ifftn(
input: TensorLikeType,
s: Optional[ShapeType] = None,
dim: Optional[DimsType] = None,
norm: NormType = None,
) -> TensorLikeType:
(shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
x = _maybe_promote_tensor_fft(input, require_complex=True)
return _fftn_c2c("ifftn", x, shape, dim, norm, forward=False)
@register_decomposition(torch.ops.aten.fft_rfftn)
@out_wrapper()
def rfftn(
input: TensorLikeType,
s: Optional[ShapeType] = None,
dim: Optional[DimsType] = None,
norm: NormType = None,
) -> TensorLikeType:
check(
not input.dtype.is_complex,
lambda: f"rfftn expects a real-valued input tensor, but got {input.dtype}",
)
shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim)
input = _maybe_promote_tensor_fft(input, require_complex=False)
input = _resize_fft_input(input, dim, shape)
out = prims.fft_r2c(input, dim=dim, onesided=True)
return _apply_norm(out, norm=norm, signal_numel=_prod(shape), forward=True)
@register_decomposition(torch.ops.aten.fft_ihfftn)
@out_wrapper()
def ihfftn(
input: TensorLikeType,
s: Optional[ShapeType] = None,
dim: Optional[DimsType] = None,
norm: NormType = None,
) -> TensorLikeType:
check(
not input.dtype.is_complex,
lambda: f"ihfftn expects a real-valued input tensor, but got {input.dtype}",
)
shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim)
check(len(shape) > 0, lambda: "ihfftn must transform at least one axis")
input = _maybe_promote_tensor_fft(input, require_complex=False)
input = _resize_fft_input(input, dim, shape)
tmp = prims.fft_r2c(input, dim=dim[-1:], onesided=True)
if len(dim) == 1:
tmp = _apply_norm(tmp, norm=norm, signal_numel=shape[0], forward=False)
return prims.conj(tmp)
tmp = prims.conj_physical(tmp)
tmp = prims.fft_c2c(tmp, dim=dim[:-1], forward=False)
return _apply_norm(tmp, norm=norm, signal_numel=_prod(shape), forward=False)
class _CanonicalizeC2rReturn(NamedTuple):
shape: Tuple[int, ...]
dim: Tuple[int, ...]
last_dim_size: int
def _canonicalize_fft_c2r_shape_and_dim_args(
fname: str,
input: TensorLikeType,
s: Optional[ShapeType],
dim: Optional[DimsType],
) -> _CanonicalizeC2rReturn:
"""Canonicalize shape and dim arguments for n-dimensional c2r transforms,
as well as calculating the last_dim_size which is shape[dim[-1]] for the output"""
(shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
check(len(shape) > 0, lambda: f"{fname} must transform at least one axis")
if s is None or s[-1] == -1:
last_dim_size = 2 * (input.shape[dim[-1]] - 1)
else:
last_dim_size = shape[-1]
check(
last_dim_size >= 1,
lambda: f"Invalid number of data points ({last_dim_size}) specified",
)
shape_list = list(shape)
shape_list[-1] = last_dim_size // 2 + 1
return _CanonicalizeC2rReturn(
shape=tuple(shape_list), dim=dim, last_dim_size=last_dim_size
)
@register_decomposition(torch.ops.aten.fft_irfftn)
@out_wrapper()
def irfftn(
input: TensorLikeType,
s: Optional[ShapeType] = None,
dim: Optional[DimsType] = None,
norm: NormType = None,
) -> TensorLikeType:
shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args(
"irfftn", input, s, dim
)
input = _maybe_promote_tensor_fft(input, require_complex=True)
input = _resize_fft_input(input, dim, shape)
out = prims.fft_c2r(input, dim=dim, last_dim_size=last_dim_size)
return _apply_norm(out, norm, _prod(out.shape[d] for d in dim), forward=False)
@register_decomposition(torch.ops.aten.fft_hfftn)
@out_wrapper()
def hfftn(
input: TensorLikeType,
s: Optional[ShapeType] = None,
dim: Optional[DimsType] = None,
norm: NormType = None,
) -> TensorLikeType:
shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args(
"hfftn", input, s, dim
)
input = _maybe_promote_tensor_fft(input, require_complex=True)
input = _resize_fft_input(input, dim, shape)
tmp = prims.fft_c2c(input, dim=dim[:-1], forward=True) if len(dim) > 1 else input
tmp = _apply_norm(tmp, norm, _prod(shape[:-1]), forward=True)
tmp = prims.conj_physical(tmp)
out = prims.fft_c2r(tmp, dim=dim[-1:], last_dim_size=last_dim_size)
return _apply_norm(out, norm, last_dim_size, forward=True)
@register_decomposition(torch.ops.aten.fft_fft2)
@out_wrapper()
def fft2(
input: TensorLikeType,
s: Optional[ShapeType] = None,
dim: Optional[DimsType] = (-2, -1),
norm: NormType = None,
) -> TensorLikeType:
return torch.fft.fftn(input, s=s, dim=dim, norm=norm)
@register_decomposition(torch.ops.aten.fft_ifft2)
@out_wrapper()
def ifft2(
input: TensorLikeType,
s: Optional[ShapeType] = None,
dim: Optional[DimsType] = (-2, -1),
norm: NormType = None,
) -> TensorLikeType:
return torch.fft.ifftn(input, s=s, dim=dim, norm=norm)
@register_decomposition(torch.ops.aten.fft_rfft2)
@out_wrapper()
def rfft2(
input: TensorLikeType,
s: Optional[ShapeType] = None,
dim: Optional[DimsType] = (-2, -1),
norm: NormType = None,
) -> TensorLikeType:
return torch.fft.rfftn(input, s=s, dim=dim, norm=norm)
@register_decomposition(torch.ops.aten.fft_irfft2)
@out_wrapper()
def irfft2(
input: TensorLikeType,
s: Optional[ShapeType] = None,
dim: Optional[DimsType] = (-2, -1),
norm: NormType = None,
) -> TensorLikeType:
return torch.fft.irfftn(input, s=s, dim=dim, norm=norm)
@register_decomposition(torch.ops.aten.fft_hfft2)
@out_wrapper()
def hfft2(
input: TensorLikeType,
s: Optional[ShapeType] = None,
dim: Optional[DimsType] = (-2, -1),
norm: NormType = None,
) -> TensorLikeType:
return torch.fft.hfftn(input, s=s, dim=dim, norm=norm)
@register_decomposition(torch.ops.aten.fft_ihfft2)
@out_wrapper()
def ihfft2(
input: TensorLikeType,
s: Optional[ShapeType] = None,
dim: Optional[DimsType] = (-2, -1),
norm: NormType = None,
) -> TensorLikeType:
return torch.fft.ihfftn(input, s=s, dim=dim, norm=norm)
def _default_alldims(dim: Optional[DimsType], x: TensorLikeType) -> List[int]:
"""Convert Optional[DimsType] to a simple list, defaulting to all dimensions"""
if dim is None:
return list(range(x.ndim))
elif not isinstance(dim, Sequence):
return [dim]
else:
return list(dim)
@register_decomposition(torch.ops.aten.fft_fftshift)
def fftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType:
dims = _default_alldims(dim, input)
shift = [input.shape[d] // 2 for d in dims]
return torch.roll(input, shift, dims)
@register_decomposition(torch.ops.aten.fft_ifftshift)
def ifftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType:
dims = _default_alldims(dim, input)
shift = [(input.shape[d] + 1) // 2 for d in dims]
return torch.roll(input, shift, dims)
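# Illustrative example for fftshift/ifftshift above (added, not part of the
# original source; printed values are approximate):
#   >>> torch.fft.fftshift(torch.fft.fftfreq(5))
#   tensor([-0.4000, -0.2000,  0.0000,  0.2000,  0.4000])
# ifftshift undoes the shift, so ifftshift(fftshift(x)) == x for any size.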
| pytorch-master | torch/_refs/fft.py |
from functools import partial
from typing import List, Optional, Tuple, Union
import torch
import torch._prims as prims
import torch._prims_common as utils
import torch._refs as refs
import torch._refs.linalg as linalg
from torch import Tensor
from torch._prims_common import (
check,
check_fp_or_complex,
check_is_matrix,
DimsType,
NumberType,
TensorLikeType,
)
from torch._prims_common.wrappers import out_wrapper
__all__ = [
"svd",
"svdvals",
"vector_norm",
"matrix_norm",
"norm",
]
def check_norm_dtype(dtype: Optional[torch.dtype], x_dtype: torch.dtype, fn_name: str):
"""
Checks related to the dtype kwarg in `linalg.*norm` functions
"""
if dtype is not None:
check(
utils.is_float_dtype(dtype) or utils.is_complex_dtype(dtype),
lambda: f"{fn_name}: dtype should be floating point or complex. Got {dtype}",
)
check(
utils.is_complex_dtype(dtype) == utils.is_complex_dtype(x_dtype),
lambda: "{fn_name}: dtype should be {d} for {d} inputs. Got {dtype}".format(
fn_name=fn_name,
d="complex" if utils.is_complex_dtype(x_dtype) else "real",
dtype=dtype,
),
)
check(
utils.get_higher_dtype(dtype, x_dtype) == dtype,
lambda: f"{fn_name}: the dtype of the input ({x_dtype}) should be convertible "
"without narrowing to the specified dtype ({dtype})",
)
# Utilities should come BEFORE this import
from torch._decomp import register_decomposition
@register_decomposition(torch.ops.aten.linalg_vector_norm)
@out_wrapper(exact_dtype=True)
def vector_norm(
x: TensorLikeType,
ord: float = 2.0,
dim: Optional[DimsType] = None,
keepdim: bool = False,
*,
dtype: Optional[torch.dtype] = None,
) -> Tensor:
# Checks
check_fp_or_complex(x.dtype, "linalg.vector_norm")
if isinstance(dim, int):
dim = [dim] # type: ignore[assignment]
elif not isinstance(dim, List) and dim is not None:
# refs.amin just accepts List rather than DimType (Tuple)
dim = list(dim) # type: ignore[assignment]
if x.numel() == 0 and (ord < 0.0 or ord == float("inf")):
check(
dim is not None and len(dim) != 0,
lambda: f"linalg.vector_norm cannot compute the {ord} norm on an empty tensor "
"because the operation does not have an identity",
)
shape = x.shape
assert dim is not None # mypy does not seem to be able to see through check?
for d in dim:
check(
shape[d] != 0,
lambda: f"linalg.vector_norm cannot compute the {ord} norm on the "
f"dimension {d} because this dimension is empty and the "
"operation does not have an identity",
)
check_norm_dtype(dtype, x.dtype, "linalg.vector_norm")
computation_dtype, result_dtype = utils.reduction_dtypes(
x, utils.REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT, dtype
)
to_result_dtype = partial(prims.convert_element_type, dtype=result_dtype)
# Implementation
if ord == 0.0:
return refs.sum(refs.ne(x, 0.0), dim=dim, keepdim=keepdim, dtype=result_dtype)
elif ord == float("inf"):
return to_result_dtype(refs.amax(torch.abs(x), dim=dim, keepdim=keepdim))
elif ord == float("-inf"):
return to_result_dtype(refs.amin(torch.abs(x), dim=dim, keepdim=keepdim))
else:
# From here on the computation dtype is important as the reduction is non-trivial
x = prims.convert_element_type(x, computation_dtype)
reduce_sum = partial(refs.sum, dim=dim, keepdim=keepdim)
if not (ord % 2.0 == 0.0 and utils.is_float_dtype(x.dtype)):
x = torch.abs(x)
return to_result_dtype(torch.pow(reduce_sum(torch.pow(x, ord)), 1.0 / ord))
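# Illustrative example for vector_norm above (added, not part of the original
# source; printed form is approximate):
#   >>> torch.linalg.vector_norm(torch.tensor([3.0, 4.0]))
#   tensor(5.)
#   >>> torch.linalg.vector_norm(torch.tensor([3.0, 4.0]), ord=float("inf"))
#   tensor(4.)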
def backshift_permutation(dim0, dim1, ndim):
# Auxiliary function for matrix_norm
# Computes the permutation that moves the two given dimensions to the back
ret = [i for i in range(ndim) if i != dim0 and i != dim1]
ret.extend((dim0, dim1))
return ret
def inverse_permutation(perm):
# Given a permutation, returns its inverse. It's equivalent to argsort on an array
return [i for i, j in sorted(enumerate(perm), key=lambda i_j: i_j[1])]
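# Worked example for the two helpers above (added, not part of the original
# source): for ndim=4, dim0=1, dim1=2, backshift_permutation returns
# [0, 3, 1, 2] and inverse_permutation of that is [0, 2, 3, 1]; transposing by
# the permutation and then by its inverse restores the original layout.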
# CompositeImplicitAutograd
@out_wrapper(exact_dtype=True)
def matrix_norm(
A: TensorLikeType,
ord: Union[float, str] = "fro",
dim: DimsType = (-2, -1),
keepdim: bool = False,
*,
dtype: Optional[torch.dtype] = None,
) -> TensorLikeType:
# shape
check_is_matrix(A, "linalg.matrix_norm")
# dim
dim = utils.canonicalize_dims(A.ndim, dim)
if isinstance(dim, int):
dim = (dim,) # type: ignore[assignment]
check(len(dim) == 2, lambda: f"linalg.matrix_norm: dim must be a 2-tuple. Got {dim}")
check(
dim[0] != dim[1],
lambda: "linalg.matrix_norm: dims must be different. Got ({dim[0]}, {dim[1]})",
)
# dtype arg
check_norm_dtype(dtype, A.dtype, "linalg.matrix_norm")
if isinstance(ord, str):
# ord
check(
ord in ("fro", "nuc"),
lambda: "linalg.matrix_norm: Order {ord} not supported.",
)
# dtype
check_fp_or_complex(
A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != "nuc"
)
if ord == "fro":
return vector_norm(A, 2, dim, keepdim, dtype=dtype)
else: # ord == "nuc"
if dtype is not None:
A = prims.convert_element_type(A, dtype)
perm = backshift_permutation(dim[0], dim[1], A.ndim)
result = torch.sum(svdvals(prims.transpose(A, perm)), -1, keepdim)
if keepdim:
inv_perm = inverse_permutation(perm)
result = prims.transpose(torch.unsqueeze(result, -1), inv_perm)
return result
else:
# ord
abs_ord = abs(ord)
check(
abs_ord in (2, 1, float("inf")),
lambda: "linalg.matrix_norm: Order {ord} not supported.",
)
# dtype
check_fp_or_complex(
A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != 2
)
max_min = partial(torch.amax if ord > 0.0 else torch.amin, keepdim=keepdim)
if abs_ord == 2.0:
if dtype is not None:
A = prims.convert_element_type(A, dtype)
perm = backshift_permutation(dim[0], dim[1], A.ndim)
result = max_min(svdvals(prims.transpose(A, perm)), dim=-1)
if keepdim:
inv_perm = inverse_permutation(perm)
result = prims.transpose(torch.unsqueeze(result, -1), inv_perm)
return result
else: # 1, -1, inf, -inf
dim0, dim1 = dim
if abs_ord == float("inf"):
dim0, dim1 = dim1, dim0
if not keepdim and (dim0 < dim1):
dim1 -= 1
return max_min(
vector_norm(A, 1.0, dim=dim0, keepdim=keepdim, dtype=dtype), dim1
)
# CompositeImplicitAutograd
@out_wrapper(exact_dtype=True)
def norm(
A: TensorLikeType,
ord: Optional[Union[float, str]] = None,
dim: Optional[DimsType] = None,
keepdim: bool = False,
*,
dtype: Optional[torch.dtype] = None,
) -> TensorLikeType:
if dim is not None:
if isinstance(dim, int):
dim = (dim,) # type: ignore[assignment]
check(
len(dim) in (1, 2),
lambda: "linalg.norm: If dim is specified, it must be of length 1 or 2. Got {dim}",
)
elif ord is not None:
check(
A.ndim in (1, 2),
lambda: "linalg.norm: If dim is not specified but ord is, the input must be 1D or 2D. Got {A.ndim}D",
)
if ord is not None and (
(dim is not None and len(dim) == 2) or (dim is None and A.ndim == 2)
):
if dim is None:
dim = (0, 1)
return matrix_norm(A, ord, dim, keepdim, dtype=dtype)
else:
if ord is None:
ord = 2.0
return vector_norm(A, ord, dim, keepdim, dtype=dtype)
# CompositeImplicitAutograd
@out_wrapper("U", "S", "Vh", exact_dtype=True)
def svd(A: TensorLikeType, full_matrices: bool = True) -> Tuple[Tensor, Tensor, Tensor]:
return prims.svd(A, full_matrices=full_matrices)
# CompositeImplicitAutograd
@out_wrapper(exact_dtype=True)
def svdvals(A: TensorLikeType) -> Tensor:
return svd(A, full_matrices=False)[1]
| pytorch-master | torch/_refs/linalg/__init__.py |
from typing import List
__all__: List[str] = []
| pytorch-master | torch/_refs/nn/__init__.py |
from typing import Optional, Union
import torch
import torch._prims as prims
import torch._prims_common as utils
import torch._refs as refs
from torch._decomp import register_decomposition
from torch._prims_common import (
check,
ELEMENTWISE_TYPE_PROMOTION_KIND,
NumberType,
ShapeType,
TensorLike,
TensorLikeType,
)
from torch._prims_common.wrappers import (
elementwise_type_promotion_wrapper,
elementwise_unary_scalar_wrapper,
out_wrapper,
)
from torch._refs import (
_make_elementwise_binary_reference,
_make_elementwise_unary_reference,
)
__all__ = [
"celu",
"dropout",
"elu",
"hardshrink",
"hardtanh",
"hinge_embedding_loss",
"l1_loss",
"margin_ranking_loss",
"mish",
"mse_loss",
"prelu",
"relu",
"relu6",
"selu",
"softplus",
"softshrink",
"tanhshrink",
"threshold",
]
Tensor = torch.Tensor
# celu is implemented specially because it has an alpha argument
# celu is very similar to elu
@register_decomposition(torch.ops.aten.celu)
@elementwise_type_promotion_wrapper(
type_promoting_args=("a",),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def celu(
a: TensorLikeType, alpha: Optional[NumberType] = None, inplace: bool = False
) -> TensorLikeType:
"""
Reference implementation of torch.nn.functional.celu
"""
if inplace:
raise NotImplementedError
rhs: TensorLikeType
if alpha is not None:
python_type = utils.dtype_to_type(a.dtype)
if not utils.is_weakly_lesser_type(type(alpha), python_type):
msg = (
"alpha argument of type {0} cannot be safely cast to type {1}!".format(
type(alpha), python_type
)
)
raise ValueError(msg)
rhs = alpha * torch.expm1(torch.true_divide(a, alpha)) # type: ignore[arg-type]
else:
rhs = torch.expm1(a)
return torch.where(a > 0, a, rhs)
# TODO: should we allow the user to set a different dtype for the mask generation?
@register_decomposition(torch.ops.aten.dropout)
def dropout(
a: TensorLikeType, p: float = 0.5, training: bool = True, inplace: bool = False
) -> TensorLikeType:
if inplace:
raise NotImplementedError
if not training:
return a
assert p <= 1
assert p >= 0
if p == 1:
return refs.zeros_like(a)
if p == 0:
return a
p1m = 1 - p
scale = 1 / p1m
mask = refs.lt(
refs.uniform(a.shape, low=0.0, high=1.0, dtype=torch.float32, device=a.device),
p1m,
)
return refs.mul(refs.mul(a, mask), scale)
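# Note on the scaling above (added for clarity, not part of the original
# source): surviving elements are multiplied by 1 / (1 - p) so the expected
# value of the output matches the input; e.g. with p = 0.75 the kept
# activations are scaled by 4.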
# elu is implemented specially because it has an alpha argument
# This cannot be used as a decomposition because the aten op takes in 2 extra kwargs
@elementwise_type_promotion_wrapper(
type_promoting_args=("a",),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def elu(
a: TensorLikeType, alpha: Optional[NumberType] = None, inplace: bool = False
) -> TensorLikeType:
"""
Reference implementation of torch.nn.functional.elu
"""
if inplace:
raise NotImplementedError
rhs: TensorLikeType
if alpha is not None:
python_type = utils.dtype_to_type(a.dtype)
if not utils.is_weakly_lesser_type(type(alpha), python_type):
msg = (
"alpha argument of type {0} cannot be safely cast to type {1}!".format(
type(alpha), python_type
)
)
raise ValueError(msg)
rhs = alpha * torch.expm1(a)
else:
rhs = torch.expm1(a)
return torch.where(a > 0, a, rhs)
@register_decomposition(torch.ops.aten.relu)
@elementwise_type_promotion_wrapper(
type_promoting_args=("a",),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def relu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
"""
Reference implementation of torch.nn.functional.relu
"""
if inplace:
raise NotImplementedError
return torch.where(torch.le(a, 0), 0, a)
def layer_norm(
input: Tensor,
normalized_shape: ShapeType,
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
eps: float = 1e-5,
) -> Tensor:
"""
Reference implementation of :func:`torch.nn.functional.layer_norm`.
"""
return torch.native_layer_norm(input, normalized_shape, weight, bias, eps)[0]
@register_decomposition(torch.ops.aten.leaky_relu)
@elementwise_type_promotion_wrapper(
type_promoting_args=("a",),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def leaky_relu(
a: TensorLikeType, negative_slope: float = 0.01, inplace: bool = False
) -> TensorLikeType:
"""
Reference implementation of torch.nn.functional.leaky_relu
"""
if inplace:
raise NotImplementedError
python_type = utils.dtype_to_type(a.dtype)
if not utils.is_weakly_lesser_type(type(negative_slope), python_type):
msg = f"negative_slope argument of type {type(negative_slope)} cannot be safely cast to type {python_type}!"
raise ValueError(msg)
return torch.where(torch.gt(a, 0), a, torch.mul(a, negative_slope))
@register_decomposition(torch.ops.aten.mish)
@elementwise_type_promotion_wrapper(
type_promoting_args=("a",),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def mish(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
"""
Reference implementation of torch.nn.functional.mish
"""
if inplace:
raise NotImplementedError
return a * torch.tanh(torch.nn.functional.softplus(a))
@register_decomposition(torch.ops.aten.selu)
@elementwise_type_promotion_wrapper(
type_promoting_args=("a",),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def selu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
"""
Reference implementation of torch.nn.functional.selu
"""
if inplace:
raise NotImplementedError
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
rhs = alpha * torch.expm1(a)
return scale * torch.where(a > 0, a, rhs)
# softplus is implemented specially because it has beta and threshold arguments
@register_decomposition(torch.ops.aten.softplus)
@out_wrapper()
@elementwise_type_promotion_wrapper(
type_promoting_args=("a",),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def softplus(
a: TensorLikeType,
beta: Optional[NumberType] = None,
threshold: NumberType = 20,
inplace: bool = False,
) -> TensorLikeType:
"""
Reference implementation of torch.nn.functional.softplus
"""
if inplace:
raise NotImplementedError
rhs: TensorLikeType
if beta is not None:
python_type = utils.dtype_to_type(a.dtype)
if not utils.is_weakly_lesser_type(type(beta), python_type):
msg = "beta argument of type {0} cannot be safely cast to type {1}!".format(
type(beta), python_type
)
raise ValueError(msg)
scaled_input = a * beta
rhs = torch.true_divide(torch.log1p(torch.exp(scaled_input)), beta) # type: ignore[arg-type]
else:
scaled_input = a
rhs = torch.log1p(torch.exp(scaled_input))
return torch.where(scaled_input > threshold, a, rhs)
@register_decomposition(torch.ops.aten.hardshrink)
@out_wrapper()
def hardshrink(a: TensorLikeType, lambd: float = 0.5):
# Formula for reference,
# hardshrink(x) = x if x > lambd
# = x if x < -lambd
# = 0 otherwise
return refs.where(refs.logical_and(a >= -lambd, a <= lambd), 0, a)
@register_decomposition(torch.ops.aten.softshrink)
@out_wrapper()
def softshrink(a: TensorLikeType, lambd: float = 0.5):
# Formula for reference,
# softshrink(x) = x - lambd if x > lambd
# = x + lambd if x < -lambd
# = 0 otherwise
check(
lambd >= 0,
lambda: f"lambda must be greater or equal to 0, but found to be {lambd}",
)
ge_mask = a > lambd
le_mask = a < -lambd
zero_mask = torch.logical_not(refs.logical_or(ge_mask, le_mask))
result = refs.where(ge_mask, a - lambd, a)
result = refs.where(le_mask, a + lambd, result)
return refs.where(zero_mask, 0, result)
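# Illustrative example for softshrink above (added, not part of the original
# source; printed values are approximate):
#   >>> torch.nn.functional.softshrink(torch.tensor([-1.0, -0.2, 0.3, 2.0]), lambd=0.5)
#   tensor([-0.5000,  0.0000,  0.0000,  1.5000])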
# Losses
def _apply_loss_reduction(loss: TensorLikeType, reduction: str) -> TensorLikeType:
if reduction == "sum":
return refs.sum(loss)
elif reduction == "mean":
return refs.mean(loss)
else: # reduction == "none"
return loss
def _check_reduction_value(reduction: str):
if reduction not in ("mean", "sum", "none"):
raise ValueError("{} is not a valid value for reduction".format(reduction))
# This helper function maps deprecated arguments, "size_average" and "reduce",
# to their corresponding "reduction" string argument
def _get_string_reduction_arg(
*, size_average: Optional[bool], reduce: Optional[bool]
) -> str:
if size_average is None:
size_average = True
if reduce is None:
reduce = True
if size_average and reduce:
ret = "mean"
elif reduce:
ret = "sum"
else:
ret = "none"
return ret
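# Summary of the mapping above (added for clarity, not part of the original
# source): reduce=False always yields "none"; otherwise size_average=True (or
# None) yields "mean" and size_average=False yields "sum".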
@register_decomposition(torch.ops.aten.l1_loss)
def l1_loss(
input: TensorLikeType,
target: TensorLikeType,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> TensorLikeType:
if size_average is not None or reduce is not None:
# TODO: raise exception instead of converting value
# msg = "size_average and reduce args are deprecated, please use reduction argument."
reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
_check_reduction_value(reduction)
loss = torch.abs(input - target)
return _apply_loss_reduction(loss, reduction)
@register_decomposition(torch.ops.aten.margin_ranking_loss)
def margin_ranking_loss(
input1: TensorLikeType,
input2: TensorLikeType,
target: TensorLikeType,
margin: float = 0.0,
reduction: str = "mean",
) -> TensorLikeType:
# Formula of loss (implementation gets confusing with all the refs.foo)
# loss_without_reduction = max(0, −target * (input1 − input2) + margin)
if input1.ndim != input2.ndim or input1.ndim != target.ndim:
raise RuntimeError(
(
"margin_ranking_loss : All input tensors should have same dimension but got sizes: "
"input1: {}, input2: {}, target: {} ".format(
input1.shape, input2.shape, target.shape
)
)
)
_check_reduction_value(reduction)
neg_target = refs.neg(target)
input_diff = refs.sub(input1, input2)
mul_target_input = refs.mul(neg_target, input_diff)
add_margin = refs.add(mul_target_input, margin)
loss = refs.maximum(add_margin, 0)
return _apply_loss_reduction(loss, reduction)
def mse_loss(
input: TensorLikeType,
target: TensorLikeType,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> TensorLikeType:
if size_average is not None or reduce is not None:
# TODO: raise exception instead of converting value
# msg = "size_average and reduce args are deprecated, please use reduction argument."
reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)
_check_reduction_value(reduction)
loss = torch.pow(input - target, 2)
return _apply_loss_reduction(loss, reduction)
@register_decomposition(torch.ops.aten.hinge_embedding_loss)
def hinge_embedding_loss(
input: TensorLikeType,
target: TensorLikeType,
margin: float = 1.0,
reduction: str = "mean",
) -> TensorLikeType:
# Formula of loss (implementation gets confusing with all the refs.foo)
# loss_without_reduction = input if y == 1
# = max(0, margin - input) if y == -1
_check_reduction_value(reduction)
margin_clamp = refs.maximum(refs.sub(margin, input), 0)
output_margin = refs.where(refs.ne(target, 1), margin_clamp, 0)
output_self = refs.where(refs.ne(target, -1), input, 0)
loss = refs.add(output_margin, output_self)
return _apply_loss_reduction(loss, reduction)
# tanhshrink does not use _make_elementwise_unary_reference because it does not support out
@elementwise_unary_scalar_wrapper
@elementwise_type_promotion_wrapper(
type_promoting_args=("a",),
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def tanhshrink(a: TensorLikeType) -> TensorLikeType:
"""
Reference implementation of torch.nn.functional.tanhshrink
"""
if not isinstance(a, TensorLike):
raise RuntimeError(
"Expected a tensor input for an elementwise unary operation!"
)
return refs.sub(a, refs.tanh(a))
@register_decomposition(torch.ops.aten.threshold)
@elementwise_type_promotion_wrapper(
type_promoting_args=("a",),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def threshold(
a: TensorLikeType,
threshold: NumberType,
value: Union[bool, int, float],
inplace: bool = False,
) -> TensorLikeType:
"""
Reference implementation of torch.nn.functional.threshold
"""
if inplace:
raise NotImplementedError
return torch.where(a <= threshold, value, a)
@register_decomposition(torch.ops.aten.hardtanh)
@elementwise_unary_scalar_wrapper
@elementwise_type_promotion_wrapper(
type_promoting_args=("a"),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def hardtanh(
a: TensorLikeType,
min_val: NumberType = -1,
max_val: NumberType = 1,
inplace: bool = False,
) -> TensorLikeType:
"""
Reference implementation of torch.nn.functional.hardtanh
"""
if inplace:
raise NotImplementedError
if utils.is_boolean_dtype(a.dtype):
raise RuntimeError("Bool inputs not supported for hardtanh")
# preserve legacy behavior of boundaries not causing type promotion
if utils.is_integer_dtype(a.dtype):
min_val = int(min_val) # type: ignore[arg-type]
max_val = int(max_val) # type: ignore[arg-type]
if not (a.dtype != torch.uint8 or (min_val >= 0 and max_val >= 0)):
raise RuntimeError(
"Cannot do hardtanh on an unsigned type with negative limits"
)
return torch.clamp(a, min_val, max_val) # type: ignore[arg-type]
@register_decomposition(torch.ops.aten.gelu)
@out_wrapper()
@elementwise_unary_scalar_wrapper
@elementwise_type_promotion_wrapper(
type_promoting_args=("a",),
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def gelu(a: TensorLikeType, approximate: str = "none") -> TensorLikeType:
"""
Reference implementation of torch.nn.functional.gelu
"""
if not isinstance(a, TensorLike):
raise RuntimeError(
"Expected a tensor input for an elementwise unary operation!"
)
M_SQRT2 = 1.41421356237309504880
M_SQRT1_2 = 0.70710678118654752440
M_2_SQRTPI = 1.12837916709551257390
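# Formula reference (comment added for clarity, not part of the original
# source): kBeta below equals sqrt(2 / pi), so the "tanh" branch computes
# 0.5 * a * (1 + tanh(sqrt(2 / pi) * (a + 0.044715 * a**3))), while the exact
# branch computes a * Phi(a) = 0.5 * a * (1 + erf(a / sqrt(2))).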
if approximate == "tanh":
kBeta = M_SQRT2 * M_2_SQRTPI * 0.5
kKappa = 0.044715
a_cube = a * a * a
inner = kBeta * (a + kKappa * a_cube)
return 0.5 * a * (1 + torch.tanh(inner))
elif approximate == "none":
kAlpha = M_SQRT1_2
return a * 0.5 * (1 + torch.erf(a * kAlpha))
else:
raise RuntimeError("approximate argument must be either none or tanh.")
@register_decomposition(torch.ops.aten.prelu)
@elementwise_type_promotion_wrapper(
type_promoting_args=("a", "weight"),
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def prelu(a: TensorLikeType, weight: TensorLikeType) -> TensorLikeType:
"""
Reference implementation of torch.nn.functional.prelu
"""
check(
isinstance(a, TensorLike),
lambda: f"prelu: Expected `a` to be tensor, but got: {type(a)}",
)
check(
isinstance(weight, TensorLike),
lambda: f"prelu: Expected `weight` to be tensor, but got: {type(weight)}",
)
if weight.numel() != 1:
check(a.ndim > 0, lambda: "Not allow zero-dim input tensor.")
channel_size = a.shape[1] if a.ndim >= 2 else 1
check(
weight.numel() == channel_size,
lambda: f"Mismatch of parameter numbers and input channel size. Found parameter numbers ="
f" {weight.numel()} and channel size = {channel_size}.",
)
check(
weight.ndim == 0 or weight.ndim == 1,
lambda: f"prelu: Expected `weight` to be a scalar or 1D tensor, but got: "
f"ndim = {weight.ndim}",
)
weight = prims.broadcast_in_dim(
weight, a.shape, tuple() if weight.ndim == 0 else (1,)
)
return refs.where(a > 0, a, a * weight)
@register_decomposition(torch.ops.aten.relu6)
def relu6(a: TensorLikeType, inplace: bool = False) -> TensorLikeType:
"""
Reference implementation of torch.nn.functional.relu6
"""
if inplace:
raise NotImplementedError
# See https://github.com/pytorch/pytorch/pull/81142#discussion_r918220126
# It may be better to use clamp here, but we use hardtanh to replicate
# the behavior of the existing implementation
return refs.nn.functional.hardtanh(a, 0, 6)
| pytorch-master | torch/_refs/nn/functional/__init__.py |
from typing import Optional
import torch
import torch._prims as prims
import torch._prims_common as utils
import torch._refs as refs
from torch import Tensor
from torch._decomp import register_decomposition
from torch._prims_common import ELEMENTWISE_TYPE_PROMOTION_KIND, TensorLikeType
from torch._prims_common.wrappers import elementwise_type_promotion_wrapper, out_wrapper
from torch._refs import (
_make_elementwise_binary_reference,
_make_elementwise_unary_reference,
)
__all__ = [
"i0e",
"i1",
"i1e",
"logit",
"zeta",
]
@_make_elementwise_unary_reference(
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, aten_op=torch.ops.aten.special_i0e
)
def i0e(a):
return prims.bessel_i0e(a)
@_make_elementwise_unary_reference(
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, aten_op=torch.ops.aten.special_i1
)
def i1(a):
return prims.bessel_i1(a)
@_make_elementwise_unary_reference(
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, aten_op=torch.ops.aten.special_i1e
)
def i1e(a):
return prims.bessel_i1e(a)
@register_decomposition(torch.ops.aten.logit)
@out_wrapper()
@elementwise_type_promotion_wrapper(
type_promoting_args=("self",),
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def logit(self: TensorLikeType, eps: Optional[float] = None) -> TensorLikeType:
if eps is None:
eps = -1.0
lo = eps
hi = 1 - eps
self = torch.clamp(self, lo, hi)
return torch.log(torch.true_divide(self, torch.sub(1, self)))
zeta = _make_elementwise_binary_reference(
prims.zeta,
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
aten_op=torch.ops.aten.special_zeta,
)
| pytorch-master | torch/_refs/special/__init__.py |
import sys
import torch
from torch._C import _add_docstr, _fft # type: ignore[attr-defined]
from torch._torch_docs import factory_common_args, common_args
__all__ = ['fft', 'ifft', 'fft2', 'ifft2', 'fftn', 'ifftn',
'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn',
'hfft', 'ihfft', 'fftfreq', 'rfftfreq', 'fftshift', 'ifftshift',
'Tensor']
Tensor = torch.Tensor
# Note: This not only adds the doc strings for the spectral ops, but
# connects the torch.fft Python namespace to the torch._C._fft builtins.
fft = _add_docstr(_fft.fft_fft, r"""
fft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor
Computes the one dimensional discrete Fourier transform of :attr:`input`.
Note:
The Fourier domain representation of any real signal satisfies the
Hermitian property: `X[i] = conj(X[-i])`. This function always returns both
the positive and negative frequency terms even though, for real inputs, the
negative frequencies are redundant. :func:`~torch.fft.rfft` returns the
more compact one-sided representation where only the positive frequencies
are returned.
Note:
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
However it only supports powers of 2 signal length in every transformed dimension.
Args:
input (Tensor): the input tensor
n (int, optional): Signal length. If given, the input will either be zero-padded
or trimmed to this length before computing the FFT.
dim (int, optional): The dimension along which to take the one dimensional FFT.
norm (str, optional): Normalization mode. For the forward transform
(:func:`~torch.fft.fft`), these correspond to:
* ``"forward"`` - normalize by ``1/n``
* ``"backward"`` - no normalization
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal)
Calling the backward transform (:func:`~torch.fft.ifft`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.ifft`
the exact inverse.
Default is ``"backward"`` (no normalization).
Keyword args:
{out}
Example:
>>> t = torch.arange(4)
>>> t
tensor([0, 1, 2, 3])
>>> torch.fft.fft(t)
tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j])
>>> t = torch.tensor([0.+1.j, 2.+3.j, 4.+5.j, 6.+7.j])
>>> torch.fft.fft(t)
tensor([12.+16.j, -8.+0.j, -4.-4.j, 0.-8.j])
""".format(**common_args))
ifft = _add_docstr(_fft.fft_ifft, r"""
ifft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor
Computes the one dimensional inverse discrete Fourier transform of :attr:`input`.
Note:
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
However it only supports powers of 2 signal length in every transformed dimension.
Args:
input (Tensor): the input tensor
n (int, optional): Signal length. If given, the input will either be zero-padded
or trimmed to this length before computing the IFFT.
dim (int, optional): The dimension along which to take the one dimensional IFFT.
norm (str, optional): Normalization mode. For the backward transform
(:func:`~torch.fft.ifft`), these correspond to:
* ``"forward"`` - no normalization
* ``"backward"`` - normalize by ``1/n``
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal)
Calling the forward transform (:func:`~torch.fft.fft`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.ifft`
the exact inverse.
Default is ``"backward"`` (normalize by ``1/n``).
Keyword args:
{out}
Example:
>>> t = torch.tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j])
>>> torch.fft.ifft(t)
tensor([0.+0.j, 1.+0.j, 2.+0.j, 3.+0.j])
""".format(**common_args))
fft2 = _add_docstr(_fft.fft_fft2, r"""
fft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
Computes the 2 dimensional discrete Fourier transform of :attr:`input`.
Equivalent to :func:`~torch.fft.fftn` but FFTs only the last two dimensions by default.
Note:
The Fourier domain representation of any real signal satisfies the
Hermitian property: ``X[i, j] = conj(X[-i, -j])``. This
function always returns all positive and negative frequency terms even
though, for real inputs, half of these values are redundant.
:func:`~torch.fft.rfft2` returns the more compact one-sided representation
where only the positive frequencies of the last dimension are returned.
Note:
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.
Args:
input (Tensor): the input tensor
s (Tuple[int], optional): Signal size in the transformed dimensions.
If given, each dimension ``dim[i]`` will either be zero-padded or
trimmed to the length ``s[i]`` before computing the FFT.
If a length ``-1`` is specified, no padding is done in that dimension.
Default: ``s = [input.size(d) for d in dim]``
dim (Tuple[int], optional): Dimensions to be transformed.
Default: last two dimensions.
norm (str, optional): Normalization mode. For the forward transform
(:func:`~torch.fft.fft2`), these correspond to:
* ``"forward"`` - normalize by ``1/n``
* ``"backward"`` - no normalization
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal)
Where ``n = prod(s)`` is the logical FFT size.
Calling the backward transform (:func:`~torch.fft.ifft2`) with the same
normalization mode will apply an overall normalization of ``1/n``
between the two transforms. This is required to make
:func:`~torch.fft.ifft2` the exact inverse.
Default is ``"backward"`` (no normalization).
Keyword args:
{out}
Example:
>>> x = torch.rand(10, 10, dtype=torch.complex64)
>>> fft2 = torch.fft.fft2(x)
The discrete Fourier transform is separable, so :func:`~torch.fft.fft2`
here is equivalent to two one-dimensional :func:`~torch.fft.fft` calls:
>>> two_ffts = torch.fft.fft(torch.fft.fft(x, dim=0), dim=1)
>>> torch.testing.assert_close(fft2, two_ffts, check_stride=False)
""".format(**common_args))
ifft2 = _add_docstr(_fft.fft_ifft2, r"""
ifft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
Computes the 2 dimensional inverse discrete Fourier transform of :attr:`input`.
Equivalent to :func:`~torch.fft.ifftn` but IFFTs only the last two dimensions by default.
Note:
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.
Args:
input (Tensor): the input tensor
s (Tuple[int], optional): Signal size in the transformed dimensions.
If given, each dimension ``dim[i]`` will either be zero-padded or
trimmed to the length ``s[i]`` before computing the IFFT.
If a length ``-1`` is specified, no padding is done in that dimension.
Default: ``s = [input.size(d) for d in dim]``
dim (Tuple[int], optional): Dimensions to be transformed.
Default: last two dimensions.
norm (str, optional): Normalization mode. For the backward transform
(:func:`~torch.fft.ifft2`), these correspond to:
* ``"forward"`` - no normalization
* ``"backward"`` - normalize by ``1/n``
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal)
Where ``n = prod(s)`` is the logical IFFT size.
Calling the forward transform (:func:`~torch.fft.fft2`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.ifft2`
the exact inverse.
Default is ``"backward"`` (normalize by ``1/n``).
Keyword args:
{out}
Example:
>>> x = torch.rand(10, 10, dtype=torch.complex64)
>>> ifft2 = torch.fft.ifft2(x)
The discrete Fourier transform is separable, so :func:`~torch.fft.ifft2`
here is equivalent to two one-dimensional :func:`~torch.fft.ifft` calls:
>>> two_iffts = torch.fft.ifft(torch.fft.ifft(x, dim=0), dim=1)
>>> torch.testing.assert_close(ifft2, two_iffts, check_stride=False)
""".format(**common_args))
fftn = _add_docstr(_fft.fft_fftn, r"""
fftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
Computes the N dimensional discrete Fourier transform of :attr:`input`.
Note:
The Fourier domain representation of any real signal satisfies the
Hermitian property: ``X[i_1, ..., i_n] = conj(X[-i_1, ..., -i_n])``. This
function always returns all positive and negative frequency terms even
though, for real inputs, half of these values are redundant.
:func:`~torch.fft.rfftn` returns the more compact one-sided representation
where only the positive frequencies of the last dimension are returned.
Note:
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.
Args:
input (Tensor): the input tensor
s (Tuple[int], optional): Signal size in the transformed dimensions.
If given, each dimension ``dim[i]`` will either be zero-padded or
trimmed to the length ``s[i]`` before computing the FFT.
If a length ``-1`` is specified, no padding is done in that dimension.
Default: ``s = [input.size(d) for d in dim]``
dim (Tuple[int], optional): Dimensions to be transformed.
Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
norm (str, optional): Normalization mode. For the forward transform
(:func:`~torch.fft.fftn`), these correspond to:
* ``"forward"`` - normalize by ``1/n``
* ``"backward"`` - no normalization
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal)
Where ``n = prod(s)`` is the logical FFT size.
Calling the backward transform (:func:`~torch.fft.ifftn`) with the same
normalization mode will apply an overall normalization of ``1/n``
between the two transforms. This is required to make
:func:`~torch.fft.ifftn` the exact inverse.
Default is ``"backward"`` (no normalization).
Keyword args:
{out}
Example:
>>> x = torch.rand(10, 10, dtype=torch.complex64)
>>> fftn = torch.fft.fftn(x)
The discrete Fourier transform is separable, so :func:`~torch.fft.fftn`
here is equivalent to two one-dimensional :func:`~torch.fft.fft` calls:
>>> two_ffts = torch.fft.fft(torch.fft.fft(x, dim=0), dim=1)
>>> torch.testing.assert_close(fftn, two_ffts, check_stride=False)
""".format(**common_args))
ifftn = _add_docstr(_fft.fft_ifftn, r"""
ifftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
Computes the N dimensional inverse discrete Fourier transform of :attr:`input`.
Note:
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.
Args:
input (Tensor): the input tensor
s (Tuple[int], optional): Signal size in the transformed dimensions.
If given, each dimension ``dim[i]`` will either be zero-padded or
trimmed to the length ``s[i]`` before computing the IFFT.
If a length ``-1`` is specified, no padding is done in that dimension.
Default: ``s = [input.size(d) for d in dim]``
dim (Tuple[int], optional): Dimensions to be transformed.
Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
norm (str, optional): Normalization mode. For the backward transform
(:func:`~torch.fft.ifftn`), these correspond to:
* ``"forward"`` - no normalization
* ``"backward"`` - normalize by ``1/n``
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal)
Where ``n = prod(s)`` is the logical IFFT size.
Calling the forward transform (:func:`~torch.fft.fftn`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.ifftn`
the exact inverse.
Default is ``"backward"`` (normalize by ``1/n``).
Keyword args:
{out}
Example:
>>> x = torch.rand(10, 10, dtype=torch.complex64)
>>> ifftn = torch.fft.ifftn(x)
The discrete Fourier transform is separable, so :func:`~torch.fft.ifftn`
here is equivalent to two one-dimensional :func:`~torch.fft.ifft` calls:
>>> two_iffts = torch.fft.ifft(torch.fft.ifft(x, dim=0), dim=1)
>>> torch.testing.assert_close(ifftn, two_iffts, check_stride=False)
""".format(**common_args))
rfft = _add_docstr(_fft.fft_rfft, r"""
rfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor
Computes the one dimensional Fourier transform of real-valued :attr:`input`.
The FFT of a real signal is Hermitian-symmetric, ``X[i] = conj(X[-i])`` so
the output contains only the positive frequencies below the Nyquist frequency.
To compute the full output, use :func:`~torch.fft.fft`
Note:
Supports torch.half on CUDA with GPU Architecture SM53 or greater.
However it only supports powers of 2 signal length in every transformed dimension.
Args:
input (Tensor): the real input tensor
n (int, optional): Signal length. If given, the input will either be zero-padded
or trimmed to this length before computing the real FFT.
dim (int, optional): The dimension along which to take the one dimensional real FFT.
norm (str, optional): Normalization mode. For the forward transform
(:func:`~torch.fft.rfft`), these correspond to:
* ``"forward"`` - normalize by ``1/n``
* ``"backward"`` - no normalization
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal)
Calling the backward transform (:func:`~torch.fft.irfft`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.irfft`
the exact inverse.
Default is ``"backward"`` (no normalization).
Keyword args:
{out}
Example:
>>> t = torch.arange(4)
>>> t
tensor([0, 1, 2, 3])
>>> torch.fft.rfft(t)
tensor([ 6.+0.j, -2.+2.j, -2.+0.j])
Compare against the full output from :func:`~torch.fft.fft`:
>>> torch.fft.fft(t)
tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j])
Notice that the symmetric element ``T[-1] == T[1].conj()`` is omitted.
    At the Nyquist frequency ``T[-2] == T[2]`` is its own symmetric pair,
and therefore must always be real-valued.
""".format(**common_args))
irfft = _add_docstr(_fft.fft_irfft, r"""
irfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor
Computes the inverse of :func:`~torch.fft.rfft`.
:attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier
domain, as produced by :func:`~torch.fft.rfft`. By the Hermitian property, the
output will be real-valued.
Note:
Some input frequencies must be real-valued to satisfy the Hermitian
property. In these cases the imaginary component will be ignored.
For example, any imaginary component in the zero-frequency term cannot
be represented in a real output and so will always be ignored.
Note:
The correct interpretation of the Hermitian input depends on the length of
the original data, as given by :attr:`n`. This is because each input shape
could correspond to either an odd or even length signal. By default, the
signal is assumed to be even length and odd signals will not round-trip
properly. So, it is recommended to always pass the signal length :attr:`n`.
Note:
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
However it only supports powers of 2 signal length in every transformed dimension.
    With default arguments, the size of the transformed dimension should be (2^n + 1), since the
    argument `n` defaults to an even output size of 2 * (transformed_dim_size - 1).
Args:
input (Tensor): the input tensor representing a half-Hermitian signal
n (int, optional): Output signal length. This determines the length of the
output signal. If given, the input will either be zero-padded or trimmed to this
length before computing the real IFFT.
Defaults to even output: ``n=2*(input.size(dim) - 1)``.
dim (int, optional): The dimension along which to take the one dimensional real IFFT.
norm (str, optional): Normalization mode. For the backward transform
(:func:`~torch.fft.irfft`), these correspond to:
* ``"forward"`` - no normalization
* ``"backward"`` - normalize by ``1/n``
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real IFFT orthonormal)
Calling the forward transform (:func:`~torch.fft.rfft`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.irfft`
the exact inverse.
Default is ``"backward"`` (normalize by ``1/n``).
Keyword args:
{out}
Example:
>>> t = torch.linspace(0, 1, 5)
>>> t
tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
>>> T = torch.fft.rfft(t)
>>> T
tensor([ 2.5000+0.0000j, -0.6250+0.8602j, -0.6250+0.2031j])
Without specifying the output length to :func:`~torch.fft.irfft`, the output
will not round-trip properly because the input is odd-length:
>>> torch.fft.irfft(T)
tensor([0.1562, 0.3511, 0.7812, 1.2114])
So, it is recommended to always pass the signal length :attr:`n`:
>>> roundtrip = torch.fft.irfft(T, t.numel())
>>> torch.testing.assert_close(roundtrip, t, check_stride=False)
""".format(**common_args))
rfft2 = _add_docstr(_fft.fft_rfft2, r"""
rfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
Computes the 2-dimensional discrete Fourier transform of real :attr:`input`.
Equivalent to :func:`~torch.fft.rfftn` but FFTs only the last two dimensions by default.
The FFT of a real signal is Hermitian-symmetric, ``X[i, j] = conj(X[-i, -j])``,
so the full :func:`~torch.fft.fft2` output contains redundant information.
:func:`~torch.fft.rfft2` instead omits the negative frequencies in the last
dimension.
Note:
Supports torch.half on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.
Args:
input (Tensor): the input tensor
s (Tuple[int], optional): Signal size in the transformed dimensions.
If given, each dimension ``dim[i]`` will either be zero-padded or
trimmed to the length ``s[i]`` before computing the real FFT.
If a length ``-1`` is specified, no padding is done in that dimension.
Default: ``s = [input.size(d) for d in dim]``
dim (Tuple[int], optional): Dimensions to be transformed.
Default: last two dimensions.
norm (str, optional): Normalization mode. For the forward transform
(:func:`~torch.fft.rfft2`), these correspond to:
* ``"forward"`` - normalize by ``1/n``
* ``"backward"`` - no normalization
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real FFT orthonormal)
Where ``n = prod(s)`` is the logical FFT size.
Calling the backward transform (:func:`~torch.fft.irfft2`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.irfft2`
the exact inverse.
Default is ``"backward"`` (no normalization).
Keyword args:
{out}
Example:
>>> t = torch.rand(10, 10)
>>> rfft2 = torch.fft.rfft2(t)
>>> rfft2.size()
torch.Size([10, 6])
Compared against the full output from :func:`~torch.fft.fft2`, we have all
elements up to the Nyquist frequency.
>>> fft2 = torch.fft.fft2(t)
>>> torch.testing.assert_close(fft2[..., :6], rfft2, check_stride=False)
The discrete Fourier transform is separable, so :func:`~torch.fft.rfft2`
here is equivalent to a combination of :func:`~torch.fft.fft` and
:func:`~torch.fft.rfft`:
>>> two_ffts = torch.fft.fft(torch.fft.rfft(t, dim=1), dim=0)
>>> torch.testing.assert_close(rfft2, two_ffts, check_stride=False)
""".format(**common_args))
irfft2 = _add_docstr(_fft.fft_irfft2, r"""
irfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
Computes the inverse of :func:`~torch.fft.rfft2`.
Equivalent to :func:`~torch.fft.irfftn` but IFFTs only the last two dimensions by default.
:attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier
domain, as produced by :func:`~torch.fft.rfft2`. By the Hermitian property, the
output will be real-valued.
Note:
Some input frequencies must be real-valued to satisfy the Hermitian
property. In these cases the imaginary component will be ignored.
For example, any imaginary component in the zero-frequency term cannot
be represented in a real output and so will always be ignored.
Note:
The correct interpretation of the Hermitian input depends on the length of
the original data, as given by :attr:`s`. This is because each input shape
could correspond to either an odd or even length signal. By default, the
signal is assumed to be even length and odd signals will not round-trip
properly. So, it is recommended to always pass the signal shape :attr:`s`.
Note:
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.
    With default arguments, the size of the last dimension should be (2^n + 1), since the
    argument `s` defaults to an even output size of 2 * (last_dim_size - 1) in that dimension.
Args:
input (Tensor): the input tensor
s (Tuple[int], optional): Signal size in the transformed dimensions.
If given, each dimension ``dim[i]`` will either be zero-padded or
trimmed to the length ``s[i]`` before computing the real FFT.
If a length ``-1`` is specified, no padding is done in that dimension.
Defaults to even output in the last dimension:
``s[-1] = 2*(input.size(dim[-1]) - 1)``.
dim (Tuple[int], optional): Dimensions to be transformed.
The last dimension must be the half-Hermitian compressed dimension.
Default: last two dimensions.
norm (str, optional): Normalization mode. For the backward transform
(:func:`~torch.fft.irfft2`), these correspond to:
* ``"forward"`` - no normalization
* ``"backward"`` - normalize by ``1/n``
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real IFFT orthonormal)
Where ``n = prod(s)`` is the logical IFFT size.
Calling the forward transform (:func:`~torch.fft.rfft2`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.irfft2`
the exact inverse.
Default is ``"backward"`` (normalize by ``1/n``).
Keyword args:
{out}
Example:
>>> t = torch.rand(10, 9)
>>> T = torch.fft.rfft2(t)
Without specifying the output length to :func:`~torch.fft.irfft2`, the output
will not round-trip properly because the input is odd-length in the last
dimension:
>>> torch.fft.irfft2(T).size()
torch.Size([10, 8])
So, it is recommended to always pass the signal shape :attr:`s`.
>>> roundtrip = torch.fft.irfft2(T, t.size())
>>> roundtrip.size()
torch.Size([10, 9])
>>> torch.testing.assert_close(roundtrip, t, check_stride=False)
""".format(**common_args))
rfftn = _add_docstr(_fft.fft_rfftn, r"""
rfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
Computes the N-dimensional discrete Fourier transform of real :attr:`input`.
The FFT of a real signal is Hermitian-symmetric,
``X[i_1, ..., i_n] = conj(X[-i_1, ..., -i_n])`` so the full
:func:`~torch.fft.fftn` output contains redundant information.
:func:`~torch.fft.rfftn` instead omits the negative frequencies in the
last dimension.
Note:
Supports torch.half on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.
Args:
input (Tensor): the input tensor
s (Tuple[int], optional): Signal size in the transformed dimensions.
If given, each dimension ``dim[i]`` will either be zero-padded or
trimmed to the length ``s[i]`` before computing the real FFT.
If a length ``-1`` is specified, no padding is done in that dimension.
Default: ``s = [input.size(d) for d in dim]``
dim (Tuple[int], optional): Dimensions to be transformed.
Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
norm (str, optional): Normalization mode. For the forward transform
(:func:`~torch.fft.rfftn`), these correspond to:
* ``"forward"`` - normalize by ``1/n``
* ``"backward"`` - no normalization
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real FFT orthonormal)
Where ``n = prod(s)`` is the logical FFT size.
Calling the backward transform (:func:`~torch.fft.irfftn`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.irfftn`
the exact inverse.
Default is ``"backward"`` (no normalization).
Keyword args:
{out}
Example:
>>> t = torch.rand(10, 10)
>>> rfftn = torch.fft.rfftn(t)
>>> rfftn.size()
torch.Size([10, 6])
Compared against the full output from :func:`~torch.fft.fftn`, we have all
elements up to the Nyquist frequency.
>>> fftn = torch.fft.fftn(t)
>>> torch.testing.assert_close(fftn[..., :6], rfftn, check_stride=False)
The discrete Fourier transform is separable, so :func:`~torch.fft.rfftn`
here is equivalent to a combination of :func:`~torch.fft.fft` and
:func:`~torch.fft.rfft`:
>>> two_ffts = torch.fft.fft(torch.fft.rfft(t, dim=1), dim=0)
>>> torch.testing.assert_close(rfftn, two_ffts, check_stride=False)
""".format(**common_args))
irfftn = _add_docstr(_fft.fft_irfftn, r"""
irfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
Computes the inverse of :func:`~torch.fft.rfftn`.
:attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier
domain, as produced by :func:`~torch.fft.rfftn`. By the Hermitian property, the
output will be real-valued.
Note:
Some input frequencies must be real-valued to satisfy the Hermitian
property. In these cases the imaginary component will be ignored.
For example, any imaginary component in the zero-frequency term cannot
be represented in a real output and so will always be ignored.
Note:
The correct interpretation of the Hermitian input depends on the length of
the original data, as given by :attr:`s`. This is because each input shape
could correspond to either an odd or even length signal. By default, the
signal is assumed to be even length and odd signals will not round-trip
properly. So, it is recommended to always pass the signal shape :attr:`s`.
Note:
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.
    With default arguments, the size of the last dimension should be (2^n + 1), since the
    argument `s` defaults to an even output size of 2 * (last_dim_size - 1) in that dimension.
Args:
input (Tensor): the input tensor
s (Tuple[int], optional): Signal size in the transformed dimensions.
If given, each dimension ``dim[i]`` will either be zero-padded or
trimmed to the length ``s[i]`` before computing the real FFT.
If a length ``-1`` is specified, no padding is done in that dimension.
Defaults to even output in the last dimension:
``s[-1] = 2*(input.size(dim[-1]) - 1)``.
dim (Tuple[int], optional): Dimensions to be transformed.
The last dimension must be the half-Hermitian compressed dimension.
Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
norm (str, optional): Normalization mode. For the backward transform
(:func:`~torch.fft.irfftn`), these correspond to:
* ``"forward"`` - no normalization
* ``"backward"`` - normalize by ``1/n``
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real IFFT orthonormal)
Where ``n = prod(s)`` is the logical IFFT size.
Calling the forward transform (:func:`~torch.fft.rfftn`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.irfftn`
the exact inverse.
Default is ``"backward"`` (normalize by ``1/n``).
Keyword args:
{out}
Example:
>>> t = torch.rand(10, 9)
>>> T = torch.fft.rfftn(t)
    Without specifying the output length to :func:`~torch.fft.irfftn`, the output
will not round-trip properly because the input is odd-length in the last
dimension:
>>> torch.fft.irfftn(T).size()
torch.Size([10, 8])
So, it is recommended to always pass the signal shape :attr:`s`.
>>> roundtrip = torch.fft.irfftn(T, t.size())
>>> roundtrip.size()
torch.Size([10, 9])
>>> torch.testing.assert_close(roundtrip, t, check_stride=False)
""".format(**common_args))
hfft = _add_docstr(_fft.fft_hfft, r"""
hfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor
Computes the one dimensional discrete Fourier transform of a Hermitian
symmetric :attr:`input` signal.
Note:
:func:`~torch.fft.hfft`/:func:`~torch.fft.ihfft` are analogous to
:func:`~torch.fft.rfft`/:func:`~torch.fft.irfft`. The real FFT expects
a real signal in the time-domain and gives a Hermitian symmetry in the
frequency-domain. The Hermitian FFT is the opposite; Hermitian symmetric in
the time-domain and real-valued in the frequency-domain. For this reason,
special care needs to be taken with the length argument :attr:`n`, in the
same way as with :func:`~torch.fft.irfft`.
Note:
Because the signal is Hermitian in the time-domain, the result will be
real in the frequency domain. Note that some input frequencies must be
real-valued to satisfy the Hermitian property. In these cases the imaginary
component will be ignored. For example, any imaginary component in
``input[0]`` would result in one or more complex frequency terms which
cannot be represented in a real output and so will always be ignored.
Note:
The correct interpretation of the Hermitian input depends on the length of
the original data, as given by :attr:`n`. This is because each input shape
could correspond to either an odd or even length signal. By default, the
signal is assumed to be even length and odd signals will not round-trip
properly. So, it is recommended to always pass the signal length :attr:`n`.
Note:
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
However it only supports powers of 2 signal length in every transformed dimension.
    With default arguments, the size of the transformed dimension should be (2^n + 1), since the
    argument `n` defaults to an even output size of 2 * (transformed_dim_size - 1).
Args:
input (Tensor): the input tensor representing a half-Hermitian signal
n (int, optional): Output signal length. This determines the length of the
real output. If given, the input will either be zero-padded or trimmed to this
length before computing the Hermitian FFT.
Defaults to even output: ``n=2*(input.size(dim) - 1)``.
dim (int, optional): The dimension along which to take the one dimensional Hermitian FFT.
norm (str, optional): Normalization mode. For the forward transform
(:func:`~torch.fft.hfft`), these correspond to:
* ``"forward"`` - normalize by ``1/n``
* ``"backward"`` - no normalization
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian FFT orthonormal)
Calling the backward transform (:func:`~torch.fft.ihfft`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.ihfft`
the exact inverse.
Default is ``"backward"`` (no normalization).
Keyword args:
{out}
Example:
Taking a real-valued frequency signal and bringing it into the time domain
gives Hermitian symmetric output:
>>> t = torch.linspace(0, 1, 5)
>>> t
tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
>>> T = torch.fft.ifft(t)
>>> T
tensor([ 0.5000-0.0000j, -0.1250-0.1720j, -0.1250-0.0406j, -0.1250+0.0406j,
-0.1250+0.1720j])
    Note that ``T[1] == T[-1].conj()`` and ``T[2] == T[-2].conj()`` are
redundant. We can thus compute the forward transform without considering
negative frequencies:
>>> torch.fft.hfft(T[:3], n=5)
tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
    Like with :func:`~torch.fft.irfft`, the output length must be given in order
    to recover an odd length output; without it, the output defaults to an even length:
>>> torch.fft.hfft(T[:3])
tensor([0.1250, 0.2809, 0.6250, 0.9691])
""".format(**common_args))
ihfft = _add_docstr(_fft.fft_ihfft, r"""
ihfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor
Computes the inverse of :func:`~torch.fft.hfft`.
:attr:`input` must be a real-valued signal, interpreted in the Fourier domain.
The IFFT of a real signal is Hermitian-symmetric, ``X[i] = conj(X[-i])``.
:func:`~torch.fft.ihfft` represents this in the one-sided form where only the
positive frequencies below the Nyquist frequency are included. To compute the
full output, use :func:`~torch.fft.ifft`.
Note:
Supports torch.half on CUDA with GPU Architecture SM53 or greater.
However it only supports powers of 2 signal length in every transformed dimension.
Args:
input (Tensor): the real input tensor
n (int, optional): Signal length. If given, the input will either be zero-padded
or trimmed to this length before computing the Hermitian IFFT.
dim (int, optional): The dimension along which to take the one dimensional Hermitian IFFT.
norm (str, optional): Normalization mode. For the backward transform
(:func:`~torch.fft.ihfft`), these correspond to:
* ``"forward"`` - no normalization
* ``"backward"`` - normalize by ``1/n``
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal)
Calling the forward transform (:func:`~torch.fft.hfft`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.ihfft`
the exact inverse.
Default is ``"backward"`` (normalize by ``1/n``).
Keyword args:
{out}
Example:
>>> t = torch.arange(5)
>>> t
tensor([0, 1, 2, 3, 4])
>>> torch.fft.ihfft(t)
tensor([ 2.0000-0.0000j, -0.5000-0.6882j, -0.5000-0.1625j])
Compare against the full output from :func:`~torch.fft.ifft`:
>>> torch.fft.ifft(t)
tensor([ 2.0000-0.0000j, -0.5000-0.6882j, -0.5000-0.1625j, -0.5000+0.1625j,
-0.5000+0.6882j])
""".format(**common_args))
hfft2 = _add_docstr(_fft.fft_hfft2, r"""
hfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
Computes the 2-dimensional discrete Fourier transform of a Hermitian symmetric
:attr:`input` signal. Equivalent to :func:`~torch.fft.hfftn` but only
transforms the last two dimensions by default.
:attr:`input` is interpreted as a one-sided Hermitian signal in the time
domain. By the Hermitian property, the Fourier transform will be real-valued.
Note:
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.
    With default arguments, the size of the last dimension should be (2^n + 1), since the
    argument `s` defaults to an even output size of 2 * (last_dim_size - 1) in that dimension.
Args:
input (Tensor): the input tensor
s (Tuple[int], optional): Signal size in the transformed dimensions.
If given, each dimension ``dim[i]`` will either be zero-padded or
trimmed to the length ``s[i]`` before computing the Hermitian FFT.
If a length ``-1`` is specified, no padding is done in that dimension.
Defaults to even output in the last dimension:
``s[-1] = 2*(input.size(dim[-1]) - 1)``.
dim (Tuple[int], optional): Dimensions to be transformed.
The last dimension must be the half-Hermitian compressed dimension.
Default: last two dimensions.
norm (str, optional): Normalization mode. For the forward transform
(:func:`~torch.fft.hfft2`), these correspond to:
* ``"forward"`` - normalize by ``1/n``
* ``"backward"`` - no normalization
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian FFT orthonormal)
Where ``n = prod(s)`` is the logical FFT size.
Calling the backward transform (:func:`~torch.fft.ihfft2`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.ihfft2`
the exact inverse.
Default is ``"backward"`` (no normalization).
Keyword args:
{out}
Example:
Starting from a real frequency-space signal, we can generate a
Hermitian-symmetric time-domain signal:
>>> T = torch.rand(10, 9)
>>> t = torch.fft.ihfft2(T)
    Without specifying the output length to :func:`~torch.fft.hfft2`, the
output will not round-trip properly because the input is odd-length in the
last dimension:
>>> torch.fft.hfft2(t).size()
torch.Size([10, 10])
So, it is recommended to always pass the signal shape :attr:`s`.
>>> roundtrip = torch.fft.hfft2(t, T.size())
>>> roundtrip.size()
torch.Size([10, 9])
>>> torch.allclose(roundtrip, T)
True
""".format(**common_args))
ihfft2 = _add_docstr(_fft.fft_ihfft2, r"""
ihfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor
Computes the 2-dimensional inverse discrete Fourier transform of real
:attr:`input`. Equivalent to :func:`~torch.fft.ihfftn` but transforms only the
two last dimensions by default.
Note:
Supports torch.half on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.
Args:
input (Tensor): the input tensor
s (Tuple[int], optional): Signal size in the transformed dimensions.
If given, each dimension ``dim[i]`` will either be zero-padded or
trimmed to the length ``s[i]`` before computing the Hermitian IFFT.
If a length ``-1`` is specified, no padding is done in that dimension.
Default: ``s = [input.size(d) for d in dim]``
dim (Tuple[int], optional): Dimensions to be transformed.
Default: last two dimensions.
norm (str, optional): Normalization mode. For the backward transform
(:func:`~torch.fft.ihfft2`), these correspond to:
* ``"forward"`` - no normalization
* ``"backward"`` - normalize by ``1/n``
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian IFFT orthonormal)
Where ``n = prod(s)`` is the logical IFFT size.
Calling the forward transform (:func:`~torch.fft.hfft2`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.ihfft2`
the exact inverse.
Default is ``"backward"`` (normalize by ``1/n``).
Keyword args:
{out}
Example:
>>> T = torch.rand(10, 10)
    >>> t = torch.fft.ihfft2(T)
>>> t.size()
torch.Size([10, 6])
Compared against the full output from :func:`~torch.fft.ifft2`, the
Hermitian time-space signal takes up only half the space.
    >>> ifft2 = torch.fft.ifft2(T)
    >>> torch.allclose(ifft2[..., :6], t)
True
The discrete Fourier transform is separable, so :func:`~torch.fft.ihfft2`
here is equivalent to a combination of :func:`~torch.fft.ifft` and
:func:`~torch.fft.ihfft`:
    >>> two_ffts = torch.fft.ifft(torch.fft.ihfft(T, dim=1), dim=0)
>>> torch.allclose(t, two_ffts)
True
""".format(**common_args))
hfftn = _add_docstr(_fft.fft_hfftn, r"""
hfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
Computes the n-dimensional discrete Fourier transform of a Hermitian symmetric
:attr:`input` signal.
:attr:`input` is interpreted as a one-sided Hermitian signal in the time
domain. By the Hermitian property, the Fourier transform will be real-valued.
Note:
:func:`~torch.fft.hfftn`/:func:`~torch.fft.ihfftn` are analogous to
:func:`~torch.fft.rfftn`/:func:`~torch.fft.irfftn`. The real FFT expects
a real signal in the time-domain and gives Hermitian symmetry in the
frequency-domain. The Hermitian FFT is the opposite; Hermitian symmetric in
the time-domain and real-valued in the frequency-domain. For this reason,
special care needs to be taken with the shape argument :attr:`s`, in the
same way as with :func:`~torch.fft.irfftn`.
Note:
Some input frequencies must be real-valued to satisfy the Hermitian
property. In these cases the imaginary component will be ignored.
For example, any imaginary component in the zero-frequency term cannot
be represented in a real output and so will always be ignored.
Note:
The correct interpretation of the Hermitian input depends on the length of
the original data, as given by :attr:`s`. This is because each input shape
could correspond to either an odd or even length signal. By default, the
signal is assumed to be even length and odd signals will not round-trip
properly. It is recommended to always pass the signal shape :attr:`s`.
Note:
Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.
    With default arguments, the size of the last dimension should be (2^n + 1), since the
    argument `s` defaults to an even output size of 2 * (last_dim_size - 1) in that dimension.
Args:
input (Tensor): the input tensor
s (Tuple[int], optional): Signal size in the transformed dimensions.
If given, each dimension ``dim[i]`` will either be zero-padded or
trimmed to the length ``s[i]`` before computing the real FFT.
If a length ``-1`` is specified, no padding is done in that dimension.
Defaults to even output in the last dimension:
``s[-1] = 2*(input.size(dim[-1]) - 1)``.
dim (Tuple[int], optional): Dimensions to be transformed.
The last dimension must be the half-Hermitian compressed dimension.
Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
norm (str, optional): Normalization mode. For the forward transform
(:func:`~torch.fft.hfftn`), these correspond to:
* ``"forward"`` - normalize by ``1/n``
* ``"backward"`` - no normalization
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian FFT orthonormal)
Where ``n = prod(s)`` is the logical FFT size.
Calling the backward transform (:func:`~torch.fft.ihfftn`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.ihfftn`
the exact inverse.
Default is ``"backward"`` (no normalization).
Keyword args:
{out}
Example:
Starting from a real frequency-space signal, we can generate a
Hermitian-symmetric time-domain signal:
>>> T = torch.rand(10, 9)
>>> t = torch.fft.ihfftn(T)
Without specifying the output length to :func:`~torch.fft.hfftn`, the
output will not round-trip properly because the input is odd-length in the
last dimension:
>>> torch.fft.hfftn(t).size()
torch.Size([10, 10])
So, it is recommended to always pass the signal shape :attr:`s`.
>>> roundtrip = torch.fft.hfftn(t, T.size())
>>> roundtrip.size()
torch.Size([10, 9])
>>> torch.allclose(roundtrip, T)
True
""".format(**common_args))
ihfftn = _add_docstr(_fft.fft_ihfftn, r"""
ihfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor
Computes the N-dimensional inverse discrete Fourier transform of real :attr:`input`.
:attr:`input` must be a real-valued signal, interpreted in the Fourier domain.
The n-dimensional IFFT of a real signal is Hermitian-symmetric,
``X[i, j, ...] = conj(X[-i, -j, ...])``. :func:`~torch.fft.ihfftn` represents
this in the one-sided form where only the positive frequencies below the
Nyquist frequency are included in the last signal dimension. To compute the
full output, use :func:`~torch.fft.ifftn`.
Note:
Supports torch.half on CUDA with GPU Architecture SM53 or greater.
    However it only supports powers of 2 signal length in every transformed dimension.
Args:
input (Tensor): the input tensor
s (Tuple[int], optional): Signal size in the transformed dimensions.
If given, each dimension ``dim[i]`` will either be zero-padded or
trimmed to the length ``s[i]`` before computing the Hermitian IFFT.
If a length ``-1`` is specified, no padding is done in that dimension.
Default: ``s = [input.size(d) for d in dim]``
dim (Tuple[int], optional): Dimensions to be transformed.
Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
norm (str, optional): Normalization mode. For the backward transform
(:func:`~torch.fft.ihfftn`), these correspond to:
* ``"forward"`` - no normalization
* ``"backward"`` - normalize by ``1/n``
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian IFFT orthonormal)
Where ``n = prod(s)`` is the logical IFFT size.
Calling the forward transform (:func:`~torch.fft.hfftn`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.ihfftn`
the exact inverse.
Default is ``"backward"`` (normalize by ``1/n``).
Keyword args:
{out}
Example:
>>> T = torch.rand(10, 10)
>>> ihfftn = torch.fft.ihfftn(T)
>>> ihfftn.size()
torch.Size([10, 6])
Compared against the full output from :func:`~torch.fft.ifftn`, we have all
elements up to the Nyquist frequency.
    >>> ifftn = torch.fft.ifftn(T)
>>> torch.allclose(ifftn[..., :6], ihfftn)
True
The discrete Fourier transform is separable, so :func:`~torch.fft.ihfftn`
here is equivalent to a combination of :func:`~torch.fft.ihfft` and
:func:`~torch.fft.ifft`:
    >>> two_iffts = torch.fft.ifft(torch.fft.ihfft(T, dim=1), dim=0)
>>> torch.allclose(ihfftn, two_iffts)
True
""".format(**common_args))
fftfreq = _add_docstr(_fft.fft_fftfreq, r"""
fftfreq(n, d=1.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Computes the discrete Fourier Transform sample frequencies for a signal of size :attr:`n`.
Note:
By convention, :func:`~torch.fft.fft` returns positive frequency terms
first, followed by the negative frequencies in reverse order, so that
    ``f[-i]`` for all :math:`0 < i \leq n/2` in Python gives the negative
frequency terms. For an FFT of length :attr:`n` and with inputs spaced in
length unit :attr:`d`, the frequencies are::
f = [0, 1, ..., (n - 1) // 2, -(n // 2), ..., -1] / (d * n)
Note:
For even lengths, the Nyquist frequency at ``f[n/2]`` can be thought of as
either negative or positive. :func:`~torch.fft.fftfreq` follows NumPy's
convention of taking it to be negative.
Args:
n (int): the FFT length
d (float, optional): The sampling length scale.
The spacing between individual samples of the FFT input.
        The default assumes unit spacing; dividing that result by the actual
spacing gives the result in physical frequency units.
Keyword Args:
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example:
>>> torch.fft.fftfreq(5)
tensor([ 0.0000, 0.2000, 0.4000, -0.4000, -0.2000])
For even input, we can see the Nyquist frequency at ``f[2]`` is given as
negative:
>>> torch.fft.fftfreq(4)
tensor([ 0.0000, 0.2500, -0.5000, -0.2500])
""".format(**factory_common_args))
rfftfreq = _add_docstr(_fft.fft_rfftfreq, r"""
rfftfreq(n, d=1.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Computes the sample frequencies for :func:`~torch.fft.rfft` with a signal of size :attr:`n`.
Note:
:func:`~torch.fft.rfft` returns Hermitian one-sided output, so only the
positive frequency terms are returned. For a real FFT of length :attr:`n`
and with inputs spaced in length unit :attr:`d`, the frequencies are::
f = torch.arange((n + 1) // 2) / (d * n)
Note:
For even lengths, the Nyquist frequency at ``f[n/2]`` can be thought of as
either negative or positive. Unlike :func:`~torch.fft.fftfreq`,
:func:`~torch.fft.rfftfreq` always returns it as positive.
Args:
n (int): the real FFT length
d (float, optional): The sampling length scale.
The spacing between individual samples of the FFT input.
        The default assumes unit spacing; dividing that result by the actual
spacing gives the result in physical frequency units.
Keyword Args:
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example:
>>> torch.fft.rfftfreq(5)
tensor([0.0000, 0.2000, 0.4000])
>>> torch.fft.rfftfreq(4)
tensor([0.0000, 0.2500, 0.5000])
Compared to the output from :func:`~torch.fft.fftfreq`, we see that the
Nyquist frequency at ``f[2]`` has changed sign:
>>> torch.fft.fftfreq(4)
tensor([ 0.0000, 0.2500, -0.5000, -0.2500])
""".format(**factory_common_args))
fftshift = _add_docstr(_fft.fft_fftshift, r"""
fftshift(input, dim=None) -> Tensor
Reorders n-dimensional FFT data, as provided by :func:`~torch.fft.fftn`, to have
negative frequency terms first.
This performs a periodic shift of n-dimensional data such that the origin
``(0, ..., 0)`` is moved to the center of the tensor. Specifically, to
``input.shape[dim] // 2`` in each selected dimension.
Note:
By convention, the FFT returns positive frequency terms first, followed by
the negative frequencies in reverse order, so that ``f[-i]`` for all
:math:`0 < i \leq n/2` in Python gives the negative frequency terms.
:func:`~torch.fft.fftshift` rearranges all frequencies into ascending order
from negative to positive with the zero-frequency term in the center.
Note:
For even lengths, the Nyquist frequency at ``f[n/2]`` can be thought of as
either negative or positive. :func:`~torch.fft.fftshift` always puts the
Nyquist term at the 0-index. This is the same convention used by
:func:`~torch.fft.fftfreq`.
Args:
input (Tensor): the tensor in FFT order
dim (int, Tuple[int], optional): The dimensions to rearrange.
Only dimensions specified here will be rearranged, any other dimensions
will be left in their original order.
Default: All dimensions of :attr:`input`.
Example:
>>> f = torch.fft.fftfreq(4)
>>> f
tensor([ 0.0000, 0.2500, -0.5000, -0.2500])
>>> torch.fft.fftshift(f)
tensor([-0.5000, -0.2500, 0.0000, 0.2500])
Also notice that the Nyquist frequency term at ``f[2]`` was moved to the
beginning of the tensor.
This also works for multi-dimensional transforms:
>>> x = torch.fft.fftfreq(5, d=1/5) + 0.1 * torch.fft.fftfreq(5, d=1/5).unsqueeze(1)
>>> x
tensor([[ 0.0000, 1.0000, 2.0000, -2.0000, -1.0000],
[ 0.1000, 1.1000, 2.1000, -1.9000, -0.9000],
[ 0.2000, 1.2000, 2.2000, -1.8000, -0.8000],
[-0.2000, 0.8000, 1.8000, -2.2000, -1.2000],
[-0.1000, 0.9000, 1.9000, -2.1000, -1.1000]])
>>> torch.fft.fftshift(x)
tensor([[-2.2000, -1.2000, -0.2000, 0.8000, 1.8000],
[-2.1000, -1.1000, -0.1000, 0.9000, 1.9000],
[-2.0000, -1.0000, 0.0000, 1.0000, 2.0000],
[-1.9000, -0.9000, 0.1000, 1.1000, 2.1000],
[-1.8000, -0.8000, 0.2000, 1.2000, 2.2000]])
:func:`~torch.fft.fftshift` can also be useful for spatial data. If our
data is defined on a centered grid (``[-(N//2), (N-1)//2]``) then we can
use the standard FFT defined on an uncentered grid (``[0, N)``) by first
applying an :func:`~torch.fft.ifftshift`.
>>> x_centered = torch.arange(-5, 5)
>>> x_uncentered = torch.fft.ifftshift(x_centered)
>>> fft_uncentered = torch.fft.fft(x_uncentered)
Similarly, we can convert the frequency domain components to centered
convention by applying :func:`~torch.fft.fftshift`.
>>> fft_centered = torch.fft.fftshift(fft_uncentered)
The inverse transform, from centered Fourier space back to centered spatial
data, can be performed by applying the inverse shifts in reverse order:
>>> x_centered_2 = torch.fft.fftshift(torch.fft.ifft(torch.fft.ifftshift(fft_centered)))
>>> torch.testing.assert_close(x_centered.to(torch.complex64), x_centered_2, check_stride=False)
""")
ifftshift = _add_docstr(_fft.fft_ifftshift, r"""
ifftshift(input, dim=None) -> Tensor
Inverse of :func:`~torch.fft.fftshift`.
Args:
input (Tensor): the tensor in FFT order
dim (int, Tuple[int], optional): The dimensions to rearrange.
Only dimensions specified here will be rearranged, any other dimensions
will be left in their original order.
Default: All dimensions of :attr:`input`.
Example:
>>> f = torch.fft.fftfreq(5)
>>> f
tensor([ 0.0000, 0.2000, 0.4000, -0.4000, -0.2000])
A round-trip through :func:`~torch.fft.fftshift` and
:func:`~torch.fft.ifftshift` gives the same result:
>>> shifted = torch.fft.fftshift(f)
>>> torch.fft.ifftshift(shifted)
tensor([ 0.0000, 0.2000, 0.4000, -0.4000, -0.2000])
""")
| pytorch-master | torch/fft/__init__.py |
from contextlib import contextmanager
try:
from torch._C import _itt
except ImportError:
class _ITTStub(object):
@staticmethod
def _fail(*args, **kwargs):
raise RuntimeError("ITT functions not installed. Are you sure you have a ITT build?")
rangePush = _fail
rangePop = _fail
mark = _fail
_itt = _ITTStub() # type: ignore[assignment]
__all__ = ['range_push', 'range_pop', 'mark', 'range']
def range_push(msg):
"""
Arguments:
msg (str): ASCII message to associate with range
"""
return _itt.rangePush(msg)
def range_pop():
"""
"""
return _itt.rangePop()
def mark(msg):
"""
Describe an instantaneous event that occurred at some point.
Arguments:
msg (str): ASCII message to associate with the event.
"""
return _itt.mark(msg)
@contextmanager
def range(msg, *args, **kwargs):
"""
Context manager / decorator that pushes an ITT range at the beginning
of its scope, and pops it at the end. If extra arguments are given,
they are passed as arguments to msg.format().
Args:
msg (str): message to associate with the range
"""
range_push(msg.format(*args, **kwargs))
yield
range_pop()
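# A minimal usage sketch (illustrative only; the calls require an ITT-enabled build
# of PyTorch, otherwise the _ITTStub fallback above raises a RuntimeError):
#
#     import torch.profiler.itt as itt
#     with itt.range("iteration_{}", 0):
#         pass  # work executed here is recorded under the "iteration_0" ITT range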
| pytorch-master | torch/profiler/itt.py |
from collections import deque
import json
import math
import os
import re
from typing import Dict, List, Set
import torch
from torch.profiler import profile
import torch.utils.benchmark as benchmark
from torch.profiler._utils import index_of_first_match
from torch._C._autograd import (_ProfilerEvent, _ExtraFields_TorchOp,
_ExtraFields_PyCCall, _ExtraFields_PyCall,
_EventType)
class Pattern:
'''
    Base class for all patterns. Subclass this class and implement match()
    to define custom patterns.
    In subclasses, also define the description and, if needed, the skip property.
    A minimal example subclass is sketched after this class definition.
'''
def __init__(self, prof: profile, should_benchmark: bool = False):
self.prof = prof
self.should_benchmark = should_benchmark
self.name = "Please specify a name for pattern"
self.description = "Please specify a description for pattern"
self.url = ""
assert prof.profiler is not None and prof.profiler.kineto_results is not None
self.event_tree = prof.profiler.kineto_results.experimental_event_tree(
)
self.tid_root: Dict[int, List[_ProfilerEvent]] = {}
for event in self.event_tree:
self.tid_root.setdefault(event.start_tid, []).append(event)
@property
def skip(self):
return False
def report(self, event: _ProfilerEvent):
msg = f"{self.description}\n[Source Code Location] {source_code_location(event)}"
return msg
def eventTreeTraversal(self):
'''
Traverse the event tree and yield all events.
Override this method in subclass to customize the traversal.
'''
yield from eventTreeDFS(self.event_tree)
def summary(self, events: List[_ProfilerEvent]):
default_summary = f"{self.name}: {len(events)} events matched."
if self.should_benchmark:
            # If the pattern implements benchmark(), use the benchmark summary.
return self.benchmark_summary(
events) if hasattr( # type: ignore[attr-defined]
self, 'benchmark') else default_summary
return default_summary
def benchmark_summary(self, events: List[_ProfilerEvent]):
def format_time(time_ns: int):
unit_lst = ["ns", "us", "ms"]
for unit in unit_lst:
if time_ns < 1000:
return f"{time_ns:.2f} {unit}"
                time_ns /= 1000
return f"{time_ns:.2f} s"
assert hasattr(self, 'benchmark'), 'Please implement benchmark()'
shapes_factor_map = self.benchmark( # type: ignore[attr-defined]
events)
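        # original_time is the measured total; new_time scales each matched event's
        # duration by the benchmarked per-shape time ratio (optimized / original) to
        # estimate the improved total.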
original_time = sum(event.duration_time_ns for event in events)
new_time = sum(shapes_factor_map[input_shapes(event)] *
event.duration_time_ns for event in events)
return (
f"{self.name}: {len(events)} events matched. "
f"Total Estimated Speedup: {format_time(original_time - new_time)} ({round(original_time/new_time, 2)}X)"
)
def match(self, event: _ProfilerEvent):
'''
Return True if the event matches the pattern.
        This method should be overridden in a subclass.
'''
raise NotImplementedError
def matched_events(self):
if self.skip:
return []
matched_events = []
for event in self.eventTreeTraversal():
if self.match(event):
matched_events.append(event)
return matched_events
def root_of(self, event: _ProfilerEvent):
while event.parent:
event = event.parent
return event
def siblings_of(self, event: _ProfilerEvent):
if event.parent:
children = event.parent.children
else:
children = self.tid_root[event.start_tid]
index = children.index(event)
return children[:index], children[index + 1:]
def next_of(self, event: _ProfilerEvent):
_, next_events = self.siblings_of(event)
return next_events[0] if next_events else None
def prev_of(self, event: _ProfilerEvent):
prev_events, _ = self.siblings_of(event)
return prev_events[-1] if prev_events else None
def go_up_until(self, event: _ProfilerEvent, predicate):
if not event:
return None
while event.parent and not predicate(event):
event = event.parent
return event
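# A minimal sketch of a custom pattern (hypothetical example, not part of this
# module): subclass Pattern, set a name and description, and implement match().
#
#     class AtenAddPattern(Pattern):
#         def __init__(self, prof, should_benchmark=False):
#             super().__init__(prof, should_benchmark)
#             self.name = "Aten Add Pattern"
#             self.description = "Matched every aten::add call."
#
#         def match(self, event):
#             return event.name() == "aten::add"
#
#     matches = AtenAddPattern(prof).matched_events()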
# Patterns
class NamePattern(Pattern):
def __init__(self,
prof: profile,
name: str,
should_benchmark: bool = False):
super().__init__(prof, should_benchmark)
self.description = f"Matched Name Event: {name}"
self.name = name
def match(self, event: _ProfilerEvent):
return re.search(self.name, event.name()) is not None
class ExtraCUDACopyPattern(Pattern):
'''
    This pattern identifies when we create a constant tensor on CPU and immediately move it to GPU.
example: torch.zeros((100, 100)).to("cuda")
Pattern:
    built-in method |built-in method
... | aten::to
aten::fill_/aten::zero_ | aten::_to_copy
Algorithm:
    We start at node aten::to, go to the parent event's previous sibling,
    and check if we have an aten::fill_/aten::zero_ as we keep going down the tree.
    We always select the last child in the children list when we go down the tree.
    If any step fails, it is not a match.
'''
def __init__(self, prof: profile, should_benchmark: bool = False):
super().__init__(prof, should_benchmark)
self.name = "Extra CUDA Copy Pattern"
self.description = "Filled a CPU tensor and immediately moved it to GPU. Please initalize it on GPU."
self.url = "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#create-tensors-directly-on-the-target-device"
self.init_ops = {
"aten::fill_", "aten::zero_", "aten::normal_", "aten::uniform_"
}
@property
def skip(self):
return not self.prof.with_stack or not self.prof.record_shapes
def match(self, event):
# TODO: We should also check tensor identities
if event.name() != "aten::to":
return False
to_event = event
if not event.children:
return False
event = event.children[-1]
if event.name() != "aten::_to_copy":
return False
if not event.children:
return False
event = event.children[-1]
if event.name() != "aten::copy_":
return False
# aten::copy_ should have the first 2 args dtype the same
dtypes = input_dtypes(event)
if len(dtypes) < 2:
return False
if dtypes[0] != dtypes[1]:
return False
event = to_event
# Up one level
event = event.parent
if event is None:
return False
# Check if we have a aten::fill_ in previous leaf
event = self.prev_of(event)
if event is None:
return False
while event.children:
event = event.children[-1]
# aten::zero_ is a special optimization case where fill_ is not called
if event.name() in self.init_ops:
return True
return event.name() in self.init_ops
# TODO: Check if tensor is reused
def benchmark(self, events: List[_ProfilerEvent]):
shapes_factor_map = {input_shapes(event): 0.0 for event in events}
for shape in shapes_factor_map:
size = shape[0]
to_timer = benchmark.Timer(stmt='torch.ones(size).to("cuda")',
globals={'size': size})
de_timer = benchmark.Timer(stmt='torch.ones(size, device="cuda")',
globals={'size': size})
to_time = to_timer.timeit(10).mean
de_time = de_timer.timeit(10).mean
shapes_factor_map[shape] = de_time / to_time
return shapes_factor_map
class ForLoopIndexingPattern(Pattern):
'''
This pattern identifies if we use a for loop to index a tensor that
can be vectorized.
example:
tensor = torch.empty((100, 100))
for i in range(100):
tensor[i] = i
Pattern:
aten::select | ... | aten::select | ... (Repeat)
Algorithm:
We start at node aten::select, and we check if we can find this alternating pattern.
We also keep a set of visited events to avoid duplicate matches in the for loop.
'''
def __init__(self, prof: profile, should_benchmark: bool = False):
super().__init__(prof, should_benchmark)
self.name = "For Loop Indexing Pattern"
self.description = "For loop indexing detected. Vectorization recommended."
self.visited: Set[int] = set()
def eventTreeTraversal(self):
'''
We need to use BFS traversal order to avoid duplicate matches.
'''
yield from eventTreeBFS(self.event_tree)
def match(self, event: _ProfilerEvent):
if event.name() != "aten::select":
return False
if event.id in self.visited:
return False
repeat_count = 1
_, next = self.siblings_of(event)
if len(next) <= 1:
return False
# Custom event list matching
def same_ops(list1, list2):
if len(list1) != len(list2):
return False
for op1, op2 in zip(list1, list2):
if op1.name() != op2.name():
return False
return True
# Record the ops between two aten::select
next_select_idx = index_of_first_match(
next, lambda e: e.name() == "aten::select")
if next_select_idx is None:
return False
indexing_ops = [event] + next[:next_select_idx]
next = next[len(indexing_ops) - 1:]
for i in range(0, len(next), len(indexing_ops)):
if same_ops(indexing_ops, next[i:i + len(indexing_ops)]):
repeat_count += 1
self.visited.add(next[i].id)
else:
break
return repeat_count >= 10
class FP32MatMulPattern(Pattern):
def __init__(self, prof: profile, should_benchmark: bool = False):
super().__init__(prof, should_benchmark)
self.name = "FP32 MatMul Pattern"
self.description = (
"You are currently using a GPU that supports TF32. "
"Please enable TF32 by setting 'torch.backends.cuda.matmul.allow_tf32 = True'"
)
self.url = "https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
@property
def skip(self):
# Anything below sm_80 is pre-Ampere and does not support TF32
has_tf32 = all(
int(arch[3:]) >= 80 for arch in torch.cuda.get_arch_list())
return has_tf32 is False or super().skip or not self.prof.record_shapes
def match(self, event: _ProfilerEvent):
# If we saw this pattern once, we don't need to match it again
if event.tag != _EventType.TorchOp:
return False
assert isinstance(event.extra_fields, _ExtraFields_TorchOp)
if event.name() == "aten::mm":
if event.extra_fields.allow_tf32_cublas is False:
return True
return False
def report(self, event: _ProfilerEvent):
return self.description
def benchmark(self, events: List[_ProfilerEvent]):
shapes_factor_map = {input_shapes(event): 0.0 for event in events}
for shape in shapes_factor_map:
matrixA = torch.randn(shape[0], device="cuda", dtype=torch.float32)
matrixB = torch.randn(shape[1], device="cuda", dtype=torch.float32)
fp32_timer = benchmark.Timer(stmt='torch.mm(matrixA, matrixB)',
globals={
"matrixA": matrixA,
"matrixB": matrixB
})
tf32_timer = benchmark.Timer(
stmt='torch.mm(matrixA, matrixB)',
setup='torch.backends.cuda.matmul.allow_tf32 = True',
globals={
"matrixA": matrixA,
"matrixB": matrixB
})
torch.backends.cuda.matmul.allow_tf32 = False
fp32_time = fp32_timer.timeit(10).mean
tf32_time = tf32_timer.timeit(10).mean
shapes_factor_map[shape] = tf32_time / fp32_time
return shapes_factor_map
class OptimizerSingleTensorPattern(Pattern):
'''
This pattern identifies if we are using the single-tensor version of an optimizer.
example:
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
By adding foreach=True to enable the multi-tensor optimizer, we can gain a speedup when
the kernels are relatively small.
Pattern:
XXXXX: _single_tensor_<OPTIMIZER_NAME>
Algorithm:
String match
'''
def __init__(self, prof: profile, should_benchmark: bool = False):
super().__init__(prof, should_benchmark)
self.name = "Optimizer Single Tensor Pattern"
self.optimizers_with_foreach = ["adam", "sgd", "adamw"]
self.description = (
"Detected optimizer running with the single tensor implementation. "
"Please enable the multi tensor implementation by passing 'foreach=True' to the optimizer."
)
self.url = ""
def match(self, event: _ProfilerEvent):
for optimizer in self.optimizers_with_foreach:
if event.name().endswith(f"_single_tensor_{optimizer}"):
return True
return False
class SynchronizedDataLoaderPattern(Pattern):
'''
This pattern identifies if we are using num_workers=0 in DataLoader.
example:
torch.utils.data.DataLoader(dataset, batch_size=batch_size)
Add num_workers=N to the arguments. N depends on system configuration.
Pattern:
dataloader.py(...): __iter__
dataloader.py(...): _get_iterator
NOT dataloader.py(...): check_worker_number_rationality
Algorithm:
If we don't see a check_worker_number_rationality call in the dataloader __iter__,
it is not an asynchronous dataloader.
'''
def __init__(self, prof: profile, should_benchmark: bool = False):
super().__init__(prof, should_benchmark)
self.name = "Synchronized DataLoader Pattern"
self.description = (
"Detected DataLoader running with synchronized implementation. "
"Please enable asynchronous dataloading by setting num_workers > 0 when initializing DataLoader."
)
self.url = (
"https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html"
"#enable-async-data-loading-and-augmentation")
def match(self, event: _ProfilerEvent):
def is_dataloader_function(name: str, function_name: str):
return name.startswith(
os.path.join("torch", "utils", "data",
"dataloader.py")) and name.endswith(function_name)
if not is_dataloader_function(event.name(), "__iter__"):
return False
if not event.children:
return False
event = event.children[0]
if not is_dataloader_function(event.name(), "_get_iterator"):
return False
if not event.children:
return False
event = event.children[0]
return not is_dataloader_function(event.name(),
"check_worker_number_rationality")
# TODO: We should also check if the loader is bottleneck.
class GradNotSetToNonePattern(Pattern):
'''
This pattern identifies if we are not setting grad to None in zero_grad.
example:
optimizer.zero_grad()
By setting set_to_none=True, we can gain a speedup.
Pattern:
XXXXX: _zero_grad
NOT aten::zeros
aten::zero_
aten::zero_ is called on each parameter in the model.
We also want to make sure it is not called by aten::zeros.
Algorithm:
String match
'''
def __init__(self, prof: profile, should_benchmark: bool = False):
super().__init__(prof, should_benchmark)
self.name = "Gradient Set To Zero Instead of None Pattern"
self.description = (
"Detected gradient set to zero instead of None. "
"Please add 'set_to_none=True' when calling zero_grad().")
self.url = (
"https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html"
"#disable-gradient-calculation-for-validation-or-inference")
def match(self, event: _ProfilerEvent):
if not event.name().endswith(": zero_grad"):
return False
if not event.children:
return False
for sub_event in eventTreeDFS(event.children):
if sub_event.name(
) == "aten::zero_" and sub_event.parent.name() != "aten::zeros":
return True
# TODO: We should also check if the optimizer's numerical behavior will change.
return False
class Conv2dBiasFollowedByBatchNorm2dPattern(Pattern):
'''
This pattern identifies if we are enabling bias in Conv2d which is followed by BatchNorm2d.
Bias doesn't do anything when followed by batchnorm.
Pattern:
nn.Module: Conv2d | nn.Module: BatchNorm2d
...
aten::conv2d AND dtype of third argument is not null
The third argument is the bias
Algorithm:
String match
'''
def __init__(self, prof: profile, should_benchmark: bool = False):
super().__init__(prof, should_benchmark)
self.name = "Enabling Bias in Conv2d Followed By BatchNorm Pattern"
self.description = "Detected bias enabled in Conv2d that is followed by BatchNorm2d. Please set 'bias=False' in Conv2d."
self.url = (
"https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html"
"#disable-bias-for-convolutions-directly-followed-by-a-batch-norm")
@property
def skip(self):
return self.prof.record_shapes is False or super().skip
def match(self, event: _ProfilerEvent):
if event.name() != "aten::conv2d":
return False
if len(input_dtypes(event)) < 3 or input_dtypes(event)[2] == "":
return False
# This means bias=True
event = self.go_up_until(
event, lambda e: e.name().startswith("nn.Module: Conv2d"))
if not event:
return False
event = self.next_of(event)
if not event:
return False
return event.name().startswith("nn.Module: BatchNorm2d")
class MatMulDimInFP16Pattern(Pattern):
def __init__(self, prof: profile, should_benchmark: bool = False):
super().__init__(prof, should_benchmark)
self.name = "Matrix Multiplication Dimension Not Aligned Pattern"
self.description = "Detected matmul with dimension not aligned. Please use matmul with aligned dimension."
self.url = "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#use-mixed-precision-and-amp"
@property
def skip(self):
return not self.prof.with_stack or not self.prof.record_shapes
def match(self, event: _ProfilerEvent):
def multiple_of(shapes, multiple):
return all(dim % multiple == 0 for shape in shapes
for dim in shape[-2:])
if event.name() not in ("aten::mm", "aten::bmm", "aten::addmm"):
return False
if not input_dtypes(event):
return False
arg_dtype = input_dtypes(event)[0]
# TODO: Have a better way to check dtype_size
if (arg_dtype.endswith("c10::BFloat16")
or arg_dtype.endswith("c10::Half")) and not multiple_of(
input_shapes(event), 8):
return True
return False
def benchmark(self, events: List[_ProfilerEvent]):
def closest_multiple(shapes, multiple):
return [multiple * math.ceil(shape / multiple) for shape in shapes]
shapes_factor_map = {input_shapes(event): 0.0 for event in events}
for shape in shapes_factor_map:
matrixA = torch.randn(shape[0], device="cuda", dtype=torch.float16)
matrixB = torch.randn(shape[1], device="cuda", dtype=torch.float16)
not_aligned_dim_timer = benchmark.Timer(
stmt='torch.mm(matrixA, matrixB)',
globals={
"matrixA": matrixA,
"matrixB": matrixB
})
matrixA = torch.randn(closest_multiple(shape[0], 8),
device="cuda",
dtype=torch.float16)
matrixB = torch.randn(closest_multiple(shape[1], 8),
device="cuda",
dtype=torch.float16)
aligned_dim_timer = benchmark.Timer(
stmt='torch.mm(matrixA, matrixB)',
globals={
"matrixA": matrixA,
"matrixB": matrixB
})
not_aligned_dim_time = not_aligned_dim_timer.timeit(10).mean
aligned_dim_time = aligned_dim_timer.timeit(10).mean
shapes_factor_map[shape] = aligned_dim_time / not_aligned_dim_time
return shapes_factor_map
def source_code_location(event: _ProfilerEvent):
while event:
if event.tag == _EventType.PyCall or event.tag == _EventType.PyCCall:
assert isinstance(event.extra_fields,
_ExtraFields_PyCall) or isinstance(
event.extra_fields, _ExtraFields_PyCCall)
if not event.extra_fields.caller.file_name.startswith("torch" +
os.sep):
return f"{event.extra_fields.caller.file_name}:{event.extra_fields.caller.line_number}"
event = event.parent
return "No source code location found"
def input_shapes(event: _ProfilerEvent):
assert isinstance(event.extra_fields, _ExtraFields_TorchOp)
return tuple([tuple(shape) for shape in event.extra_fields.inputs.shapes])
def input_dtypes(event: _ProfilerEvent):
assert isinstance(event.extra_fields, _ExtraFields_TorchOp)
return tuple(t for t in event.extra_fields.inputs.dtypes)
def eventTreeDFS(event_tree: List[_ProfilerEvent]):
'''
Standard DFS traversal of the event tree.
'''
stack = deque(event_tree)
while stack:
curr_event = stack.pop()
yield curr_event
for child_event in curr_event.children:
stack.append(child_event)
def eventTreeBFS(event_tree: List[_ProfilerEvent]):
'''
Standard BFS traversal of the event tree.
'''
stack = deque(event_tree)
while stack:
curr_event = stack.popleft()
yield curr_event
for child_event in curr_event.children:
stack.append(child_event)
def report_all_anti_patterns(prof,
should_benchmark: bool = False,
print_enable: bool = True,
json_report_dir: str = None):
report_dict: Dict = {}
anti_patterns = [
ExtraCUDACopyPattern(prof, should_benchmark),
# ForLoopIndexingPattern(prof, should_benchmark),
FP32MatMulPattern(prof, should_benchmark),
OptimizerSingleTensorPattern(prof, should_benchmark),
SynchronizedDataLoaderPattern(prof, should_benchmark),
GradNotSetToNonePattern(prof, should_benchmark),
Conv2dBiasFollowedByBatchNorm2dPattern(prof, should_benchmark),
MatMulDimInFP16Pattern(prof, should_benchmark)
]
reported = set()
summaries = []
message_list = [f"{'-'*40}TorchTidy Report{'-'*40}"]
message_list.append("Matched Events:")
for anti_pattern in anti_patterns:
matched_events = anti_pattern.matched_events()
if not matched_events:
continue
summaries.append(anti_pattern.summary(matched_events))
for event in matched_events:
report_msg = anti_pattern.report(event)
if report_msg not in reported:
message_list.append(report_msg)
reported.add(report_msg)
src_location, line_no = source_code_location(event).split(":")
report_dict.setdefault(src_location, []).append({
"line_number": int(line_no),
"name": anti_pattern.name,
"url": anti_pattern.url,
"message": anti_pattern.description,
})
if json_report_dir is not None:
json_report_path = os.path.join(json_report_dir,
"torchtidy_report.json")
if os.path.exists(json_report_path):
with open(json_report_path, "r") as f:
existing_report = json.load(f)
existing_report.update(report_dict)
report_dict = existing_report
with open(json_report_path, "w") as f:
json.dump(report_dict, f, indent=4)
message_list.append("Summary:")
message_list += summaries
message_list.append(f"{'-'*40}TorchTidy Report{'-'*40}")
if print_enable:
print("\n".join(message_list))
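# A minimal usage sketch (not part of this module; the workload below is
# hypothetical): most patterns above need stacks and shapes, so the profile
# should be collected with both enabled before being handed to
# report_all_anti_patterns.
#
#     with torch.profiler.profile(with_stack=True, record_shapes=True) as prof:
#         train_one_step(model, batch)   # hypothetical training step
#     report_all_anti_patterns(prof, should_benchmark=False, print_enable=True)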
| pytorch-master | torch/profiler/_pattern_matcher.py |
r'''
PyTorch Profiler is a tool that allows the collection of performance metrics during training and inference.
Profiler's context manager API can be used to better understand what model operators are the most expensive,
examine their input shapes and stack traces, study device kernel activity and visualize the execution trace.
.. note::
An earlier version of the API in the :mod:`torch.autograd` module is considered legacy and will be deprecated.
'''
from .profiler import profile, _KinetoProfile, \
schedule, supported_activities, tensorboard_trace_handler, ProfilerAction, \
_ExperimentalConfig, ExecutionGraphObserver
from torch._C._autograd import ProfilerActivity, kineto_available, _supported_activities, DeviceType
from torch.autograd.profiler import record_function
__all__ = ['profile', 'schedule', 'supported_activities',
'tensorboard_trace_handler', 'ProfilerAction', 'ProfilerActivity',
'kineto_available', 'DeviceType', 'record_function', 'ExecutionGraphObserver']
from . import itt
| pytorch-master | torch/profiler/__init__.py |
import gzip
import json
import os
import tempfile
from enum import Enum
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
from warnings import warn
import torch
import torch.autograd.profiler as prof
from torch._C._autograd import (
_ExperimentalConfig,
_add_execution_graph_observer,
_remove_execution_graph_observer,
_enable_execution_graph_observer,
_disable_execution_graph_observer,
)
from torch.autograd import ProfilerActivity, kineto_available
__all__ = ['supported_activities', 'ProfilerAction', 'schedule', 'tensorboard_trace_handler', 'profile',
'ExecutionGraphObserver']
def supported_activities():
"""
Returns a set of supported profiler tracing activities.
Note: profiler uses CUPTI library to trace on-device CUDA kernels.
In the case when CUDA is enabled but CUPTI is not available, passing
``ProfilerActivity.CUDA`` to profiler results in using the legacy CUDA
profiling code (same as in the legacy ``torch.autograd.profiler``).
This, in turn, results in including CUDA time in the profiler table output,
but not in the JSON trace.
"""
return torch.autograd._supported_activities()
class _KinetoProfile(object):
"""Low-level profiler that wraps the autograd profiler.
Args:
activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values:
``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``.
Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA.
record_shapes (bool): save information about operator's input shapes.
profile_memory (bool): track tensor memory allocation/deallocation.
with_stack (bool): record source information (file and line number) for the ops.
with_flops (bool): use formula to estimate the FLOPS of specific operators
(matrix multiplication and 2D convolution).
with_modules (bool): record module hierarchy (including function names)
corresponding to the callstack of the op. E.g. if module A's forward calls
module B's forward, which contains an aten::add op,
then aten::add's module hierarchy is A.B.
Note that this support exists, at the moment, only for TorchScript models
and not eager mode models.
experimental_config (_ExperimentalConfig) : A set of experimental options
used by profiler libraries like Kineto. Note, backward compatibility is not guaranteed.
.. note::
This API is experimental and subject to change in the future.
Enabling shape and stack tracing results in additional overhead.
When record_shapes=True is specified, profiler will temporarily hold references to the tensors;
that may further prevent certain optimizations that depend on the reference count and introduce
extra tensor copies.
"""
def __init__(
self,
*,
activities: Optional[Iterable[ProfilerActivity]] = None,
record_shapes: bool = False,
profile_memory: bool = False,
with_stack: bool = False,
with_flops: bool = False,
with_modules: bool = False,
experimental_config: Optional[_ExperimentalConfig] = None):
self.activities = set(activities) if activities else supported_activities()
self.record_shapes = record_shapes
self.with_flops = with_flops
self.profile_memory = profile_memory
self.with_stack = with_stack
self.with_modules = with_modules
self.experimental_config = experimental_config
self.profiler: Optional[prof.profile] = None
def start(self):
self.prepare_trace()
self.start_trace()
def stop(self):
self.stop_trace()
def prepare_trace(self):
self.profiler = prof.profile(
use_cuda=(ProfilerActivity.CUDA in self.activities),
use_cpu=(ProfilerActivity.CPU in self.activities),
record_shapes=self.record_shapes,
with_flops=self.with_flops,
profile_memory=self.profile_memory,
with_stack=self.with_stack,
with_modules=self.with_modules,
use_kineto=True,
experimental_config=self.experimental_config,
)
self.profiler._prepare_trace()
def start_trace(self):
assert self.profiler is not None
self.profiler._start_trace()
if kineto_available():
dist_info = self._get_distributed_info()
if dist_info:
self.add_metadata_json("distributedInfo", json.dumps(dist_info))
def stop_trace(self):
assert self.profiler is not None
self.profiler.__exit__(None, None, None)
def export_chrome_trace(self, path: str):
"""
Exports the collected trace in Chrome JSON format.
"""
assert self.profiler
if path.endswith('.gz'):
fp = tempfile.NamedTemporaryFile('w+t', suffix='.json', delete=False)
fp.close()
retvalue = self.profiler.export_chrome_trace(fp.name)
with open(fp.name) as fin:
with gzip.open(path, 'wt') as fout:
fout.writelines(fin)
os.remove(fp.name)
return retvalue
else:
return self.profiler.export_chrome_trace(path)
def export_stacks(self, path: str, metric: str = "self_cpu_time_total"):
"""Save stack traces in a file in a format suitable for visualization.
Args:
path (str): save stacks file to this location;
metric (str): metric to use: "self_cpu_time_total" or "self_cuda_time_total"
.. note::
Example of using FlameGraph tool:
- git clone https://github.com/brendangregg/FlameGraph
- cd FlameGraph
- ./flamegraph.pl --title "CPU time" --countname "us." profiler.stacks > perf_viz.svg
"""
assert self.profiler
return self.profiler.export_stacks(path, metric)
def key_averages(self, group_by_input_shape: bool = False, group_by_stack_n: int = 0):
"""Averages events, grouping them by operator name and (optionally) input shapes and
stack.
.. note::
To use shape/stack functionality make sure to set record_shapes/with_stack
when creating profiler context manager.
"""
assert self.profiler
return self.profiler.key_averages(group_by_input_shape, group_by_stack_n)
def events(self):
"""
Returns the list of unaggregated profiler events,
to be used in the trace callback or after the profiling is finished
"""
assert self.profiler
return self.profiler.function_events
def add_metadata(self, key: str, value: str):
"""
Adds a user defined metadata with a string key and a string value
into the trace file
"""
wrapped_value = "\"" + value.replace('"', '\\"') + "\""
torch.autograd._add_metadata_json(key, wrapped_value)
def add_metadata_json(self, key: str, value: str):
"""
Adds a user defined metadata with a string key and a valid json value
into the trace file
"""
torch.autograd._add_metadata_json(key, value)
def _get_distributed_info(self):
import torch.distributed as dist
if not dist.is_available() or not dist.is_initialized():
return None
return {
"backend": dist.get_backend(),
"rank": dist.get_rank(),
"world_size": dist.get_world_size()
}
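# A minimal usage sketch (the workload is hypothetical): _KinetoProfile is the
# schedule-free building block, driven manually with start()/stop() instead of
# the context-manager `profile` defined below.
#
#     p = _KinetoProfile(record_shapes=True)
#     p.start()
#     model(inputs)   # hypothetical workload
#     p.stop()
#     print(p.key_averages().table(sort_by="self_cpu_time_total"))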
class ProfilerAction(Enum):
"""
Profiler actions that can be taken at the specified intervals
"""
NONE = 0
WARMUP = 1
RECORD = 2
RECORD_AND_SAVE = 3
def schedule(*, wait: int, warmup: int, active: int, repeat: int = 0, skip_first: int = 0) -> Callable:
"""
Returns a callable that can be used as profiler ``schedule`` argument. The profiler will skip
the first ``skip_first`` steps, then wait for ``wait`` steps, then do the warmup for the next ``warmup`` steps,
then do the active recording for the next ``active`` steps and then repeat the cycle starting with ``wait`` steps.
The optional number of cycles is specified with the ``repeat`` parameter; a value of zero means that
the cycles will continue until the profiling is finished.
"""
def schedule_fn(step: int) -> ProfilerAction:
assert step >= 0
if step < skip_first:
return ProfilerAction.NONE
else:
step -= skip_first
num_steps = wait + warmup + active
if repeat > 0 and step / num_steps >= repeat:
return ProfilerAction.NONE
mod_step = step % num_steps
if mod_step < wait:
return ProfilerAction.NONE
elif mod_step < wait + warmup:
return ProfilerAction.WARMUP
else:
return ProfilerAction.RECORD if mod_step < num_steps - 1 \
else ProfilerAction.RECORD_AND_SAVE
assert wait >= 0 and warmup >= 0 and active > 0 and \
repeat >= 0 and skip_first >= 0, "Invalid profiler schedule arguments"
if warmup == 0:
warn("Profiler won't be using warmup, this can skew profiler results")
return schedule_fn
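# A small worked example of the mapping implemented above (the argument values
# are only illustrative):
#
#     sched = schedule(skip_first=1, wait=1, warmup=1, active=2, repeat=1)
#     [sched(i) for i in range(6)]
#     # -> [NONE, NONE, WARMUP, RECORD, RECORD_AND_SAVE, NONE]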
def _default_schedule_fn(_: int) -> ProfilerAction:
"""
Default profiler behavior - immediately starts recording the events,
keeps doing it on every profiler step.
"""
return ProfilerAction.RECORD
def tensorboard_trace_handler(dir_name: str, worker_name: Optional[str] = None, use_gzip: bool = False):
"""
Outputs tracing files to the directory ``dir_name``, which can then be
passed directly to TensorBoard as the logdir.
``worker_name`` should be unique for each worker in a distributed scenario;
it will be set to '[hostname]_[pid]' by default.
"""
import os
import socket
import time
def handler_fn(prof) -> None:
nonlocal worker_name
if not os.path.isdir(dir_name):
try:
os.makedirs(dir_name, exist_ok=True)
except Exception:
raise RuntimeError("Can't create directory: " + dir_name)
if not worker_name:
worker_name = "{}_{}".format(socket.gethostname(), str(os.getpid()))
file_name = "{}.{}.pt.trace.json".format(worker_name, int(time.time() * 1000))
if use_gzip:
file_name = file_name + '.gz'
prof.export_chrome_trace(os.path.join(dir_name, file_name))
return handler_fn
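# A minimal usage sketch (log directory and training step are hypothetical):
# the returned handler is meant to be passed as `on_trace_ready`.
#
#     with profile(
#             schedule=schedule(wait=1, warmup=1, active=2),
#             on_trace_ready=tensorboard_trace_handler("./log/run1")) as p:
#         for _ in range(6):
#             train_step()   # hypothetical training step
#             p.step()
#
# Each saved trace lands in ./log/run1 as "<worker_name>.<epoch_ms>.pt.trace.json"
# (plus ".gz" when use_gzip=True) and the directory can be opened with
# `tensorboard --logdir ./log/run1`.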
class profile(_KinetoProfile):
"""Profiler context manager.
Args:
activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values:
``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``.
Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA.
schedule (Callable): callable that takes step (int) as a single parameter and returns
``ProfilerAction`` value that specifies the profiler action to perform at each step.
on_trace_ready (Callable): callable that is called at each step when ``schedule``
returns ``ProfilerAction.RECORD_AND_SAVE`` during the profiling.
record_shapes (bool): save information about operator's input shapes.
profile_memory (bool): track tensor memory allocation/deallocation.
with_stack (bool): record source information (file and line number) for the ops.
with_flops (bool): use formula to estimate the FLOPs (floating point operations) of specific operators
(matrix multiplication and 2D convolution).
with_modules (bool): record module hierarchy (including function names)
corresponding to the callstack of the op. E.g. if module A's forward calls
module B's forward, which contains an aten::add op,
then aten::add's module hierarchy is A.B.
Note that this support exists, at the moment, only for TorchScript models
and not eager mode models.
experimental_config (_ExperimentalConfig) : A set of experimental options
used for Kineto library features. Note, backward compatibility is not guaranteed.
use_cuda (bool):
.. deprecated:: 1.8.1
use ``activities`` instead.
.. note::
Use :func:`~torch.profiler.schedule` to generate the callable schedule.
Non-default schedules are useful when profiling long training jobs
and allow the user to obtain multiple traces at the different iterations
of the training process.
The default schedule simply records all the events continuously for the
duration of the context manager.
.. note::
Use :func:`~torch.profiler.tensorboard_trace_handler` to generate result files for TensorBoard:
``on_trace_ready=torch.profiler.tensorboard_trace_handler(dir_name)``
After profiling, result files can be found in the specified directory. Use the command:
``tensorboard --logdir dir_name``
to see the results in TensorBoard.
For more information, see
`PyTorch Profiler TensorBoard Plugin <https://github.com/pytorch/kineto/tree/master/tb_plugin>`__
.. note::
Enabling shape and stack tracing results in additional overhead.
When record_shapes=True is specified, profiler will temporarily hold references to the tensors;
that may further prevent certain optimizations that depend on the reference count and introduce
extra tensor copies.
Examples:
.. code-block:: python
with torch.profiler.profile(
activities=[
torch.profiler.ProfilerActivity.CPU,
torch.profiler.ProfilerActivity.CUDA,
]
) as p:
code_to_profile()
print(p.key_averages().table(
sort_by="self_cuda_time_total", row_limit=-1))
Using the profiler's ``schedule``, ``on_trace_ready`` and ``step`` functions:
.. code-block:: python
# Non-default profiler schedule allows user to turn profiler on and off
# on different iterations of the training loop;
# trace_handler is called every time a new trace becomes available
def trace_handler(prof):
print(prof.key_averages().table(
sort_by="self_cuda_time_total", row_limit=-1))
# prof.export_chrome_trace("/tmp/test_trace_" + str(prof.step_num) + ".json")
with torch.profiler.profile(
activities=[
torch.profiler.ProfilerActivity.CPU,
torch.profiler.ProfilerActivity.CUDA,
],
# In this example with wait=1, warmup=1, active=2,
# profiler will skip the first step/iteration,
# start warming up on the second, record
# the third and the fourth iterations,
# after which the trace will become available
# and on_trace_ready (when set) is called;
# the cycle repeats starting with the next step
schedule=torch.profiler.schedule(
wait=1,
warmup=1,
active=2),
on_trace_ready=trace_handler
# on_trace_ready=torch.profiler.tensorboard_trace_handler('./log')
# used when outputting for tensorboard
) as p:
for iter in range(N):
code_iteration_to_profile(iter)
# send a signal to the profiler that the next iteration has started
p.step()
"""
def __init__(
self,
*,
activities: Optional[Iterable[ProfilerActivity]] = None,
schedule: Optional[Callable[[int], ProfilerAction]] = None,
on_trace_ready: Optional[Callable[..., Any]] = None,
record_shapes: bool = False,
profile_memory: bool = False,
with_stack: bool = False,
with_flops: bool = False,
with_modules: bool = False,
experimental_config: Optional[_ExperimentalConfig] = None,
# deprecated:
use_cuda: Optional[bool] = None):
activities_set = set(activities) if activities else supported_activities()
if use_cuda is not None:
warn("use_cuda is deprecated, use activities argument instead")
if use_cuda:
activities_set.add(ProfilerActivity.CUDA)
elif ProfilerActivity.CUDA in activities_set:
activities_set.remove(ProfilerActivity.CUDA)
assert len(activities_set) > 0, "No valid profiler activities found"
super().__init__(
activities=activities,
record_shapes=record_shapes,
profile_memory=profile_memory,
with_stack=with_stack,
with_flops=with_flops,
with_modules=with_modules,
experimental_config=experimental_config,
)
if schedule:
self.schedule = schedule
# add step markers into the trace and table view
self.record_steps = True
else:
self.schedule = _default_schedule_fn
self.record_steps = False
self.on_trace_ready = on_trace_ready
self.step_num = 0
self.current_action = self.schedule(self.step_num)
self.step_rec_fn: Optional[prof.record_function] = None
self.action_map: Dict[Tuple[ProfilerAction, Optional[ProfilerAction]], List[Any]] = {
# key is (prev_action, current_action), value is action list corresponding to the state pair.
(ProfilerAction.NONE, ProfilerAction.NONE): [],
(ProfilerAction.NONE, ProfilerAction.WARMUP): [self.prepare_trace],
(ProfilerAction.NONE, ProfilerAction.RECORD): [self.prepare_trace, self.start_trace],
(ProfilerAction.NONE, ProfilerAction.RECORD_AND_SAVE): [self.prepare_trace, self.start_trace],
(ProfilerAction.WARMUP, ProfilerAction.NONE): [
partial(warn, "Incorrect schedule: WARMUP followed by NONE"),
self.start_trace,
self.stop_trace],
(ProfilerAction.WARMUP, ProfilerAction.WARMUP): [],
(ProfilerAction.WARMUP, ProfilerAction.RECORD): [self.start_trace],
(ProfilerAction.WARMUP, ProfilerAction.RECORD_AND_SAVE): [self.start_trace],
(ProfilerAction.RECORD, ProfilerAction.NONE): [
partial(warn, "Incorrect schedule: RECORD followed by NONE"),
self.stop_trace],
(ProfilerAction.RECORD, ProfilerAction.WARMUP): [
partial(warn, "Incorrect schedule: RECORD followed by WARMUP"),
self.stop_trace],
(ProfilerAction.RECORD, ProfilerAction.RECORD): [],
(ProfilerAction.RECORD, ProfilerAction.RECORD_AND_SAVE): [],
(ProfilerAction.RECORD_AND_SAVE, ProfilerAction.NONE): [self.stop_trace, self._trace_ready],
(ProfilerAction.RECORD_AND_SAVE, ProfilerAction.WARMUP): [self.stop_trace, self._trace_ready, self.prepare_trace],
(ProfilerAction.RECORD_AND_SAVE, ProfilerAction.RECORD): [
self.stop_trace,
self._trace_ready,
self.prepare_trace,
self.start_trace],
(ProfilerAction.RECORD_AND_SAVE, ProfilerAction.RECORD_AND_SAVE): [
self.stop_trace,
self._trace_ready,
self.prepare_trace,
self.start_trace],
# used for exit action
(ProfilerAction.WARMUP, None): [self.start_trace, self.stop_trace],
(ProfilerAction.RECORD, None): [self.stop_trace, self._trace_ready],
(ProfilerAction.RECORD_AND_SAVE, None): [self.stop_trace, self._trace_ready]
}
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def start(self):
self._transit_action(ProfilerAction.NONE, self.current_action)
if self.record_steps:
self.step_rec_fn = prof.record_function("ProfilerStep#" + str(self.step_num))
self.step_rec_fn.__enter__()
def stop(self):
if self.record_steps and self.step_rec_fn:
self.step_rec_fn.__exit__(None, None, None)
self._transit_action(self.current_action, None)
def step(self):
"""
Signals the profiler that the next profiling step has started.
"""
if self.record_steps and self.step_rec_fn:
self.step_rec_fn.__exit__(None, None, None)
prev_action = self.current_action
cur_step = self.step_num
self.step_num += 1
self.current_action = self.schedule(self.step_num)
self._transit_action(prev_action, self.current_action)
prof.kineto_step()
if self.record_steps:
self.step_rec_fn = prof.record_function("ProfilerStep#" + str(cur_step))
self.step_rec_fn.__enter__()
def _trace_ready(self):
if self.on_trace_ready:
self.on_trace_ready(self)
def _transit_action(self, prev_action, current_action):
action_list = self.action_map.get((prev_action, current_action))
if action_list:
for action in action_list:
action()
class ExecutionGraphObserver:
"""Execution Graph Observer
Each process can have a single ExecutionGraphObserver instance. The observer
can be added to record function callbacks by calling register_callback()
explicitly. Without calling unregister_callback(), repeated calls to
register_callback() will not add additional observers to record function
callbacks. Once an ExecutionGraphObserver is created, the start() and stop()
methods control when the event data is recorded.
Deleting or calling unregister_callback() will remove the observer from the
record function callbacks, finalize the output file, and will stop
incurring any overheads.
"""
def __init__(self):
"""
Initializes the default states.
"""
self._registered = False
self._execution_graph_running = False
def __del__(self):
"""
Calls unregister_callback() to make sure to finalize outputs.
"""
self.unregister_callback()
def register_callback(self, output_file_path: str):
"""
Adds EG observer to record function callbacks. The data will be
written to output_file_path.
"""
if not self._registered:
self._output_file_path = output_file_path
self._registered = _add_execution_graph_observer(output_file_path)
def unregister_callback(self):
"""
Removes EG observer from record function callbacks.
"""
if self._registered:
self.stop()
_remove_execution_graph_observer()
self._registered = False
def start(self):
"""
Starts capturing.
"""
if self._registered and not self._execution_graph_running:
_enable_execution_graph_observer()
self._execution_graph_running = True
def stop(self):
"""
Stops capturing.
"""
if self._execution_graph_running:
_disable_execution_graph_observer()
self._execution_graph_running = False
def get_output_file_path(self) -> str:
"""
Returns the output file name.
"""
return self._output_file_path
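# A minimal lifecycle sketch (output path and workload are hypothetical):
# register once, bracket the region of interest with start()/stop(), then
# unregister to finalize the output file.
#
#     eg = ExecutionGraphObserver()
#     eg.register_callback("/tmp/execution_graph.json")
#     eg.start()
#     model(inputs)   # hypothetical workload
#     eg.stop()
#     eg.unregister_callback()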
| pytorch-master | torch/profiler/profiler.py |
import os
import site
import sys
import typing
import torch
def _prefix_regex() -> typing.List[str]:
raw_paths = (
site.getsitepackages() +
sys.path +
[site.getuserbase()] +
[site.getusersitepackages()] +
[os.path.dirname(os.path.dirname(torch.__file__))]
)
path_prefixes = sorted({os.path.abspath(i) for i in raw_paths}, reverse=True)
assert all(isinstance(i, str) for i in path_prefixes)
return [i + os.sep for i in path_prefixes]
| pytorch-master | torch/profiler/python_tracer.py |
from collections import deque
from dataclasses import dataclass
import re
from typing import Dict, List
from torch.profiler import DeviceType
from torch.autograd.profiler import profile
from torch.autograd import _KinetoEvent
@dataclass
class EventMetrics:
duration_time_ns: int = 0
self_time_ns: int = 0
idle_time_ns: int = 0
queue_depth: int = 0
@property
def fraction_idle_time(self):
if self.duration_time_ns == 0:
return 0.0
return self.idle_time_ns / self.duration_time_ns
@dataclass
class Interval:
start: int
end: int
queue_depth: int = 0
class EventKey:
def __init__(self, event):
self.event = event
def __hash__(self):
return hash(self.event.id)
def __eq__(self, other):
return self.event.id == other.event.id
def __repr__(self):
return f"{self.event.name()}"
def intervals_overlap(self, intervals: List[Interval]):
overlap_time = 0
intervals = sorted(intervals, key=lambda x: x.start)
if intervals:
overlap_start = max(self.event.start_time_ns, intervals[0].start)
overlap_end = min(self.event.end_time_ns, intervals[0].end)
if overlap_start < overlap_end:
overlap_time += overlap_end - overlap_start
i, j = 0, 1
while (j < len(intervals)):
prev_interval = intervals[i]
curr_interval = intervals[j]
j += 1
if prev_interval.end > curr_interval.start:
# Completely subsumed by previous interval
if prev_interval.end > curr_interval.end:
j += 1
continue
else:
curr_interval.start = prev_interval.end
i = j
overlap_start = max(self.event.start_time_ns, curr_interval.start)
overlap_end = min(self.event.end_time_ns, curr_interval.end)
if overlap_start < overlap_end:
overlap_time += overlap_end - overlap_start
return overlap_time
class BasicEvaluation:
def __init__(self, prof: profile):
self.profile = prof
self.metrics: Dict[EventKey, EventMetrics] = {}
self.compute_self_time()
self.event_keys = sorted((e for e in self.metrics.keys()),
key=lambda x: x.event.start_time_ns)
self.events = [e.event for e in self.event_keys]
self.cuda_events: List[_KinetoEvent] = []
self.queue_depth_list = self.compute_queue_depth()
self.compute_idle_time()
def compute_self_time(self):
'''
Computes each event's self time (total time - time in child ops).
'''
assert (self.profile.kineto_results is not None)
stack = deque(self.profile.kineto_results.experimental_event_tree())
# standard iterating dfs
while stack:
curr_event = stack.pop()
self_time = curr_event.duration_time_ns
for child_event in curr_event.children:
self_time -= child_event.duration_time_ns
stack.append(child_event)
assert EventKey(
curr_event
) not in self.metrics, f"Duplicate id: {curr_event.id}, {curr_event.name()}"
self.metrics[EventKey(curr_event)] = EventMetrics(
self_time_ns=self_time)
self.metrics[EventKey(
curr_event)].duration_time_ns = curr_event.duration_time_ns
def compute_queue_depth(self):
'''
Computes queue_depth at each event. This will calculate the queue depth data for
all the events in the tree.
This will return a list of Intervals with the queue depth data of CUDA launches and kernels.
'''
assert (self.profile.kineto_results is not None)
cuda_event_list = self.profile.kineto_results.events()
def is_cuda_launch_kernel(e):
# TODO: find a better way to identify cudaLaunchKernel
return e.name() == "cudaLaunchKernel"
def is_cuda_kernel(e):
# TODO: find a better way to identify CUDA Kernel
return e.device_type() == DeviceType.CUDA and "mem" not in e.name(
).lower()
cuda_launch_events = sorted(
(e for e in cuda_event_list if is_cuda_launch_kernel(e)),
key=lambda x: x.start_us())
cuda_kernel_events = sorted(
(e for e in cuda_event_list if is_cuda_kernel(e)),
key=lambda x: x.start_us())
self.cuda_events = sorted(cuda_launch_events + cuda_kernel_events,
key=lambda x: x.start_us())
kernel_mapping: Dict[_KinetoEvent, int] = {}
last_mapped_kernel = 0
for cuda_launch_event in cuda_launch_events:
index = index_of_first_match(
cuda_kernel_events,
lambda x: x.linked_correlation_id(
) == cuda_launch_event.linked_correlation_id(),
start=last_mapped_kernel)
kernel_mapping[cuda_launch_event] = index
last_mapped_kernel = index if index is not None else last_mapped_kernel
current_kernel_index = 0
spawned_kernel_index = -1
all_events = cuda_launch_events + cuda_kernel_events + self.events
def new_old_event_comparator(event):
if hasattr(event, "start_us"):
return event.start_us() * 1000
if hasattr(event, "start_time_ns"):
return event.start_time_ns
raise Exception("Unknown Event Type")
queue_depth_list: List[Interval] = []
all_events.sort(key=new_old_event_comparator)
for event in all_events:
# Find latest cuda kernel event
if hasattr(event, "start_us"):
start_time = event.start_us() * 1000
end_time = (event.start_us() + event.duration_us()) * 1000
# Find current spawned cuda kernel event
if event in kernel_mapping and kernel_mapping[
event] is not None:
spawned_kernel_index = kernel_mapping[event]
elif hasattr(event, "start_time_ns"):
start_time = event.start_time_ns # type: ignore[attr-defined]
end_time = event.end_time_ns # type: ignore[attr-defined]
while (current_kernel_index < len(cuda_kernel_events) and
(cuda_kernel_events[current_kernel_index].start_us()) * 1000
<= start_time):
current_kernel_index += 1
current_queue_depth = spawned_kernel_index - current_kernel_index + 1
current_queue_depth = max(current_queue_depth, 0)
if hasattr(event, "start_us"):
queue_depth_list.append(
Interval(start_time, end_time, current_queue_depth))
elif hasattr(event, "start_time_ns"):
self.metrics[EventKey(event)].queue_depth = current_queue_depth
return queue_depth_list
def compute_idle_time(self):
'''
Computes idle time of the profile.
'''
# Based on queue_depth_list, we can calculate idle time for all the events
idle = False
idle_start = 0
idle_intervals: List[Interval] = []
if self.queue_depth_list and self.events:
idle_intervals += [
Interval(self.events[0].start_time_ns,
self.queue_depth_list[0].start),
Interval(self.queue_depth_list[-1].end,
self.events[-1].end_time_ns)
]
for data_point in self.queue_depth_list:
if data_point.queue_depth == 0 and not idle:
idle_start = data_point.end
idle = True
if data_point.queue_depth > 0 and idle:
idle_intervals.append(Interval(idle_start, data_point.start))
idle = False
event_list = [e.event for e in self.metrics.keys()]
for event in event_list:
self.metrics[EventKey(event)].idle_time_ns = EventKey(
event).intervals_overlap(idle_intervals)
def rank_events(self, length):
'''
Filter and Rank the events based on some heuristics:
1) Events that are in the falling phase of the queue depth.
2) Events that have a high idle_time, self_time difference.
Parameters:
length: The number of events to return.
'''
# Find the interval when qd is falling to 0
import torch
queue_depth_list = list(reversed(self.queue_depth_list))
qd_values = [e.queue_depth for e in queue_depth_list]
bottom_threshold = 0
top_threshold = 4
decrease_interval = []
i = 0
while (i < len(qd_values)):
if qd_values[i] > bottom_threshold:
i += 1
continue
for j in range(i + 1, len(qd_values)):
# Find next zero and if the max value between them exceeds
# the threshold, then we have a falling interval
next_minimum_idx = index_of_first_match(
qd_values, lambda x: x <= bottom_threshold, start=j)
peak_idx = argmax(qd_values, start=j, end=next_minimum_idx)
# if it is a valid peak, we add it to the list and continue
if peak_idx is not None and qd_values[
peak_idx] >= top_threshold:
decrease_interval.append(
Interval(queue_depth_list[peak_idx].start,
queue_depth_list[i].start))
i = next_minimum_idx if next_minimum_idx is not None else i
break
i += 1
# Filter out events that are not in the decrease interval
event_list = [
event for event in self.metrics.keys()
if event.intervals_overlap(decrease_interval)
]
if event_list:
self_time = torch.tensor(
[self.metrics[event].self_time_ns for event in event_list],
dtype=torch.float32)
idle_time = torch.tensor([
self.metrics[event].fraction_idle_time for event in event_list
], dtype=torch.float32)
normalized_gain = (idle_time -
torch.mean(idle_time)) / torch.std(idle_time)
normalized_self = (self_time -
torch.mean(self_time)) / torch.std(self_time)
heuristic_score_list = normalized_gain + 0.6 * normalized_self
# Sort events by heuristic
event_list = [
event
for _, event in sorted(zip(heuristic_score_list, event_list),
key=lambda x: x[0],
reverse=True)
]
event_list = event_list[:length]
return event_list
def get_optimizable_events(self,
length: int = 1,
print_enable: bool = True):
event_list = self.rank_events(length)
if not print_enable:
return event_list
output = "Optimizable events:\n" if event_list else "No events to optimize\n"
output += "\n".join([
f"""{'-'*80}
Event: {event}
Source code location: {source_code_location(event.event)}
Percentage idle time: {self.metrics[event].fraction_idle_time * 100:.2f}%
{'-'*80}""" for event in event_list
])
if print_enable:
print(output)
return event_list
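# A minimal usage sketch (not part of this module): `prof` is assumed to be a
# completed torch.autograd.profiler.profile run backed by kineto results.
#
#     evaluation = BasicEvaluation(prof)
#     top_events = evaluation.get_optimizable_events(length=3, print_enable=True)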
def index_of_first_match(seq, predicate, start=0, end=None):
if end is None or end >= len(seq):
end = len(seq)
for i in range(start, end):
if predicate(seq[i]):
return i
return None
def argmax(seq, key=lambda x: x, start=0, end=None):
seq = seq[start:end]
if len(seq) == 0:
return None
return seq.index(max(seq, key=key)) + start
def source_code_location(event):
while (event is not None):
match = re.search(r"\.py\(.*\)", event.name())
if (match is None):
event = event.parent
continue
return event.name()
return "No source code location found"
| pytorch-master | torch/profiler/_utils.py |
# The Tensor classes are added to this module by python_tensor.cpp
from typing import Optional, Tuple, List, Union
import torch
from torch._C import _add_docstr, _sparse # type: ignore[attr-defined]
from torch import Tensor
# A workaround to support both TorchScript and MyPy:
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from torch.types import _dtype as DType
DimOrDims = Optional[Union[int, Tuple[int], List[int]]]
else:
# The JIT doesn't understand Union, nor torch.dtype here
DType = int
DimOrDims = Optional[Tuple[int]]
__all__ = [
'addmm',
'mm',
'sum',
'softmax',
'log_softmax',
]
addmm = _add_docstr(_sparse._sparse_addmm, r"""
sparse.addmm(mat, mat1, mat2, *, beta=1., alpha=1.) -> Tensor
This function does the exact same thing as :func:`torch.addmm` in the forward,
except that it supports backward for sparse COO matrix :attr:`mat1`.
When :attr:`mat1` is a COO tensor it must have `sparse_dim = 2`.
When inputs are COO tensors, this function also supports backward for both inputs.
Supports both CSR and COO storage formats.
.. note::
This function doesn't support computing derivatives with respect to CSR matrices.
Args:
mat (Tensor): a dense matrix to be added
mat1 (Tensor): a sparse matrix to be multiplied
mat2 (Tensor): a dense matrix to be multiplied
beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
""")
mm = _add_docstr(_sparse._sparse_mm, r"""
Performs a matrix multiplication of the sparse matrix :attr:`mat1`
and the (sparse or strided) matrix :attr:`mat2`. Similar to :func:`torch.mm`, if :attr:`mat1` is a
:math:`(n \times m)` tensor, :attr:`mat2` is a :math:`(m \times p)` tensor, out will be a
:math:`(n \times p)` tensor.
When :attr:`mat1` is a COO tensor it must have `sparse_dim = 2`.
When inputs are COO tensors, this function also supports backward for both inputs.
Supports both CSR and COO storage formats.
.. note::
This function doesn't support computing derivatives with respect to CSR matrices.
Args:
mat1 (Tensor): the first sparse matrix to be multiplied
mat2 (Tensor): the second matrix to be multiplied, which could be sparse or dense
Shape:
The format of the output tensor of this function follows:
- sparse x sparse -> sparse
- sparse x dense -> dense
Example::
>>> a = torch.randn(2, 3).to_sparse().requires_grad_(True)
>>> a
tensor(indices=tensor([[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]]),
values=tensor([ 1.5901, 0.0183, -0.6146, 1.8061, -0.0112, 0.6302]),
size=(2, 3), nnz=6, layout=torch.sparse_coo, requires_grad=True)
>>> b = torch.randn(3, 2, requires_grad=True)
>>> b
tensor([[-0.6479, 0.7874],
[-1.2056, 0.5641],
[-1.1716, -0.9923]], requires_grad=True)
>>> y = torch.sparse.mm(a, b)
>>> y
tensor([[-0.3323, 1.8723],
[-1.8951, 0.7904]], grad_fn=<SparseAddmmBackward>)
>>> y.sum().backward()
>>> a.grad
tensor(indices=tensor([[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]]),
values=tensor([ 0.1394, -0.6415, -2.1639, 0.1394, -0.6415, -2.1639]),
size=(2, 3), nnz=6, layout=torch.sparse_coo)
""")
sampled_addmm = _add_docstr(_sparse.sparse_sampled_addmm, r"""
sparse.sampled_addmm(input, mat1, mat2, *, beta=1., alpha=1., out=None) -> Tensor
Performs a matrix multiplication of the dense matrices :attr:`mat1` and :attr:`mat2` at the locations
specified by the sparsity pattern of :attr:`input`. The matrix :attr:`input` is added to the final result.
Mathematically this performs the following operation:
.. math::
\text{out} = \alpha\ (\text{mat1} \mathbin{@} \text{mat2})*\text{spy}(\text{input}) + \beta\ \text{input}
where :math:`\text{spy}(\text{input})` is the sparsity pattern matrix of :attr:`input`, :attr:`alpha`
and :attr:`beta` are the scaling factors.
:math:`\text{spy}(\text{input})` has value 1 at the positions where :attr:`input` has non-zero values, and 0 elsewhere.
.. note::
:attr:`input` must be a sparse CSR tensor. :attr:`mat1` and :attr:`mat2` must be dense tensors.
This function is implemented only for tensors on CUDA devices.
Args:
input (Tensor): a sparse CSR matrix of shape `(m, n)` to be added and used to compute
the sampled matrix multiplication
mat1 (Tensor): a dense matrix of shape `(m, k)` to be multiplied
mat2 (Tensor): a dense matrix of shape `(k, n)` to be multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Examples::
>>> input = torch.eye(3, device='cuda').to_sparse_csr()
>>> mat1 = torch.randn(3, 5, device='cuda')
>>> mat2 = torch.randn(5, 3, device='cuda')
>>> torch.sparse.sampled_addmm(input, mat1, mat2)
tensor(crow_indices=tensor([0, 1, 2, 3]),
col_indices=tensor([0, 1, 2]),
values=tensor([ 0.2847, -0.7805, -0.1900]), device='cuda:0',
size=(3, 3), nnz=3, layout=torch.sparse_csr)
>>> torch.sparse.sampled_addmm(input, mat1, mat2).to_dense()
tensor([[ 0.2847, 0.0000, 0.0000],
[ 0.0000, -0.7805, 0.0000],
[ 0.0000, 0.0000, -0.1900]], device='cuda:0')
>>> torch.sparse.sampled_addmm(input, mat1, mat2, beta=0.5, alpha=0.5)
tensor(crow_indices=tensor([0, 1, 2, 3]),
col_indices=tensor([0, 1, 2]),
values=tensor([ 0.1423, -0.3903, -0.0950]), device='cuda:0',
size=(3, 3), nnz=3, layout=torch.sparse_csr)
""")
def sum(input: Tensor, dim: DimOrDims = None,
dtype: Optional[DType] = None) -> Tensor:
r"""
Returns the sum of each row of the sparse tensor :attr:`input` in the given
dimensions :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them. When summing over all ``sparse_dim``, this method
returns a dense tensor instead of a sparse tensor.
All summed :attr:`dim` are squeezed (see :func:`torch.squeeze`), resulting in an output
tensor having :attr:`dim` fewer dimensions than :attr:`input`.
During backward, only gradients at ``nnz`` locations of :attr:`input`
will propagate back. Note that the gradient of :attr:`input` is coalesced.
Args:
input (Tensor): the input sparse tensor
dim (int or tuple of ints): a dimension or a list of dimensions to reduce. Default: reduce
over all dims.
dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
Default: dtype of :attr:`input`.
Example::
>>> nnz = 3
>>> dims = [5, 5, 2, 3]
>>> I = torch.cat([torch.randint(0, dims[0], size=(nnz,)),
torch.randint(0, dims[1], size=(nnz,))], 0).reshape(2, nnz)
>>> V = torch.randn(nnz, dims[2], dims[3])
>>> size = torch.Size(dims)
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> S = torch.sparse_coo_tensor(I, V, size)
>>> S
tensor(indices=tensor([[2, 0, 3],
[2, 4, 1]]),
values=tensor([[[-0.6438, -1.6467, 1.4004],
[ 0.3411, 0.0918, -0.2312]],
[[ 0.5348, 0.0634, -2.0494],
[-0.7125, -1.0646, 2.1844]],
[[ 0.1276, 0.1874, -0.6334],
[-1.9682, -0.5340, 0.7483]]]),
size=(5, 5, 2, 3), nnz=3, layout=torch.sparse_coo)
# when summing over only part of the sparse dims, a sparse tensor is returned
>>> torch.sparse.sum(S, [1, 3])
tensor(indices=tensor([[0, 2, 3]]),
values=tensor([[-1.4512, 0.4073],
[-0.8901, 0.2017],
[-0.3183, -1.7539]]),
size=(5, 2), nnz=3, layout=torch.sparse_coo)
# when summing over all sparse dims, a dense tensor is returned
# with the summed dims squeezed
>>> torch.sparse.sum(S, [0, 1, 3])
tensor([-2.6596, -1.1450])
"""
if dtype is None:
if dim is not None:
return torch._sparse_sum(input, dim)
else:
return torch._sparse_sum(input)
else:
if dim is not None:
return torch._sparse_sum(input, dim, dtype=dtype)
else:
return torch._sparse_sum(input, dtype=dtype)
softmax = _add_docstr(_sparse._sparse_softmax, r"""
sparse.softmax(input, dim, *, dtype=None) -> Tensor
Applies a softmax function.
Softmax is defined as:
:math:`\text{Softmax}(x_{i}) = \frac{exp(x_i)}{\sum_j exp(x_j)}`
where :math:`i, j` run over sparse tensor indices and unspecified
entries are ignored. This is equivalent to defining unspecified
entries as negative infinity so that :math:`exp(x_k) = 0` when the
entry with index :math:`k` has not been specified.
It is applied to all slices along `dim`, and will re-scale them so
that the elements lie in the range `[0, 1]` and sum to 1.
Args:
input (Tensor): input
dim (int): A dimension along which softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
cast to :attr:`dtype` before the operation is
performed. This is useful for preventing data type
overflows. Default: None
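Example (a small illustrative sketch; the input values are random, so the
printed result is omitted here)::
    >>> x = torch.randn(2, 3).to_sparse()
    >>> y = torch.sparse.softmax(x, dim=1)
    >>> y.to_dense().sum(dim=1)  # each row sums to 1 over the specified entries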
""")
log_softmax = _add_docstr(_sparse._sparse_log_softmax, r"""
sparse.log_softmax(input, dim, *, dtype=None) -> Tensor
Applies a softmax function followed by logarithm.
See :class:`~torch.sparse.softmax` for more details.
Args:
input (Tensor): input
dim (int): A dimension along which softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
cast to :attr:`dtype` before the operation is
performed. This is useful for preventing data type
overflows. Default: None
""")
spdiags = _add_docstr(
_sparse._spdiags,
r"""
sparse.spdiags(diagonals, offsets, shape, layout=None) -> Tensor
Creates a sparse 2D tensor by placing the values from rows of
:attr:`diagonals` along specified diagonals of the output
The :attr:`offsets` tensor controls which diagonals are set.
- If :attr:`offsets[i]` = 0, it is the main diagonal
- If :attr:`offsets[i]` < 0, it is below the main diagonal
- If :attr:`offsets[i]` > 0, it is above the main diagonal
The number of rows in :attr:`diagonals` must match the length of :attr:`offsets`,
and an offset may not be repeated.
Args:
diagonals (Tensor): Matrix storing diagonals row-wise
offsets (Tensor): The diagonals to be set, stored as a vector
shape (2-tuple of ints): The desired shape of the result
Keyword args:
layout (:class:`torch.layout`, optional): The desired layout of the
returned tensor. ``torch.sparse_coo``, ``torch.sparse_csc`` and ``torch.sparse_csr``
are supported. Default: ``torch.sparse_coo``
Examples:
Set the main and first two lower diagonals of a matrix::
>>> diags = torch.arange(9).reshape(3, 3)
>>> diags
tensor([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> s = torch.sparse.spdiags(diags, torch.tensor([0, -1, -2]), (3, 3))
>>> s
tensor(indices=tensor([[0, 1, 2, 1, 2, 2],
[0, 1, 2, 0, 1, 0]]),
values=tensor([0, 1, 2, 3, 4, 6]),
size=(3, 3), nnz=6, layout=torch.sparse_coo)
>>> s.to_dense()
tensor([[0, 0, 0],
[3, 1, 0],
[6, 4, 2]])
Change the output layout::
>>> diags = torch.arange(9).reshape(3, 3)
>>> diags
tensor([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> s = torch.sparse.spdiags(diags, torch.tensor([0, -1, -2]), (3, 3), layout=torch.sparse_csr)
>>> s
tensor(crow_indices=tensor([0, 1, 3, 6]),
col_indices=tensor([0, 0, 1, 0, 1, 2]),
values=tensor([0, 3, 1, 6, 4, 2]), size=(3, 3), nnz=6,
layout=torch.sparse_csr)
>>> s.to_dense()
tensor([[0, 0, 0],
[3, 1, 0],
[6, 4, 2]])
Set partial diagonals of a large output::
>>> diags = torch.tensor([[1, 2], [3, 4]])
>>> offsets = torch.tensor([0, -1])
>>> torch.sparse.spdiags(diags, offsets, (5, 5)).to_dense()
tensor([[1, 0, 0, 0, 0],
[3, 2, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
.. note::
When setting the values along a given diagonal the index into the diagonal
and the index into the row of :attr:`diagonals` is taken as the
column index in the output. This has the effect that when setting a diagonal
with a positive offset `k` the first value along that diagonal will be
the value in position `k` of the row of :attr:`diagonals`
Specifying a positive offset::
>>> diags = torch.tensor([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
>>> torch.sparse.spdiags(diags, torch.tensor([0, 1, 2]), (5, 5)).to_dense()
tensor([[1, 2, 3, 0, 0],
[0, 2, 3, 0, 0],
[0, 0, 3, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
""")
| pytorch-master | torch/sparse/__init__.py |
| pytorch-master | torch/nested/__init__.py |
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all
__all__ = ['Laplace']
class Laplace(Distribution):
r"""
Creates a Laplace distribution parameterized by :attr:`loc` and :attr:`scale`.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Laplace(torch.tensor([0.0]), torch.tensor([1.0]))
>>> m.sample() # Laplace distributed with loc=0, scale=1
tensor([ 0.1046])
Args:
loc (float or Tensor): mean of the distribution
scale (float or Tensor): scale of the distribution
"""
arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
support = constraints.real
has_rsample = True
@property
def mean(self):
return self.loc
@property
def mode(self):
return self.loc
@property
def variance(self):
return 2 * self.scale.pow(2)
@property
def stddev(self):
return (2 ** 0.5) * self.scale
def __init__(self, loc, scale, validate_args=None):
self.loc, self.scale = broadcast_all(loc, scale)
if isinstance(loc, Number) and isinstance(scale, Number):
batch_shape = torch.Size()
else:
batch_shape = self.loc.size()
super(Laplace, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Laplace, _instance)
batch_shape = torch.Size(batch_shape)
new.loc = self.loc.expand(batch_shape)
new.scale = self.scale.expand(batch_shape)
super(Laplace, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
finfo = torch.finfo(self.loc.dtype)
if torch._C._get_tracing_state():
# [JIT WORKAROUND] lack of support for .uniform_()
u = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device) * 2 - 1
return self.loc - self.scale * u.sign() * torch.log1p(-u.abs().clamp(min=finfo.tiny))
u = self.loc.new(shape).uniform_(finfo.eps - 1, 1)
# TODO: If we ever implement tensor.nextafter, below is what we want ideally.
# u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5)
return self.loc - self.scale * u.sign() * torch.log1p(-u.abs())
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
return -torch.log(2 * self.scale) - torch.abs(value - self.loc) / self.scale
def cdf(self, value):
if self._validate_args:
self._validate_sample(value)
return 0.5 - 0.5 * (value - self.loc).sign() * torch.expm1(-(value - self.loc).abs() / self.scale)
def icdf(self, value):
term = value - 0.5
return self.loc - self.scale * (term).sign() * torch.log1p(-2 * term.abs())
def entropy(self):
return 1 + torch.log(2 * self.scale)
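# Editor's sketch (not part of the original file): a quick check that cdf and
# icdf round-trip and that log_prob matches the closed-form density used above.
if __name__ == "__main__":
    m = Laplace(torch.tensor([0.0]), torch.tensor([1.0]))
    x = m.rsample((5,))
    # icdf(cdf(x)) should recover x up to floating-point error
    assert torch.allclose(m.icdf(m.cdf(x)), x, atol=1e-5)
    expected = -torch.log(2 * m.scale) - (x - m.loc).abs() / m.scale
    assert torch.allclose(m.log_prob(x), expected)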
| pytorch-master | torch/distributions/laplace.py |
import torch
from numbers import Number
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import SigmoidTransform
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, clamp_probs
__all__ = ['LogitRelaxedBernoulli', 'RelaxedBernoulli']
class LogitRelaxedBernoulli(Distribution):
r"""
Creates a LogitRelaxedBernoulli distribution parameterized by :attr:`probs`
or :attr:`logits` (but not both), which is the logit of a RelaxedBernoulli
distribution.
Samples are logits of values in (0, 1). See [1] for more details.
Args:
temperature (Tensor): relaxation temperature
probs (Number, Tensor): the probability of sampling `1`
logits (Number, Tensor): the log-odds of sampling `1`
[1] The Concrete Distribution: A Continuous Relaxation of Discrete Random
Variables (Maddison et al, 2017)
[2] Categorical Reparametrization with Gumbel-Softmax
(Jang et al, 2017)
"""
arg_constraints = {'probs': constraints.unit_interval,
'logits': constraints.real}
support = constraints.real
def __init__(self, temperature, probs=None, logits=None, validate_args=None):
self.temperature = temperature
if (probs is None) == (logits is None):
raise ValueError("Either `probs` or `logits` must be specified, but not both.")
if probs is not None:
is_scalar = isinstance(probs, Number)
self.probs, = broadcast_all(probs)
else:
is_scalar = isinstance(logits, Number)
self.logits, = broadcast_all(logits)
self._param = self.probs if probs is not None else self.logits
if is_scalar:
batch_shape = torch.Size()
else:
batch_shape = self._param.size()
super(LogitRelaxedBernoulli, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(LogitRelaxedBernoulli, _instance)
batch_shape = torch.Size(batch_shape)
new.temperature = self.temperature
if 'probs' in self.__dict__:
new.probs = self.probs.expand(batch_shape)
new._param = new.probs
if 'logits' in self.__dict__:
new.logits = self.logits.expand(batch_shape)
new._param = new.logits
super(LogitRelaxedBernoulli, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._param.new(*args, **kwargs)
@lazy_property
def logits(self):
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
@property
def param_shape(self):
return self._param.size()
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
probs = clamp_probs(self.probs.expand(shape))
uniforms = clamp_probs(torch.rand(shape, dtype=probs.dtype, device=probs.device))
return (uniforms.log() - (-uniforms).log1p() + probs.log() - (-probs).log1p()) / self.temperature
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
logits, value = broadcast_all(self.logits, value)
diff = logits - value.mul(self.temperature)
return self.temperature.log() + diff - 2 * diff.exp().log1p()
class RelaxedBernoulli(TransformedDistribution):
r"""
Creates a RelaxedBernoulli distribution, parametrized by
:attr:`temperature`, and either :attr:`probs` or :attr:`logits`
(but not both). This is a relaxed version of the `Bernoulli` distribution,
so the values are in (0, 1), and it has reparameterizable samples.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = RelaxedBernoulli(torch.tensor([2.2]),
... torch.tensor([0.1, 0.2, 0.3, 0.99]))
>>> m.sample()
tensor([ 0.2951, 0.3442, 0.8918, 0.9021])
Args:
temperature (Tensor): relaxation temperature
probs (Number, Tensor): the probability of sampling `1`
logits (Number, Tensor): the log-odds of sampling `1`
"""
arg_constraints = {'probs': constraints.unit_interval,
'logits': constraints.real}
support = constraints.unit_interval
has_rsample = True
def __init__(self, temperature, probs=None, logits=None, validate_args=None):
base_dist = LogitRelaxedBernoulli(temperature, probs, logits)
super(RelaxedBernoulli, self).__init__(base_dist,
SigmoidTransform(),
validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(RelaxedBernoulli, _instance)
return super(RelaxedBernoulli, self).expand(batch_shape, _instance=new)
@property
def temperature(self):
return self.base_dist.temperature
@property
def logits(self):
return self.base_dist.logits
@property
def probs(self):
return self.base_dist.probs
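# Editor's sketch (not part of the original file): the temperature controls how
# close RelaxedBernoulli samples sit to the {0, 1} corners. Values below are
# arbitrary illustration data.
if __name__ == "__main__":
    probs = torch.tensor([0.1, 0.5, 0.9])
    sharp = RelaxedBernoulli(torch.tensor([0.1]), probs).rsample((1000,))
    smooth = RelaxedBernoulli(torch.tensor([5.0]), probs).rsample((1000,))
    print(sharp.round().mean(0))  # roughly probs: low temperature is near-Bernoulli
    print(smooth.mean(0))         # pulled toward 0.5 by the high temperature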
| pytorch-master | torch/distributions/relaxed_bernoulli.py |
import torch
from torch._six import nan
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import probs_to_logits, logits_to_probs, lazy_property
__all__ = ['Categorical']
class Categorical(Distribution):
r"""
Creates a categorical distribution parameterized by either :attr:`probs` or
:attr:`logits` (but not both).
.. note::
It is equivalent to the distribution that :func:`torch.multinomial`
samples from.
Samples are integers from :math:`\{0, \ldots, K-1\}` where `K` is ``probs.size(-1)``.
If `probs` is 1-dimensional with length `K`, each element is the relative probability
of sampling the class at that index.
If `probs` is N-dimensional, the first N-1 dimensions are treated as a batch of
relative probability vectors.
.. note:: The `probs` argument must be non-negative, finite and have a non-zero sum,
and it will be normalized to sum to 1 along the last dimension. :attr:`probs`
will return this normalized value.
The `logits` argument will be interpreted as unnormalized log probabilities
and can therefore be any real number. It will likewise be normalized so that
the resulting probabilities sum to 1 along the last dimension. :attr:`logits`
will return this normalized value.
See also: :func:`torch.multinomial`
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Categorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
>>> m.sample() # equal probability of 0, 1, 2, 3
tensor(3)
Args:
probs (Tensor): event probabilities
logits (Tensor): event log probabilities (unnormalized)
"""
arg_constraints = {'probs': constraints.simplex,
'logits': constraints.real_vector}
has_enumerate_support = True
def __init__(self, probs=None, logits=None, validate_args=None):
if (probs is None) == (logits is None):
raise ValueError("Either `probs` or `logits` must be specified, but not both.")
if probs is not None:
if probs.dim() < 1:
raise ValueError("`probs` parameter must be at least one-dimensional.")
self.probs = probs / probs.sum(-1, keepdim=True)
else:
if logits.dim() < 1:
raise ValueError("`logits` parameter must be at least one-dimensional.")
# Normalize
self.logits = logits - logits.logsumexp(dim=-1, keepdim=True)
self._param = self.probs if probs is not None else self.logits
self._num_events = self._param.size()[-1]
batch_shape = self._param.size()[:-1] if self._param.ndimension() > 1 else torch.Size()
super(Categorical, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Categorical, _instance)
batch_shape = torch.Size(batch_shape)
param_shape = batch_shape + torch.Size((self._num_events,))
if 'probs' in self.__dict__:
new.probs = self.probs.expand(param_shape)
new._param = new.probs
if 'logits' in self.__dict__:
new.logits = self.logits.expand(param_shape)
new._param = new.logits
new._num_events = self._num_events
super(Categorical, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._param.new(*args, **kwargs)
@constraints.dependent_property(is_discrete=True, event_dim=0)
def support(self):
return constraints.integer_interval(0, self._num_events - 1)
@lazy_property
def logits(self):
return probs_to_logits(self.probs)
@lazy_property
def probs(self):
return logits_to_probs(self.logits)
@property
def param_shape(self):
return self._param.size()
@property
def mean(self):
return torch.full(self._extended_shape(), nan, dtype=self.probs.dtype, device=self.probs.device)
@property
def mode(self):
return self.probs.argmax(axis=-1)
@property
def variance(self):
return torch.full(self._extended_shape(), nan, dtype=self.probs.dtype, device=self.probs.device)
def sample(self, sample_shape=torch.Size()):
if not isinstance(sample_shape, torch.Size):
sample_shape = torch.Size(sample_shape)
probs_2d = self.probs.reshape(-1, self._num_events)
samples_2d = torch.multinomial(probs_2d, sample_shape.numel(), True).T
return samples_2d.reshape(self._extended_shape(sample_shape))
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
value = value.long().unsqueeze(-1)
value, log_pmf = torch.broadcast_tensors(value, self.logits)
value = value[..., :1]
return log_pmf.gather(-1, value).squeeze(-1)
def entropy(self):
min_real = torch.finfo(self.logits.dtype).min
logits = torch.clamp(self.logits, min=min_real)
p_log_p = logits * self.probs
return -p_log_p.sum(-1)
def enumerate_support(self, expand=True):
num_events = self._num_events
values = torch.arange(num_events, dtype=torch.long, device=self._param.device)
values = values.view((-1,) + (1,) * len(self._batch_shape))
if expand:
values = values.expand((-1,) + self._batch_shape)
return values
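# Editor's sketch (not part of the original file): sampling and scoring with a
# batched Categorical; enumerate_support lists every class index per batch row.
if __name__ == "__main__":
    probs = torch.tensor([[0.1, 0.2, 0.7], [0.5, 0.25, 0.25]])
    m = Categorical(probs)
    s = m.sample((4,))             # shape (4, 2): four draws per batch row
    print(s, m.log_prob(s).shape)  # log_prob has the same (4, 2) shape
    print(m.enumerate_support())   # shape (3, 2) when expand=True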
| pytorch-master | torch/distributions/categorical.py |
import functools
import math
import numbers
import operator
import weakref
from typing import List
import torch
import torch.nn.functional as F
from torch.distributions import constraints
from torch.distributions.utils import (_sum_rightmost, broadcast_all,
lazy_property, tril_matrix_to_vec,
vec_to_tril_matrix)
from torch.nn.functional import pad
from torch.nn.functional import softplus
__all__ = [
'AbsTransform',
'AffineTransform',
'CatTransform',
'ComposeTransform',
'CorrCholeskyTransform',
'CumulativeDistributionTransform',
'ExpTransform',
'IndependentTransform',
'LowerCholeskyTransform',
'PowerTransform',
'ReshapeTransform',
'SigmoidTransform',
'SoftplusTransform',
'TanhTransform',
'SoftmaxTransform',
'StackTransform',
'StickBreakingTransform',
'Transform',
'identity_transform',
]
class Transform(object):
"""
Abstract class for invertible transformations with computable log
det jacobians. They are primarily used in
:class:`torch.distributions.TransformedDistribution`.
Caching is useful for transforms whose inverses are either expensive or
numerically unstable. Note that care must be taken with memoized values
since the autograd graph may be reversed. For example while the following
works with or without caching::
y = t(x)
t.log_abs_det_jacobian(x, y).backward() # x will receive gradients.
However the following will error when caching due to dependency reversal::
y = t(x)
z = t.inv(y)
grad(z.sum(), [y]) # error because z is x
Derived classes should implement one or both of :meth:`_call` or
:meth:`_inverse`. Derived classes that set `bijective=True` should also
implement :meth:`log_abs_det_jacobian`.
Args:
cache_size (int): Size of cache. If zero, no caching is done. If one,
the latest single value is cached. Only 0 and 1 are supported.
Attributes:
domain (:class:`~torch.distributions.constraints.Constraint`):
The constraint representing valid inputs to this transform.
codomain (:class:`~torch.distributions.constraints.Constraint`):
The constraint representing valid outputs to this transform
which are inputs to the inverse transform.
bijective (bool): Whether this transform is bijective. A transform
``t`` is bijective iff ``t.inv(t(x)) == x`` and
``t(t.inv(y)) == y`` for every ``x`` in the domain and ``y`` in
the codomain. Transforms that are not bijective should at least
maintain the weaker pseudoinverse properties
``t(t.inv(t(x))) == t(x)`` and ``t.inv(t(t.inv(y))) == t.inv(y)``.
sign (int or Tensor): For bijective univariate transforms, this
should be +1 or -1 depending on whether transform is monotone
increasing or decreasing.
"""
bijective = False
domain: constraints.Constraint
codomain: constraints.Constraint
def __init__(self, cache_size=0):
self._cache_size = cache_size
self._inv = None
if cache_size == 0:
pass # default behavior
elif cache_size == 1:
self._cached_x_y = None, None
else:
raise ValueError('cache_size must be 0 or 1')
super(Transform, self).__init__()
def __getstate__(self):
state = self.__dict__.copy()
state["_inv"] = None
return state
@property
def event_dim(self):
if self.domain.event_dim == self.codomain.event_dim:
return self.domain.event_dim
raise ValueError("Please use either .domain.event_dim or .codomain.event_dim")
@property
def inv(self):
"""
Returns the inverse :class:`Transform` of this transform.
This should satisfy ``t.inv.inv is t``.
"""
inv = None
if self._inv is not None:
inv = self._inv()
if inv is None:
inv = _InverseTransform(self)
self._inv = weakref.ref(inv)
return inv
@property
def sign(self):
"""
Returns the sign of the determinant of the Jacobian, if applicable.
In general this only makes sense for bijective transforms.
"""
raise NotImplementedError
def with_cache(self, cache_size=1):
if self._cache_size == cache_size:
return self
if type(self).__init__ is Transform.__init__:
return type(self)(cache_size=cache_size)
raise NotImplementedError("{}.with_cache is not implemented".format(type(self)))
def __eq__(self, other):
return self is other
def __ne__(self, other):
# Necessary for Python2
return not self.__eq__(other)
def __call__(self, x):
"""
Computes the transform `x => y`.
"""
if self._cache_size == 0:
return self._call(x)
x_old, y_old = self._cached_x_y
if x is x_old:
return y_old
y = self._call(x)
self._cached_x_y = x, y
return y
def _inv_call(self, y):
"""
Inverts the transform `y => x`.
"""
if self._cache_size == 0:
return self._inverse(y)
x_old, y_old = self._cached_x_y
if y is y_old:
return x_old
x = self._inverse(y)
self._cached_x_y = x, y
return x
def _call(self, x):
"""
Abstract method to compute forward transformation.
"""
raise NotImplementedError
def _inverse(self, y):
"""
Abstract method to compute inverse transformation.
"""
raise NotImplementedError
def log_abs_det_jacobian(self, x, y):
"""
Computes the log det jacobian `log |dy/dx|` given input and output.
"""
raise NotImplementedError
def __repr__(self):
return self.__class__.__name__ + '()'
def forward_shape(self, shape):
"""
Infers the shape of the forward computation, given the input shape.
Defaults to preserving shape.
"""
return shape
def inverse_shape(self, shape):
"""
Infers the shapes of the inverse computation, given the output shape.
Defaults to preserving shape.
"""
return shape
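# Editor's sketch (not part of the original file): the minimal surface a derived
# class has to implement, per the `Transform` docstring above. `_SquareTransform`
# is a hypothetical name used only for illustration; compare `PowerTransform`
# further below, which generalizes this mapping.
class _SquareTransform(Transform):
    domain = constraints.positive
    codomain = constraints.positive
    bijective = True
    sign = +1
    def _call(self, x):
        return x.pow(2)
    def _inverse(self, y):
        return y.sqrt()
    def log_abs_det_jacobian(self, x, y):
        # |dy/dx| = 2x on the positive reals
        return (2 * x).log()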
class _InverseTransform(Transform):
"""
Inverts a single :class:`Transform`.
This class is private; please instead use the ``Transform.inv`` property.
"""
def __init__(self, transform: Transform):
super(_InverseTransform, self).__init__(cache_size=transform._cache_size)
self._inv: Transform = transform
@constraints.dependent_property(is_discrete=False)
def domain(self):
assert self._inv is not None
return self._inv.codomain
@constraints.dependent_property(is_discrete=False)
def codomain(self):
assert self._inv is not None
return self._inv.domain
@property
def bijective(self):
assert self._inv is not None
return self._inv.bijective
@property
def sign(self):
assert self._inv is not None
return self._inv.sign
@property
def inv(self):
return self._inv
def with_cache(self, cache_size=1):
assert self._inv is not None
return self.inv.with_cache(cache_size).inv
def __eq__(self, other):
if not isinstance(other, _InverseTransform):
return False
assert self._inv is not None
return self._inv == other._inv
def __repr__(self):
return f"{self.__class__.__name__}({repr(self._inv)})"
def __call__(self, x):
assert self._inv is not None
return self._inv._inv_call(x)
def log_abs_det_jacobian(self, x, y):
assert self._inv is not None
return -self._inv.log_abs_det_jacobian(y, x)
def forward_shape(self, shape):
return self._inv.inverse_shape(shape)
def inverse_shape(self, shape):
return self._inv.forward_shape(shape)
class ComposeTransform(Transform):
"""
Composes multiple transforms in a chain.
The transforms being composed are responsible for caching.
Args:
parts (list of :class:`Transform`): A list of transforms to compose.
cache_size (int): Size of cache. If zero, no caching is done. If one,
the latest single value is cached. Only 0 and 1 are supported.
"""
def __init__(self, parts: List[Transform], cache_size=0):
if cache_size:
parts = [part.with_cache(cache_size) for part in parts]
super(ComposeTransform, self).__init__(cache_size=cache_size)
self.parts = parts
def __eq__(self, other):
if not isinstance(other, ComposeTransform):
return False
return self.parts == other.parts
@constraints.dependent_property(is_discrete=False)
def domain(self):
if not self.parts:
return constraints.real
domain = self.parts[0].domain
# Adjust event_dim to be maximum among all parts.
event_dim = self.parts[-1].codomain.event_dim
for part in reversed(self.parts):
event_dim += part.domain.event_dim - part.codomain.event_dim
event_dim = max(event_dim, part.domain.event_dim)
assert event_dim >= domain.event_dim
if event_dim > domain.event_dim:
domain = constraints.independent(domain, event_dim - domain.event_dim)
return domain
@constraints.dependent_property(is_discrete=False)
def codomain(self):
if not self.parts:
return constraints.real
codomain = self.parts[-1].codomain
# Adjust event_dim to be maximum among all parts.
event_dim = self.parts[0].domain.event_dim
for part in self.parts:
event_dim += part.codomain.event_dim - part.domain.event_dim
event_dim = max(event_dim, part.codomain.event_dim)
assert event_dim >= codomain.event_dim
if event_dim > codomain.event_dim:
codomain = constraints.independent(codomain, event_dim - codomain.event_dim)
return codomain
@lazy_property
def bijective(self):
return all(p.bijective for p in self.parts)
@lazy_property
def sign(self):
sign = 1
for p in self.parts:
sign = sign * p.sign
return sign
@property
def inv(self):
inv = None
if self._inv is not None:
inv = self._inv()
if inv is None:
inv = ComposeTransform([p.inv for p in reversed(self.parts)])
self._inv = weakref.ref(inv)
inv._inv = weakref.ref(self)
return inv
def with_cache(self, cache_size=1):
if self._cache_size == cache_size:
return self
return ComposeTransform(self.parts, cache_size=cache_size)
def __call__(self, x):
for part in self.parts:
x = part(x)
return x
def log_abs_det_jacobian(self, x, y):
if not self.parts:
return torch.zeros_like(x)
# Compute intermediates. This will be free if parts[:-1] are all cached.
xs = [x]
for part in self.parts[:-1]:
xs.append(part(xs[-1]))
xs.append(y)
terms = []
event_dim = self.domain.event_dim
for part, x, y in zip(self.parts, xs[:-1], xs[1:]):
terms.append(_sum_rightmost(part.log_abs_det_jacobian(x, y),
event_dim - part.domain.event_dim))
event_dim += part.codomain.event_dim - part.domain.event_dim
return functools.reduce(operator.add, terms)
def forward_shape(self, shape):
for part in self.parts:
shape = part.forward_shape(shape)
return shape
def inverse_shape(self, shape):
for part in reversed(self.parts):
shape = part.inverse_shape(shape)
return shape
def __repr__(self):
fmt_string = self.__class__.__name__ + '(\n '
fmt_string += ',\n '.join([p.__repr__() for p in self.parts])
fmt_string += '\n)'
return fmt_string
identity_transform = ComposeTransform([])
class IndependentTransform(Transform):
"""
Wrapper around another transform to treat
``reinterpreted_batch_ndims``-many extra of the rightmost dimensions as
dependent. This has no effect on the forward or backward transforms, but
does sum out ``reinterpreted_batch_ndims``-many of the rightmost dimensions
in :meth:`log_abs_det_jacobian`.
Args:
base_transform (:class:`Transform`): A base transform.
reinterpreted_batch_ndims (int): The number of extra rightmost
dimensions to treat as dependent.
"""
def __init__(self, base_transform, reinterpreted_batch_ndims, cache_size=0):
super().__init__(cache_size=cache_size)
self.base_transform = base_transform.with_cache(cache_size)
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
def with_cache(self, cache_size=1):
if self._cache_size == cache_size:
return self
return IndependentTransform(self.base_transform,
self.reinterpreted_batch_ndims,
cache_size=cache_size)
@constraints.dependent_property(is_discrete=False)
def domain(self):
return constraints.independent(self.base_transform.domain,
self.reinterpreted_batch_ndims)
@constraints.dependent_property(is_discrete=False)
def codomain(self):
return constraints.independent(self.base_transform.codomain,
self.reinterpreted_batch_ndims)
@property
def bijective(self):
return self.base_transform.bijective
@property
def sign(self):
return self.base_transform.sign
def _call(self, x):
if x.dim() < self.domain.event_dim:
raise ValueError("Too few dimensions on input")
return self.base_transform(x)
def _inverse(self, y):
if y.dim() < self.codomain.event_dim:
raise ValueError("Too few dimensions on input")
return self.base_transform.inv(y)
def log_abs_det_jacobian(self, x, y):
result = self.base_transform.log_abs_det_jacobian(x, y)
result = _sum_rightmost(result, self.reinterpreted_batch_ndims)
return result
def __repr__(self):
return f"{self.__class__.__name__}({repr(self.base_transform)}, {self.reinterpreted_batch_ndims})"
def forward_shape(self, shape):
return self.base_transform.forward_shape(shape)
def inverse_shape(self, shape):
return self.base_transform.inverse_shape(shape)
class ReshapeTransform(Transform):
"""
Unit Jacobian transform to reshape the rightmost part of a tensor.
Note that ``in_shape`` and ``out_shape`` must have the same number of
elements, just as for :meth:`torch.Tensor.reshape`.
Args:
in_shape (torch.Size): The input event shape.
out_shape (torch.Size): The output event shape.
"""
bijective = True
def __init__(self, in_shape, out_shape, cache_size=0):
self.in_shape = torch.Size(in_shape)
self.out_shape = torch.Size(out_shape)
if self.in_shape.numel() != self.out_shape.numel():
raise ValueError("in_shape, out_shape have different numbers of elements")
super().__init__(cache_size=cache_size)
@constraints.dependent_property
def domain(self):
return constraints.independent(constraints.real, len(self.in_shape))
@constraints.dependent_property
def codomain(self):
return constraints.independent(constraints.real, len(self.out_shape))
def with_cache(self, cache_size=1):
if self._cache_size == cache_size:
return self
return ReshapeTransform(self.in_shape, self.out_shape, cache_size=cache_size)
def _call(self, x):
batch_shape = x.shape[:x.dim() - len(self.in_shape)]
return x.reshape(batch_shape + self.out_shape)
def _inverse(self, y):
batch_shape = y.shape[:y.dim() - len(self.out_shape)]
return y.reshape(batch_shape + self.in_shape)
def log_abs_det_jacobian(self, x, y):
batch_shape = x.shape[:x.dim() - len(self.in_shape)]
return x.new_zeros(batch_shape)
def forward_shape(self, shape):
if len(shape) < len(self.in_shape):
raise ValueError("Too few dimensions on input")
cut = len(shape) - len(self.in_shape)
if shape[cut:] != self.in_shape:
raise ValueError("Shape mismatch: expected {} but got {}".format(shape[cut:], self.in_shape))
return shape[:cut] + self.out_shape
def inverse_shape(self, shape):
if len(shape) < len(self.out_shape):
raise ValueError("Too few dimensions on input")
cut = len(shape) - len(self.out_shape)
if shape[cut:] != self.out_shape:
raise ValueError("Shape mismatch: expected {} but got {}".format(shape[cut:], self.out_shape))
return shape[:cut] + self.in_shape
class ExpTransform(Transform):
r"""
Transform via the mapping :math:`y = \exp(x)`.
"""
domain = constraints.real
codomain = constraints.positive
bijective = True
sign = +1
def __eq__(self, other):
return isinstance(other, ExpTransform)
def _call(self, x):
return x.exp()
def _inverse(self, y):
return y.log()
def log_abs_det_jacobian(self, x, y):
return x
class PowerTransform(Transform):
r"""
Transform via the mapping :math:`y = x^{\text{exponent}}`.
"""
domain = constraints.positive
codomain = constraints.positive
bijective = True
sign = +1
def __init__(self, exponent, cache_size=0):
super(PowerTransform, self).__init__(cache_size=cache_size)
self.exponent, = broadcast_all(exponent)
def with_cache(self, cache_size=1):
if self._cache_size == cache_size:
return self
return PowerTransform(self.exponent, cache_size=cache_size)
def __eq__(self, other):
if not isinstance(other, PowerTransform):
return False
return self.exponent.eq(other.exponent).all().item()
def _call(self, x):
return x.pow(self.exponent)
def _inverse(self, y):
return y.pow(1 / self.exponent)
def log_abs_det_jacobian(self, x, y):
return (self.exponent * y / x).abs().log()
def forward_shape(self, shape):
return torch.broadcast_shapes(shape, getattr(self.exponent, "shape", ()))
def inverse_shape(self, shape):
return torch.broadcast_shapes(shape, getattr(self.exponent, "shape", ()))
def _clipped_sigmoid(x):
finfo = torch.finfo(x.dtype)
return torch.clamp(torch.sigmoid(x), min=finfo.tiny, max=1. - finfo.eps)
class SigmoidTransform(Transform):
r"""
Transform via the mapping :math:`y = \frac{1}{1 + \exp(-x)}` and :math:`x = \text{logit}(y)`.
"""
domain = constraints.real
codomain = constraints.unit_interval
bijective = True
sign = +1
def __eq__(self, other):
return isinstance(other, SigmoidTransform)
def _call(self, x):
return _clipped_sigmoid(x)
def _inverse(self, y):
finfo = torch.finfo(y.dtype)
y = y.clamp(min=finfo.tiny, max=1. - finfo.eps)
return y.log() - (-y).log1p()
def log_abs_det_jacobian(self, x, y):
return -F.softplus(-x) - F.softplus(x)
class SoftplusTransform(Transform):
r"""
Transform via the mapping :math:`\text{Softplus}(x) = \log(1 + \exp(x))`.
The implementation reverts to the linear function when :math:`x > 20`.
"""
domain = constraints.real
codomain = constraints.positive
bijective = True
sign = +1
def __eq__(self, other):
return isinstance(other, SoftplusTransform)
def _call(self, x):
return softplus(x)
def _inverse(self, y):
return (-y).expm1().neg().log() + y
def log_abs_det_jacobian(self, x, y):
return -softplus(-x)
class TanhTransform(Transform):
r"""
Transform via the mapping :math:`y = \tanh(x)`.
It is equivalent to
```
ComposeTransform([AffineTransform(0., 2.), SigmoidTransform(), AffineTransform(-1., 2.)])
```
However this might not be numerically stable, thus it is recommended to use `TanhTransform`
instead.
Note that one should use `cache_size=1` when `NaN`/`Inf` values can arise from inverting near the boundary.
"""
domain = constraints.real
codomain = constraints.interval(-1.0, 1.0)
bijective = True
sign = +1
def __eq__(self, other):
return isinstance(other, TanhTransform)
def _call(self, x):
return x.tanh()
def _inverse(self, y):
# We do not clamp to the boundary here as it may degrade the performance of certain algorithms.
# one should use `cache_size=1` instead
return torch.atanh(y)
def log_abs_det_jacobian(self, x, y):
# We use a formula that is more numerically stable, see details in the following link
# https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/bijectors/tanh.py#L69-L80
return 2. * (math.log(2.) - x - softplus(-2. * x))
class AbsTransform(Transform):
r"""
Transform via the mapping :math:`y = |x|`.
"""
domain = constraints.real
codomain = constraints.positive
def __eq__(self, other):
return isinstance(other, AbsTransform)
def _call(self, x):
return x.abs()
def _inverse(self, y):
return y
class AffineTransform(Transform):
r"""
Transform via the pointwise affine mapping :math:`y = \text{loc} + \text{scale} \times x`.
Args:
loc (Tensor or float): Location parameter.
scale (Tensor or float): Scale parameter.
event_dim (int): Optional size of `event_shape`. This should be zero
for univariate random variables, 1 for distributions over vectors,
2 for distributions over matrices, etc.
"""
bijective = True
def __init__(self, loc, scale, event_dim=0, cache_size=0):
super(AffineTransform, self).__init__(cache_size=cache_size)
self.loc = loc
self.scale = scale
self._event_dim = event_dim
@property
def event_dim(self):
return self._event_dim
@constraints.dependent_property(is_discrete=False)
def domain(self):
if self.event_dim == 0:
return constraints.real
return constraints.independent(constraints.real, self.event_dim)
@constraints.dependent_property(is_discrete=False)
def codomain(self):
if self.event_dim == 0:
return constraints.real
return constraints.independent(constraints.real, self.event_dim)
def with_cache(self, cache_size=1):
if self._cache_size == cache_size:
return self
return AffineTransform(self.loc, self.scale, self.event_dim, cache_size=cache_size)
def __eq__(self, other):
if not isinstance(other, AffineTransform):
return False
if isinstance(self.loc, numbers.Number) and isinstance(other.loc, numbers.Number):
if self.loc != other.loc:
return False
else:
if not (self.loc == other.loc).all().item():
return False
if isinstance(self.scale, numbers.Number) and isinstance(other.scale, numbers.Number):
if self.scale != other.scale:
return False
else:
if not (self.scale == other.scale).all().item():
return False
return True
@property
def sign(self):
if isinstance(self.scale, numbers.Real):
return 1 if float(self.scale) > 0 else -1 if float(self.scale) < 0 else 0
return self.scale.sign()
def _call(self, x):
return self.loc + self.scale * x
def _inverse(self, y):
return (y - self.loc) / self.scale
def log_abs_det_jacobian(self, x, y):
shape = x.shape
scale = self.scale
if isinstance(scale, numbers.Real):
result = torch.full_like(x, math.log(abs(scale)))
else:
result = torch.abs(scale).log()
if self.event_dim:
result_size = result.size()[:-self.event_dim] + (-1,)
result = result.view(result_size).sum(-1)
shape = shape[:-self.event_dim]
return result.expand(shape)
def forward_shape(self, shape):
return torch.broadcast_shapes(shape,
getattr(self.loc, "shape", ()),
getattr(self.scale, "shape", ()))
def inverse_shape(self, shape):
return torch.broadcast_shapes(shape,
getattr(self.loc, "shape", ()),
getattr(self.scale, "shape", ()))
class CorrCholeskyTransform(Transform):
r"""
Transforms an unconstrained real vector :math:`x` with length :math:`D*(D-1)/2` into the
Cholesky factor of a D-dimensional correlation matrix. This Cholesky factor is a lower
triangular matrix with positive diagonals and unit Euclidean norm for each row.
The transform is processed as follows:
1. First we convert x into a lower triangular matrix in row order.
2. For each row :math:`X_i` of the lower triangular part, we apply a *signed* version of
class :class:`StickBreakingTransform` to transform :math:`X_i` into a
unit Euclidean length vector using the following steps:
- Scales into the interval :math:`(-1, 1)` domain: :math:`r_i = \tanh(X_i)`.
- Transforms into an unsigned domain: :math:`z_i = r_i^2`.
- Applies :math:`s_i = StickBreakingTransform(z_i)`.
- Transforms back into signed domain: :math:`y_i = sign(r_i) * \sqrt{s_i}`.
"""
domain = constraints.real_vector
codomain = constraints.corr_cholesky
bijective = True
def _call(self, x):
x = torch.tanh(x)
eps = torch.finfo(x.dtype).eps
x = x.clamp(min=-1 + eps, max=1 - eps)
r = vec_to_tril_matrix(x, diag=-1)
# apply stick-breaking on the squared values
# Note that y = sign(r) * sqrt(z * z1m_cumprod)
# = (sign(r) * sqrt(z)) * sqrt(z1m_cumprod) = r * sqrt(z1m_cumprod)
z = r ** 2
z1m_cumprod_sqrt = (1 - z).sqrt().cumprod(-1)
# Diagonal elements must be 1.
r = r + torch.eye(r.shape[-1], dtype=r.dtype, device=r.device)
y = r * pad(z1m_cumprod_sqrt[..., :-1], [1, 0], value=1)
return y
def _inverse(self, y):
# inverse stick-breaking
# See: https://mc-stan.org/docs/2_18/reference-manual/cholesky-factors-of-correlation-matrices-1.html
y_cumsum = 1 - torch.cumsum(y * y, dim=-1)
y_cumsum_shifted = pad(y_cumsum[..., :-1], [1, 0], value=1)
y_vec = tril_matrix_to_vec(y, diag=-1)
y_cumsum_vec = tril_matrix_to_vec(y_cumsum_shifted, diag=-1)
t = y_vec / (y_cumsum_vec).sqrt()
# inverse of tanh
x = ((1 + t) / (1 - t)).log() / 2
return x
def log_abs_det_jacobian(self, x, y, intermediates=None):
# Because domain and codomain are two spaces with different dimensions, determinant of
# Jacobian is not well-defined. We return `log_abs_det_jacobian` of `x` and the
# flattened lower triangular part of `y`.
# See: https://mc-stan.org/docs/2_18/reference-manual/cholesky-factors-of-correlation-matrices-1.html
y1m_cumsum = 1 - (y * y).cumsum(dim=-1)
# by taking diagonal=-2, we don't need to shift z_cumprod to the right
# also works for 2 x 2 matrix
y1m_cumsum_tril = tril_matrix_to_vec(y1m_cumsum, diag=-2)
stick_breaking_logdet = 0.5 * (y1m_cumsum_tril).log().sum(-1)
tanh_logdet = -2 * (x + softplus(-2 * x) - math.log(2.)).sum(dim=-1)
return stick_breaking_logdet + tanh_logdet
def forward_shape(self, shape):
# Reshape from (..., N) to (..., D, D).
if len(shape) < 1:
raise ValueError("Too few dimensions on input")
N = shape[-1]
D = round((0.25 + 2 * N) ** 0.5 + 0.5)
if D * (D - 1) // 2 != N:
raise ValueError("Input is not a flattend lower-diagonal number")
return shape[:-1] + (D, D)
def inverse_shape(self, shape):
# Reshape from (..., D, D) to (..., N).
if len(shape) < 2:
raise ValueError("Too few dimensions on input")
if shape[-2] != shape[-1]:
raise ValueError("Input is not square")
D = shape[-1]
N = D * (D - 1) // 2
return shape[:-2] + (N,)
class SoftmaxTransform(Transform):
r"""
Transform from unconstrained space to the simplex via :math:`y = \exp(x)` then
normalizing.
This is not bijective and cannot be used for HMC. However this acts mostly
coordinate-wise (except for the final normalization), and thus is
appropriate for coordinate-wise optimization algorithms.
"""
domain = constraints.real_vector
codomain = constraints.simplex
def __eq__(self, other):
return isinstance(other, SoftmaxTransform)
def _call(self, x):
logprobs = x
probs = (logprobs - logprobs.max(-1, True)[0]).exp()
return probs / probs.sum(-1, True)
def _inverse(self, y):
probs = y
return probs.log()
def forward_shape(self, shape):
if len(shape) < 1:
raise ValueError("Too few dimensions on input")
return shape
def inverse_shape(self, shape):
if len(shape) < 1:
raise ValueError("Too few dimensions on input")
return shape
class StickBreakingTransform(Transform):
"""
Transform from unconstrained space to the simplex of one additional
dimension via a stick-breaking process.
This transform arises as an iterated sigmoid transform in a stick-breaking
construction of the `Dirichlet` distribution: the first logit is
transformed via sigmoid to the first probability and the probability of
everything else, and then the process recurses.
This is bijective and appropriate for use in HMC; however it mixes
coordinates together and is less appropriate for optimization.
"""
domain = constraints.real_vector
codomain = constraints.simplex
bijective = True
def __eq__(self, other):
return isinstance(other, StickBreakingTransform)
def _call(self, x):
offset = x.shape[-1] + 1 - x.new_ones(x.shape[-1]).cumsum(-1)
z = _clipped_sigmoid(x - offset.log())
z_cumprod = (1 - z).cumprod(-1)
y = pad(z, [0, 1], value=1) * pad(z_cumprod, [1, 0], value=1)
return y
def _inverse(self, y):
y_crop = y[..., :-1]
offset = y.shape[-1] - y.new_ones(y_crop.shape[-1]).cumsum(-1)
sf = 1 - y_crop.cumsum(-1)
# we clamp to make sure that sf is positive which sometimes does not
# happen when y[-1] ~ 0 or y[:-1].sum() ~ 1
sf = torch.clamp(sf, min=torch.finfo(y.dtype).tiny)
x = y_crop.log() - sf.log() + offset.log()
return x
def log_abs_det_jacobian(self, x, y):
offset = x.shape[-1] + 1 - x.new_ones(x.shape[-1]).cumsum(-1)
x = x - offset.log()
# use the identity 1 - sigmoid(x) = exp(-x) * sigmoid(x)
detJ = (-x + F.logsigmoid(x) + y[..., :-1].log()).sum(-1)
return detJ
def forward_shape(self, shape):
if len(shape) < 1:
raise ValueError("Too few dimensions on input")
return shape[:-1] + (shape[-1] + 1,)
def inverse_shape(self, shape):
if len(shape) < 1:
raise ValueError("Too few dimensions on input")
return shape[:-1] + (shape[-1] - 1,)
class LowerCholeskyTransform(Transform):
"""
Transform from unconstrained matrices to lower-triangular matrices with
nonnegative diagonal entries.
This is useful for parameterizing positive definite matrices in terms of
their Cholesky factorization.
"""
domain = constraints.independent(constraints.real, 2)
codomain = constraints.lower_cholesky
def __eq__(self, other):
return isinstance(other, LowerCholeskyTransform)
def _call(self, x):
return x.tril(-1) + x.diagonal(dim1=-2, dim2=-1).exp().diag_embed()
def _inverse(self, y):
return y.tril(-1) + y.diagonal(dim1=-2, dim2=-1).log().diag_embed()
class CatTransform(Transform):
"""
Transform functor that applies a sequence of transforms `tseq`
component-wise to each submatrix at `dim`, of length `lengths[dim]`,
in a way compatible with :func:`torch.cat`.
Example::
x0 = torch.cat([torch.range(1, 10), torch.range(1, 10)], dim=0)
x = torch.cat([x0, x0], dim=0)
t0 = CatTransform([ExpTransform(), identity_transform], dim=0, lengths=[10, 10])
t = CatTransform([t0, t0], dim=0, lengths=[20, 20])
y = t(x)
"""
transforms: List[Transform]
def __init__(self, tseq, dim=0, lengths=None, cache_size=0):
assert all(isinstance(t, Transform) for t in tseq)
if cache_size:
tseq = [t.with_cache(cache_size) for t in tseq]
super(CatTransform, self).__init__(cache_size=cache_size)
self.transforms = list(tseq)
if lengths is None:
lengths = [1] * len(self.transforms)
self.lengths = list(lengths)
assert len(self.lengths) == len(self.transforms)
self.dim = dim
@lazy_property
def event_dim(self):
return max(t.event_dim for t in self.transforms)
@lazy_property
def length(self):
return sum(self.lengths)
def with_cache(self, cache_size=1):
if self._cache_size == cache_size:
return self
return CatTransform(self.transforms, self.dim, self.lengths, cache_size)
def _call(self, x):
assert -x.dim() <= self.dim < x.dim()
assert x.size(self.dim) == self.length
yslices = []
start = 0
for trans, length in zip(self.transforms, self.lengths):
xslice = x.narrow(self.dim, start, length)
yslices.append(trans(xslice))
start = start + length # avoid += for jit compat
return torch.cat(yslices, dim=self.dim)
def _inverse(self, y):
assert -y.dim() <= self.dim < y.dim()
assert y.size(self.dim) == self.length
xslices = []
start = 0
for trans, length in zip(self.transforms, self.lengths):
yslice = y.narrow(self.dim, start, length)
xslices.append(trans.inv(yslice))
start = start + length # avoid += for jit compat
return torch.cat(xslices, dim=self.dim)
def log_abs_det_jacobian(self, x, y):
assert -x.dim() <= self.dim < x.dim()
assert x.size(self.dim) == self.length
assert -y.dim() <= self.dim < y.dim()
assert y.size(self.dim) == self.length
logdetjacs = []
start = 0
for trans, length in zip(self.transforms, self.lengths):
xslice = x.narrow(self.dim, start, length)
yslice = y.narrow(self.dim, start, length)
logdetjac = trans.log_abs_det_jacobian(xslice, yslice)
if trans.event_dim < self.event_dim:
logdetjac = _sum_rightmost(logdetjac, self.event_dim - trans.event_dim)
logdetjacs.append(logdetjac)
start = start + length # avoid += for jit compat
# Decide whether to concatenate or sum.
dim = self.dim
if dim >= 0:
dim = dim - x.dim()
dim = dim + self.event_dim
if dim < 0:
return torch.cat(logdetjacs, dim=dim)
else:
return sum(logdetjacs)
@property
def bijective(self):
return all(t.bijective for t in self.transforms)
@constraints.dependent_property
def domain(self):
return constraints.cat([t.domain for t in self.transforms],
self.dim, self.lengths)
@constraints.dependent_property
def codomain(self):
return constraints.cat([t.codomain for t in self.transforms],
self.dim, self.lengths)
class StackTransform(Transform):
"""
Transform functor that applies a sequence of transforms `tseq`
component-wise to each submatrix at `dim`
in a way compatible with :func:`torch.stack`.
Example::
x = torch.stack([torch.range(1, 10), torch.range(1, 10)], dim=1)
t = StackTransform([ExpTransform(), identity_transform], dim=1)
y = t(x)
"""
transforms: List[Transform]
def __init__(self, tseq, dim=0, cache_size=0):
assert all(isinstance(t, Transform) for t in tseq)
if cache_size:
tseq = [t.with_cache(cache_size) for t in tseq]
super(StackTransform, self).__init__(cache_size=cache_size)
self.transforms = list(tseq)
self.dim = dim
def with_cache(self, cache_size=1):
if self._cache_size == cache_size:
return self
return StackTransform(self.transforms, self.dim, cache_size)
def _slice(self, z):
return [z.select(self.dim, i) for i in range(z.size(self.dim))]
def _call(self, x):
assert -x.dim() <= self.dim < x.dim()
assert x.size(self.dim) == len(self.transforms)
yslices = []
for xslice, trans in zip(self._slice(x), self.transforms):
yslices.append(trans(xslice))
return torch.stack(yslices, dim=self.dim)
def _inverse(self, y):
assert -y.dim() <= self.dim < y.dim()
assert y.size(self.dim) == len(self.transforms)
xslices = []
for yslice, trans in zip(self._slice(y), self.transforms):
xslices.append(trans.inv(yslice))
return torch.stack(xslices, dim=self.dim)
def log_abs_det_jacobian(self, x, y):
assert -x.dim() <= self.dim < x.dim()
assert x.size(self.dim) == len(self.transforms)
assert -y.dim() <= self.dim < y.dim()
assert y.size(self.dim) == len(self.transforms)
logdetjacs = []
yslices = self._slice(y)
xslices = self._slice(x)
for xslice, yslice, trans in zip(xslices, yslices, self.transforms):
logdetjacs.append(trans.log_abs_det_jacobian(xslice, yslice))
return torch.stack(logdetjacs, dim=self.dim)
@property
def bijective(self):
return all(t.bijective for t in self.transforms)
@constraints.dependent_property
def domain(self):
return constraints.stack([t.domain for t in self.transforms], self.dim)
@constraints.dependent_property
def codomain(self):
return constraints.stack([t.codomain for t in self.transforms], self.dim)
class CumulativeDistributionTransform(Transform):
"""
Transform via the cumulative distribution function of a probability distribution.
Args:
distribution (Distribution): Distribution whose cumulative distribution function to use for
the transformation.
Example::
# Construct a Gaussian copula from a multivariate normal.
base_dist = MultivariateNormal(
loc=torch.zeros(2),
scale_tril=LKJCholesky(2).sample(),
)
transform = CumulativeDistributionTransform(Normal(0, 1))
copula = TransformedDistribution(base_dist, [transform])
"""
bijective = True
codomain = constraints.unit_interval
sign = +1
def __init__(self, distribution, cache_size=0):
super(CumulativeDistributionTransform, self).__init__(cache_size=cache_size)
self.distribution = distribution
@property
def domain(self):
return self.distribution.support
def _call(self, x):
return self.distribution.cdf(x)
def _inverse(self, y):
return self.distribution.icdf(y)
def log_abs_det_jacobian(self, x, y):
return self.distribution.log_prob(x)
def with_cache(self, cache_size=1):
if self._cache_size == cache_size:
return self
return CumulativeDistributionTransform(self.distribution, cache_size=cache_size)
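# Editor's sketch (not part of the original file): composing transforms and
# checking the chained log-det against the sum of the individual parts.
if __name__ == "__main__":
    t = ComposeTransform([ExpTransform(), AffineTransform(loc=1.0, scale=2.0)])
    x = torch.randn(5)
    y = t(x)
    # round-trip through the composed inverse
    assert torch.allclose(t.inv(y), x, atol=1e-4)
    # ExpTransform contributes x; AffineTransform contributes log|scale| = log 2
    manual = ExpTransform().log_abs_det_jacobian(x, x.exp()) + math.log(2.0)
    assert torch.allclose(t.log_abs_det_jacobian(x, y), manual, atol=1e-5)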
| pytorch-master | torch/distributions/transforms.py |
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
__all__ = ['Dirichlet']
# This helper is exposed for testing.
def _Dirichlet_backward(x, concentration, grad_output):
total = concentration.sum(-1, True).expand_as(concentration)
grad = torch._dirichlet_grad(x, concentration, total)
return grad * (grad_output - (x * grad_output).sum(-1, True))
class _Dirichlet(Function):
@staticmethod
def forward(ctx, concentration):
x = torch._sample_dirichlet(concentration)
ctx.save_for_backward(x, concentration)
return x
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
x, concentration = ctx.saved_tensors
return _Dirichlet_backward(x, concentration, grad_output)
class Dirichlet(ExponentialFamily):
r"""
Creates a Dirichlet distribution parameterized by concentration :attr:`concentration`.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Dirichlet(torch.tensor([0.5, 0.5]))
>>> m.sample() # Dirichlet distributed with concentration [0.5, 0.5]
tensor([ 0.1046, 0.8954])
Args:
concentration (Tensor): concentration parameter of the distribution
(often referred to as alpha)
"""
arg_constraints = {'concentration': constraints.independent(constraints.positive, 1)}
support = constraints.simplex
has_rsample = True
def __init__(self, concentration, validate_args=None):
if concentration.dim() < 1:
raise ValueError("`concentration` parameter must be at least one-dimensional.")
self.concentration = concentration
batch_shape, event_shape = concentration.shape[:-1], concentration.shape[-1:]
super(Dirichlet, self).__init__(batch_shape, event_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Dirichlet, _instance)
batch_shape = torch.Size(batch_shape)
new.concentration = self.concentration.expand(batch_shape + self.event_shape)
super(Dirichlet, new).__init__(batch_shape, self.event_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def rsample(self, sample_shape=()):
shape = self._extended_shape(sample_shape)
concentration = self.concentration.expand(shape)
return _Dirichlet.apply(concentration)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
torch.lgamma(self.concentration.sum(-1)) -
torch.lgamma(self.concentration).sum(-1))
@property
def mean(self):
return self.concentration / self.concentration.sum(-1, True)
@property
def mode(self):
concentrationm1 = (self.concentration - 1).clamp(min=0.)
mode = concentrationm1 / concentrationm1.sum(-1, True)
mask = (self.concentration < 1).all(axis=-1)
mode[mask] = torch.nn.functional.one_hot(mode[mask].argmax(axis=-1), concentrationm1.shape[-1]).to(mode)
return mode
@property
def variance(self):
con0 = self.concentration.sum(-1, True)
return self.concentration * (con0 - self.concentration) / (con0.pow(2) * (con0 + 1))
def entropy(self):
k = self.concentration.size(-1)
a0 = self.concentration.sum(-1)
return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
(k - a0) * torch.digamma(a0) -
((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))
@property
def _natural_params(self):
return (self.concentration, )
def _log_normalizer(self, x):
return x.lgamma().sum(-1) - torch.lgamma(x.sum(-1))
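# Editor's sketch (not part of the original file): Dirichlet.rsample is
# reparameterized, so gradients flow back to the concentration parameter via
# the _Dirichlet autograd Function defined above. Values are illustration data.
if __name__ == "__main__":
    concentration = torch.tensor([0.5, 1.0, 2.0], requires_grad=True)
    d = Dirichlet(concentration)
    sample = d.rsample()
    loss = (sample * torch.tensor([1.0, 2.0, 3.0])).sum()
    loss.backward()  # exercises _Dirichlet_backward above
    print(sample, concentration.grad)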
| pytorch-master | torch/distributions/dirichlet.py |
from torch.distributions import constraints
from torch.distributions.transforms import ExpTransform
from torch.distributions.normal import Normal
from torch.distributions.transformed_distribution import TransformedDistribution
__all__ = ['LogNormal']
class LogNormal(TransformedDistribution):
r"""
Creates a log-normal distribution parameterized by
:attr:`loc` and :attr:`scale` where::
X ~ Normal(loc, scale)
Y = exp(X) ~ LogNormal(loc, scale)
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = LogNormal(torch.tensor([0.0]), torch.tensor([1.0]))
>>> m.sample() # log-normal distributed with mean=0 and stddev=1
tensor([ 0.1046])
Args:
loc (float or Tensor): mean of log of distribution
scale (float or Tensor): standard deviation of log of the distribution
"""
arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
support = constraints.positive
has_rsample = True
def __init__(self, loc, scale, validate_args=None):
base_dist = Normal(loc, scale, validate_args=validate_args)
super(LogNormal, self).__init__(base_dist, ExpTransform(), validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(LogNormal, _instance)
return super(LogNormal, self).expand(batch_shape, _instance=new)
@property
def loc(self):
return self.base_dist.loc
@property
def scale(self):
return self.base_dist.scale
@property
def mean(self):
return (self.loc + self.scale.pow(2) / 2).exp()
@property
def mode(self):
return (self.loc - self.scale.square()).exp()
@property
def variance(self):
return (self.scale.pow(2).exp() - 1) * (2 * self.loc + self.scale.pow(2)).exp()
def entropy(self):
return self.base_dist.entropy() + self.loc
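# Editor's sketch (not part of the original file): empirical moments of a large
# LogNormal sample agree with the closed-form mean/variance properties above.
if __name__ == "__main__":
    import torch
    m = LogNormal(torch.tensor(0.0), torch.tensor(0.5))
    s = m.sample((200_000,))
    print(s.mean(), m.mean)  # both approximately exp(0 + 0.25 / 2)
    print(s.var(), m.variance)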
| pytorch-master | torch/distributions/log_normal.py |
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.independent import Independent
from torch.distributions.transforms import ComposeTransform, Transform
from torch.distributions.utils import _sum_rightmost
from typing import Dict
__all__ = ['TransformedDistribution']
class TransformedDistribution(Distribution):
r"""
Extension of the Distribution class, which applies a sequence of Transforms
to a base distribution. Let f be the composition of transforms applied::
X ~ BaseDistribution
Y = f(X) ~ TransformedDistribution(BaseDistribution, f)
log p(Y) = log p(X) + log |det (dX/dY)|
Note that the ``.event_shape`` of a :class:`TransformedDistribution` is the
maximum shape of its base distribution and its transforms, since transforms
can introduce correlations among events.
An example for the usage of :class:`TransformedDistribution` would be::
# Building a Logistic Distribution
# X ~ Uniform(0, 1)
# f = a + b * logit(X)
# Y ~ f(X) ~ Logistic(a, b)
base_distribution = Uniform(0, 1)
transforms = [SigmoidTransform().inv, AffineTransform(loc=a, scale=b)]
logistic = TransformedDistribution(base_distribution, transforms)
For more examples, please look at the implementations of
:class:`~torch.distributions.gumbel.Gumbel`,
:class:`~torch.distributions.half_cauchy.HalfCauchy`,
:class:`~torch.distributions.half_normal.HalfNormal`,
:class:`~torch.distributions.log_normal.LogNormal`,
:class:`~torch.distributions.pareto.Pareto`,
:class:`~torch.distributions.weibull.Weibull`,
:class:`~torch.distributions.relaxed_bernoulli.RelaxedBernoulli` and
:class:`~torch.distributions.relaxed_categorical.RelaxedOneHotCategorical`
"""
arg_constraints: Dict[str, constraints.Constraint] = {}
def __init__(self, base_distribution, transforms, validate_args=None):
if isinstance(transforms, Transform):
self.transforms = [transforms, ]
elif isinstance(transforms, list):
if not all(isinstance(t, Transform) for t in transforms):
raise ValueError("transforms must be a Transform or a list of Transforms")
self.transforms = transforms
else:
raise ValueError("transforms must be a Transform or list, but was {}".format(transforms))
# Reshape base_distribution according to transforms.
base_shape = base_distribution.batch_shape + base_distribution.event_shape
base_event_dim = len(base_distribution.event_shape)
transform = ComposeTransform(self.transforms)
domain_event_dim = transform.domain.event_dim
if len(base_shape) < domain_event_dim:
raise ValueError("base_distribution needs to have shape with size at least {}, but got {}."
.format(domain_event_dim, base_shape))
shape = transform.forward_shape(base_shape)
expanded_base_shape = transform.inverse_shape(shape)
if base_shape != expanded_base_shape:
base_batch_shape = expanded_base_shape[:len(expanded_base_shape) - base_event_dim]
base_distribution = base_distribution.expand(base_batch_shape)
reinterpreted_batch_ndims = domain_event_dim - base_event_dim
if reinterpreted_batch_ndims > 0:
base_distribution = Independent(base_distribution, reinterpreted_batch_ndims)
self.base_dist = base_distribution
# Compute shapes.
event_dim = transform.codomain.event_dim + max(base_event_dim - domain_event_dim, 0)
assert len(shape) >= event_dim
cut = len(shape) - event_dim
batch_shape = shape[:cut]
event_shape = shape[cut:]
super(TransformedDistribution, self).__init__(batch_shape, event_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(TransformedDistribution, _instance)
batch_shape = torch.Size(batch_shape)
shape = batch_shape + self.event_shape
for t in reversed(self.transforms):
shape = t.inverse_shape(shape)
base_batch_shape = shape[:len(shape) - len(self.base_dist.event_shape)]
new.base_dist = self.base_dist.expand(base_batch_shape)
new.transforms = self.transforms
super(TransformedDistribution, new).__init__(batch_shape, self.event_shape, validate_args=False)
new._validate_args = self._validate_args
return new
@constraints.dependent_property(is_discrete=False)
def support(self):
if not self.transforms:
return self.base_dist.support
support = self.transforms[-1].codomain
if len(self.event_shape) > support.event_dim:
support = constraints.independent(support, len(self.event_shape) - support.event_dim)
return support
@property
def has_rsample(self):
return self.base_dist.has_rsample
def sample(self, sample_shape=torch.Size()):
"""
Generates a sample_shape shaped sample or sample_shape shaped batch of
samples if the distribution parameters are batched. Samples first from
base distribution and applies `transform()` for every transform in the
list.
"""
with torch.no_grad():
x = self.base_dist.sample(sample_shape)
for transform in self.transforms:
x = transform(x)
return x
def rsample(self, sample_shape=torch.Size()):
"""
Generates a sample_shape shaped reparameterized sample or sample_shape
shaped batch of reparameterized samples if the distribution parameters
are batched. Samples first from base distribution and applies
`transform()` for every transform in the list.
"""
x = self.base_dist.rsample(sample_shape)
for transform in self.transforms:
x = transform(x)
return x
def log_prob(self, value):
"""
Scores the sample by inverting the transform(s) and computing the score
using the score of the base distribution and the log abs det jacobian.
"""
if self._validate_args:
self._validate_sample(value)
event_dim = len(self.event_shape)
log_prob = 0.0
y = value
for transform in reversed(self.transforms):
x = transform.inv(y)
event_dim += transform.domain.event_dim - transform.codomain.event_dim
log_prob = log_prob - _sum_rightmost(transform.log_abs_det_jacobian(x, y),
event_dim - transform.domain.event_dim)
y = x
log_prob = log_prob + _sum_rightmost(self.base_dist.log_prob(y),
event_dim - len(self.base_dist.event_shape))
return log_prob
def _monotonize_cdf(self, value):
"""
This conditionally flips ``value -> 1-value`` to ensure :meth:`cdf` is
monotone increasing.
"""
sign = 1
for transform in self.transforms:
sign = sign * transform.sign
if isinstance(sign, int) and sign == 1:
return value
return sign * (value - 0.5) + 0.5
def cdf(self, value):
"""
        Computes the cumulative distribution function by inverting the
        transform(s) and evaluating the CDF of the base distribution.
"""
for transform in self.transforms[::-1]:
value = transform.inv(value)
if self._validate_args:
self.base_dist._validate_sample(value)
value = self.base_dist.cdf(value)
value = self._monotonize_cdf(value)
return value
def icdf(self, value):
"""
        Computes the inverse cumulative distribution function by evaluating the
        inverse CDF of the base distribution and then applying the transform(s).
"""
value = self._monotonize_cdf(value)
value = self.base_dist.icdf(value)
for transform in self.transforms:
value = transform(value)
return value
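
# Illustrative usage sketch (hypothetical helper added to this dump, not part of
# the upstream file). TransformedDistribution composes a base distribution with a
# list of transforms and corrects log_prob by the log abs det Jacobian; assuming
# only the public torch.distributions API, exp-transforming a standard Normal
# should reproduce LogNormal.
def _example_transformed_distribution_matches_log_normal():
    import torch
    from torch.distributions import LogNormal
    from torch.distributions.normal import Normal
    from torch.distributions.transforms import ExpTransform

    base = Normal(torch.tensor(0.0), torch.tensor(1.0))
    via_transform = TransformedDistribution(base, [ExpTransform()])
    x = torch.tensor([0.5, 1.0, 2.0])
    # log_prob(x) = Normal.log_prob(log x) - log|d exp(u)/du| evaluated at u = log x
    assert torch.allclose(via_transform.log_prob(x), LogNormal(0.0, 1.0).log_prob(x))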
| pytorch-master | torch/distributions/transformed_distribution.py |
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property
from torch.nn.functional import binary_cross_entropy_with_logits
__all__ = ['Geometric']
class Geometric(Distribution):
r"""
Creates a Geometric distribution parameterized by :attr:`probs`,
where :attr:`probs` is the probability of success of Bernoulli trials.
    It represents the probability that in :math:`k + 1` Bernoulli trials, the
    first :math:`k` trials fail before seeing a success.
    Samples are non-negative integers in :math:`[0, \infty)`.
Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Geometric(torch.tensor([0.3]))
>>> m.sample() # underlying Bernoulli has 30% chance 1; 70% chance 0
tensor([ 2.])
Args:
probs (Number, Tensor): the probability of sampling `1`. Must be in range (0, 1]
logits (Number, Tensor): the log-odds of sampling `1`.
"""
arg_constraints = {'probs': constraints.unit_interval,
'logits': constraints.real}
support = constraints.nonnegative_integer
def __init__(self, probs=None, logits=None, validate_args=None):
if (probs is None) == (logits is None):
raise ValueError("Either `probs` or `logits` must be specified, but not both.")
if probs is not None:
self.probs, = broadcast_all(probs)
else:
self.logits, = broadcast_all(logits)
probs_or_logits = probs if probs is not None else logits
if isinstance(probs_or_logits, Number):
batch_shape = torch.Size()
else:
batch_shape = probs_or_logits.size()
super(Geometric, self).__init__(batch_shape, validate_args=validate_args)
if self._validate_args and probs is not None:
# Add an extra check beyond unit_interval
value = self.probs
valid = value > 0
if not valid.all():
invalid_value = value.data[~valid]
raise ValueError(
"Expected parameter probs "
f"({type(value).__name__} of shape {tuple(value.shape)}) "
f"of distribution {repr(self)} "
f"to be positive but found invalid values:\n{invalid_value}"
)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Geometric, _instance)
batch_shape = torch.Size(batch_shape)
if 'probs' in self.__dict__:
new.probs = self.probs.expand(batch_shape)
if 'logits' in self.__dict__:
new.logits = self.logits.expand(batch_shape)
super(Geometric, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
@property
def mean(self):
return 1. / self.probs - 1.
@property
def mode(self):
return torch.zeros_like(self.probs)
@property
def variance(self):
return (1. / self.probs - 1.) / self.probs
@lazy_property
def logits(self):
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
tiny = torch.finfo(self.probs.dtype).tiny
with torch.no_grad():
if torch._C._get_tracing_state():
# [JIT WORKAROUND] lack of support for .uniform_()
u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device)
u = u.clamp(min=tiny)
else:
u = self.probs.new(shape).uniform_(tiny, 1)
return (u.log() / (-self.probs).log1p()).floor()
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
value, probs = broadcast_all(value, self.probs)
probs = probs.clone(memory_format=torch.contiguous_format)
probs[(probs == 1) & (value == 0)] = 0
return value * (-probs).log1p() + self.probs.log()
def entropy(self):
return binary_cross_entropy_with_logits(self.logits, self.probs, reduction='none') / self.probs
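
# Illustrative usage sketch (hypothetical helper added to this dump, not part of
# the upstream file). With the "number of failures before the first success"
# convention used above, log_prob should match the pmf P(K = k) = (1 - p)**k * p.
def _example_geometric_log_prob_matches_closed_form():
    import torch
    p = torch.tensor(0.3)
    m = Geometric(p)
    k = torch.tensor([0.0, 1.0, 5.0])
    closed_form = k * torch.log1p(-p) + torch.log(p)
    assert torch.allclose(m.log_prob(k), closed_form)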
| pytorch-master | torch/distributions/geometric.py |
import torch
from torch.distributions import constraints
from torch.distributions.exponential import Exponential
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import AffineTransform, PowerTransform
from torch.distributions.utils import broadcast_all
from torch.distributions.gumbel import euler_constant
__all__ = ['Weibull']
class Weibull(TransformedDistribution):
r"""
Samples from a two-parameter Weibull distribution.
    Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Weibull(torch.tensor([1.0]), torch.tensor([1.0]))
>>> m.sample() # sample from a Weibull distribution with scale=1, concentration=1
tensor([ 0.4784])
Args:
scale (float or Tensor): Scale parameter of distribution (lambda).
concentration (float or Tensor): Concentration parameter of distribution (k/shape).
"""
arg_constraints = {'scale': constraints.positive, 'concentration': constraints.positive}
support = constraints.positive
def __init__(self, scale, concentration, validate_args=None):
self.scale, self.concentration = broadcast_all(scale, concentration)
self.concentration_reciprocal = self.concentration.reciprocal()
base_dist = Exponential(torch.ones_like(self.scale), validate_args=validate_args)
transforms = [PowerTransform(exponent=self.concentration_reciprocal),
AffineTransform(loc=0, scale=self.scale)]
super(Weibull, self).__init__(base_dist,
transforms,
validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Weibull, _instance)
new.scale = self.scale.expand(batch_shape)
new.concentration = self.concentration.expand(batch_shape)
new.concentration_reciprocal = new.concentration.reciprocal()
base_dist = self.base_dist.expand(batch_shape)
transforms = [PowerTransform(exponent=new.concentration_reciprocal),
AffineTransform(loc=0, scale=new.scale)]
super(Weibull, new).__init__(base_dist,
transforms,
validate_args=False)
new._validate_args = self._validate_args
return new
@property
def mean(self):
return self.scale * torch.exp(torch.lgamma(1 + self.concentration_reciprocal))
@property
def mode(self):
return self.scale * ((self.concentration - 1) / self.concentration) ** self.concentration.reciprocal()
@property
def variance(self):
return self.scale.pow(2) * (torch.exp(torch.lgamma(1 + 2 * self.concentration_reciprocal)) -
torch.exp(2 * torch.lgamma(1 + self.concentration_reciprocal)))
def entropy(self):
return euler_constant * (1 - self.concentration_reciprocal) + \
torch.log(self.scale * self.concentration_reciprocal) + 1
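
# Illustrative usage sketch (hypothetical helper added to this dump, not part of
# the upstream file). The Exponential -> PowerTransform -> AffineTransform chain
# above should reproduce the textbook Weibull density
#     f(x) = (k / scale) * (x / scale)**(k - 1) * exp(-(x / scale)**k),
# with k = concentration.
def _example_weibull_log_prob_matches_closed_form():
    import torch
    scale = torch.tensor(2.0)
    k = torch.tensor(1.5)
    m = Weibull(scale, k)
    x = torch.tensor([0.5, 1.0, 3.0])
    closed_form = torch.log(k / scale) + (k - 1) * torch.log(x / scale) - (x / scale) ** k
    assert torch.allclose(m.log_prob(x), closed_form, atol=1e-6)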
| pytorch-master | torch/distributions/weibull.py |
import math
import torch
from torch._six import inf, nan
from torch.distributions import Chi2, constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import _standard_normal, broadcast_all
__all__ = ['StudentT']
class StudentT(Distribution):
r"""
Creates a Student's t-distribution parameterized by degree of
freedom :attr:`df`, mean :attr:`loc` and scale :attr:`scale`.
Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = StudentT(torch.tensor([2.0]))
>>> m.sample() # Student's t-distributed with degrees of freedom=2
tensor([ 0.1046])
Args:
df (float or Tensor): degrees of freedom
loc (float or Tensor): mean of the distribution
scale (float or Tensor): scale of the distribution
"""
arg_constraints = {'df': constraints.positive, 'loc': constraints.real, 'scale': constraints.positive}
support = constraints.real
has_rsample = True
@property
def mean(self):
m = self.loc.clone(memory_format=torch.contiguous_format)
m[self.df <= 1] = nan
return m
@property
def mode(self):
return self.loc
@property
def variance(self):
m = self.df.clone(memory_format=torch.contiguous_format)
m[self.df > 2] = self.scale[self.df > 2].pow(2) * self.df[self.df > 2] / (self.df[self.df > 2] - 2)
m[(self.df <= 2) & (self.df > 1)] = inf
m[self.df <= 1] = nan
return m
def __init__(self, df, loc=0., scale=1., validate_args=None):
self.df, self.loc, self.scale = broadcast_all(df, loc, scale)
self._chi2 = Chi2(self.df)
batch_shape = self.df.size()
super(StudentT, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(StudentT, _instance)
batch_shape = torch.Size(batch_shape)
new.df = self.df.expand(batch_shape)
new.loc = self.loc.expand(batch_shape)
new.scale = self.scale.expand(batch_shape)
new._chi2 = self._chi2.expand(batch_shape)
super(StudentT, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def rsample(self, sample_shape=torch.Size()):
# NOTE: This does not agree with scipy implementation as much as other distributions.
# (see https://github.com/fritzo/notebooks/blob/master/debug-student-t.ipynb). Using DoubleTensor
# parameters seems to help.
# X ~ Normal(0, 1)
# Z ~ Chi2(df)
# Y = X / sqrt(Z / df) ~ StudentT(df)
shape = self._extended_shape(sample_shape)
X = _standard_normal(shape, dtype=self.df.dtype, device=self.df.device)
Z = self._chi2.rsample(sample_shape)
Y = X * torch.rsqrt(Z / self.df)
return self.loc + self.scale * Y
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
y = (value - self.loc) / self.scale
Z = (self.scale.log() +
0.5 * self.df.log() +
0.5 * math.log(math.pi) +
torch.lgamma(0.5 * self.df) -
torch.lgamma(0.5 * (self.df + 1.)))
return -0.5 * (self.df + 1.) * torch.log1p(y**2. / self.df) - Z
def entropy(self):
lbeta = torch.lgamma(0.5 * self.df) + math.lgamma(0.5) - torch.lgamma(0.5 * (self.df + 1))
return (self.scale.log() +
0.5 * (self.df + 1) *
(torch.digamma(0.5 * (self.df + 1)) - torch.digamma(0.5 * self.df)) +
0.5 * self.df.log() + lbeta)
| pytorch-master | torch/distributions/studentT.py |
import math
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import _standard_normal, lazy_property
__all__ = ['MultivariateNormal']
def _batch_mv(bmat, bvec):
r"""
Performs a batched matrix-vector product, with compatible but different batch shapes.
This function takes as input `bmat`, containing :math:`n \times n` matrices, and
`bvec`, containing length :math:`n` vectors.
Both `bmat` and `bvec` may have any number of leading dimensions, which correspond
to a batch shape. They are not necessarily assumed to have the same batch shape,
just ones which can be broadcasted.
"""
return torch.matmul(bmat, bvec.unsqueeze(-1)).squeeze(-1)
def _batch_mahalanobis(bL, bx):
r"""
Computes the squared Mahalanobis distance :math:`\mathbf{x}^\top\mathbf{M}^{-1}\mathbf{x}`
for a factored :math:`\mathbf{M} = \mathbf{L}\mathbf{L}^\top`.
    Accepts batches for both bL and bx. They are not necessarily assumed to have the same batch
    shape, but the batch shape of `bL` should be broadcastable to the batch shape of `bx`.
"""
n = bx.size(-1)
bx_batch_shape = bx.shape[:-1]
# Assume that bL.shape = (i, 1, n, n), bx.shape = (..., i, j, n),
# we are going to make bx have shape (..., 1, j, i, 1, n) to apply batched tri.solve
bx_batch_dims = len(bx_batch_shape)
bL_batch_dims = bL.dim() - 2
outer_batch_dims = bx_batch_dims - bL_batch_dims
old_batch_dims = outer_batch_dims + bL_batch_dims
new_batch_dims = outer_batch_dims + 2 * bL_batch_dims
# Reshape bx with the shape (..., 1, i, j, 1, n)
bx_new_shape = bx.shape[:outer_batch_dims]
for (sL, sx) in zip(bL.shape[:-2], bx.shape[outer_batch_dims:-1]):
bx_new_shape += (sx // sL, sL)
bx_new_shape += (n,)
bx = bx.reshape(bx_new_shape)
# Permute bx to make it have shape (..., 1, j, i, 1, n)
permute_dims = (list(range(outer_batch_dims)) +
list(range(outer_batch_dims, new_batch_dims, 2)) +
list(range(outer_batch_dims + 1, new_batch_dims, 2)) +
[new_batch_dims])
bx = bx.permute(permute_dims)
flat_L = bL.reshape(-1, n, n) # shape = b x n x n
flat_x = bx.reshape(-1, flat_L.size(0), n) # shape = c x b x n
flat_x_swap = flat_x.permute(1, 2, 0) # shape = b x n x c
M_swap = torch.linalg.solve_triangular(flat_L, flat_x_swap, upper=False).pow(2).sum(-2) # shape = b x c
M = M_swap.t() # shape = c x b
# Now we revert the above reshape and permute operators.
permuted_M = M.reshape(bx.shape[:-1]) # shape = (..., 1, j, i, 1)
permute_inv_dims = list(range(outer_batch_dims))
for i in range(bL_batch_dims):
permute_inv_dims += [outer_batch_dims + i, old_batch_dims + i]
reshaped_M = permuted_M.permute(permute_inv_dims) # shape = (..., 1, i, j, 1)
return reshaped_M.reshape(bx_batch_shape)
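
# Illustrative sanity sketch (hypothetical helper added to this dump, not part of
# the upstream file). For a simple unbatched Cholesky factor, the reshaping logic
# above should reduce to the direct computation x^T (L L^T)^{-1} x.
def _example_batch_mahalanobis_matches_direct_solve():
    torch.manual_seed(0)
    A = torch.randn(3, 3)
    L = torch.linalg.cholesky(A @ A.mT + 3 * torch.eye(3))
    x = torch.randn(5, 3)  # a batch of 5 vectors
    direct = (x * torch.linalg.solve(L @ L.mT, x.unsqueeze(-1)).squeeze(-1)).sum(-1)
    assert torch.allclose(_batch_mahalanobis(L, x), direct, atol=1e-5)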
def _precision_to_scale_tril(P):
# Ref: https://nbviewer.jupyter.org/gist/fehiepsi/5ef8e09e61604f10607380467eb82006#Precision-to-scale_tril
Lf = torch.linalg.cholesky(torch.flip(P, (-2, -1)))
L_inv = torch.transpose(torch.flip(Lf, (-2, -1)), -2, -1)
Id = torch.eye(P.shape[-1], dtype=P.dtype, device=P.device)
L = torch.linalg.solve_triangular(L_inv, Id, upper=False)
return L
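
# Illustrative sanity sketch (hypothetical helper added to this dump, not part of
# the upstream file). The flip-and-Cholesky trick above should return a
# lower-triangular L with L @ L.T approximately equal to inv(P).
def _example_precision_to_scale_tril_inverts_precision():
    torch.manual_seed(0)
    A = torch.randn(3, 3)
    P = A @ A.mT + 3 * torch.eye(3)
    L = _precision_to_scale_tril(P)
    assert torch.allclose(L, L.tril())
    assert torch.allclose(L @ L.mT, torch.inverse(P), atol=1e-5)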
class MultivariateNormal(Distribution):
r"""
Creates a multivariate normal (also called Gaussian) distribution
parameterized by a mean vector and a covariance matrix.
The multivariate normal distribution can be parameterized either
in terms of a positive definite covariance matrix :math:`\mathbf{\Sigma}`
or a positive definite precision matrix :math:`\mathbf{\Sigma}^{-1}`
or a lower-triangular matrix :math:`\mathbf{L}` with positive-valued
diagonal entries, such that
:math:`\mathbf{\Sigma} = \mathbf{L}\mathbf{L}^\top`. This triangular matrix
can be obtained via e.g. Cholesky decomposition of the covariance.
    Example::
>>> # xdoctest: +REQUIRES(--lapack)
>>> m = MultivariateNormal(torch.zeros(2), torch.eye(2))
>>> m.sample() # normally distributed with mean=`[0,0]` and covariance_matrix=`I`
tensor([-0.2102, -0.5429])
Args:
loc (Tensor): mean of the distribution
covariance_matrix (Tensor): positive-definite covariance matrix
precision_matrix (Tensor): positive-definite precision matrix
scale_tril (Tensor): lower-triangular factor of covariance, with positive-valued diagonal
Note:
Only one of :attr:`covariance_matrix` or :attr:`precision_matrix` or
:attr:`scale_tril` can be specified.
Using :attr:`scale_tril` will be more efficient: all computations internally
are based on :attr:`scale_tril`. If :attr:`covariance_matrix` or
:attr:`precision_matrix` is passed instead, it is only used to compute
the corresponding lower triangular matrices using a Cholesky decomposition.
"""
arg_constraints = {'loc': constraints.real_vector,
'covariance_matrix': constraints.positive_definite,
'precision_matrix': constraints.positive_definite,
'scale_tril': constraints.lower_cholesky}
support = constraints.real_vector
has_rsample = True
def __init__(self, loc, covariance_matrix=None, precision_matrix=None, scale_tril=None, validate_args=None):
if loc.dim() < 1:
raise ValueError("loc must be at least one-dimensional.")
if (covariance_matrix is not None) + (scale_tril is not None) + (precision_matrix is not None) != 1:
raise ValueError("Exactly one of covariance_matrix or precision_matrix or scale_tril may be specified.")
if scale_tril is not None:
if scale_tril.dim() < 2:
raise ValueError("scale_tril matrix must be at least two-dimensional, "
"with optional leading batch dimensions")
batch_shape = torch.broadcast_shapes(scale_tril.shape[:-2], loc.shape[:-1])
self.scale_tril = scale_tril.expand(batch_shape + (-1, -1))
elif covariance_matrix is not None:
if covariance_matrix.dim() < 2:
raise ValueError("covariance_matrix must be at least two-dimensional, "
"with optional leading batch dimensions")
batch_shape = torch.broadcast_shapes(covariance_matrix.shape[:-2], loc.shape[:-1])
self.covariance_matrix = covariance_matrix.expand(batch_shape + (-1, -1))
else:
if precision_matrix.dim() < 2:
raise ValueError("precision_matrix must be at least two-dimensional, "
"with optional leading batch dimensions")
batch_shape = torch.broadcast_shapes(precision_matrix.shape[:-2], loc.shape[:-1])
self.precision_matrix = precision_matrix.expand(batch_shape + (-1, -1))
self.loc = loc.expand(batch_shape + (-1,))
event_shape = self.loc.shape[-1:]
super(MultivariateNormal, self).__init__(batch_shape, event_shape, validate_args=validate_args)
if scale_tril is not None:
self._unbroadcasted_scale_tril = scale_tril
elif covariance_matrix is not None:
self._unbroadcasted_scale_tril = torch.linalg.cholesky(covariance_matrix)
else: # precision_matrix is not None
self._unbroadcasted_scale_tril = _precision_to_scale_tril(precision_matrix)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(MultivariateNormal, _instance)
batch_shape = torch.Size(batch_shape)
loc_shape = batch_shape + self.event_shape
cov_shape = batch_shape + self.event_shape + self.event_shape
new.loc = self.loc.expand(loc_shape)
new._unbroadcasted_scale_tril = self._unbroadcasted_scale_tril
if 'covariance_matrix' in self.__dict__:
new.covariance_matrix = self.covariance_matrix.expand(cov_shape)
if 'scale_tril' in self.__dict__:
new.scale_tril = self.scale_tril.expand(cov_shape)
if 'precision_matrix' in self.__dict__:
new.precision_matrix = self.precision_matrix.expand(cov_shape)
super(MultivariateNormal, new).__init__(batch_shape,
self.event_shape,
validate_args=False)
new._validate_args = self._validate_args
return new
@lazy_property
def scale_tril(self):
return self._unbroadcasted_scale_tril.expand(
self._batch_shape + self._event_shape + self._event_shape)
@lazy_property
def covariance_matrix(self):
return (torch.matmul(self._unbroadcasted_scale_tril,
self._unbroadcasted_scale_tril.mT)
.expand(self._batch_shape + self._event_shape + self._event_shape))
@lazy_property
def precision_matrix(self):
return torch.cholesky_inverse(self._unbroadcasted_scale_tril).expand(
self._batch_shape + self._event_shape + self._event_shape)
@property
def mean(self):
return self.loc
@property
def mode(self):
return self.loc
@property
def variance(self):
return self._unbroadcasted_scale_tril.pow(2).sum(-1).expand(
self._batch_shape + self._event_shape)
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
eps = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
return self.loc + _batch_mv(self._unbroadcasted_scale_tril, eps)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
diff = value - self.loc
M = _batch_mahalanobis(self._unbroadcasted_scale_tril, diff)
half_log_det = self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
return -0.5 * (self._event_shape[0] * math.log(2 * math.pi) + M) - half_log_det
def entropy(self):
half_log_det = self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
H = 0.5 * self._event_shape[0] * (1.0 + math.log(2 * math.pi)) + half_log_det
if len(self._batch_shape) == 0:
return H
else:
return H.expand(self._batch_shape)
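
# Illustrative usage sketch (hypothetical helper added to this dump, not part of
# the upstream file). The covariance_matrix, precision_matrix and scale_tril
# parameterizations should all describe the same distribution.
def _example_multivariate_normal_parameterizations_agree():
    torch.manual_seed(0)
    loc = torch.randn(3)
    A = torch.randn(3, 3)
    cov = A @ A.mT + 3 * torch.eye(3)
    x = torch.randn(3)
    by_cov = MultivariateNormal(loc, covariance_matrix=cov)
    by_prec = MultivariateNormal(loc, precision_matrix=torch.inverse(cov))
    by_tril = MultivariateNormal(loc, scale_tril=torch.linalg.cholesky(cov))
    assert torch.allclose(by_cov.log_prob(x), by_prec.log_prob(x), atol=1e-5)
    assert torch.allclose(by_cov.log_prob(x), by_tril.log_prob(x), atol=1e-5)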
| pytorch-master | torch/distributions/multivariate_normal.py |
import math
from numbers import Real
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import _standard_normal, broadcast_all
__all__ = ['Normal']
class Normal(ExponentialFamily):
r"""
Creates a normal (also called Gaussian) distribution parameterized by
:attr:`loc` and :attr:`scale`.
Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Normal(torch.tensor([0.0]), torch.tensor([1.0]))
>>> m.sample() # normally distributed with loc=0 and scale=1
tensor([ 0.1046])
Args:
loc (float or Tensor): mean of the distribution (often referred to as mu)
scale (float or Tensor): standard deviation of the distribution
(often referred to as sigma)
"""
arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
support = constraints.real
has_rsample = True
_mean_carrier_measure = 0
@property
def mean(self):
return self.loc
@property
def mode(self):
return self.loc
@property
def stddev(self):
return self.scale
@property
def variance(self):
return self.stddev.pow(2)
def __init__(self, loc, scale, validate_args=None):
self.loc, self.scale = broadcast_all(loc, scale)
if isinstance(loc, Number) and isinstance(scale, Number):
batch_shape = torch.Size()
else:
batch_shape = self.loc.size()
super(Normal, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Normal, _instance)
batch_shape = torch.Size(batch_shape)
new.loc = self.loc.expand(batch_shape)
new.scale = self.scale.expand(batch_shape)
super(Normal, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
return torch.normal(self.loc.expand(shape), self.scale.expand(shape))
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
eps = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
return self.loc + eps * self.scale
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
# compute the variance
var = (self.scale ** 2)
log_scale = math.log(self.scale) if isinstance(self.scale, Real) else self.scale.log()
return -((value - self.loc) ** 2) / (2 * var) - log_scale - math.log(math.sqrt(2 * math.pi))
def cdf(self, value):
if self._validate_args:
self._validate_sample(value)
return 0.5 * (1 + torch.erf((value - self.loc) * self.scale.reciprocal() / math.sqrt(2)))
def icdf(self, value):
return self.loc + self.scale * torch.erfinv(2 * value - 1) * math.sqrt(2)
def entropy(self):
return 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(self.scale)
@property
def _natural_params(self):
return (self.loc / self.scale.pow(2), -0.5 * self.scale.pow(2).reciprocal())
def _log_normalizer(self, x, y):
return -0.25 * x.pow(2) / y + 0.5 * torch.log(-math.pi / y)
| pytorch-master | torch/distributions/normal.py |
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all
__all__ = ['Poisson']
class Poisson(ExponentialFamily):
r"""
Creates a Poisson distribution parameterized by :attr:`rate`, the rate parameter.
Samples are nonnegative integers, with a pmf given by
.. math::
\mathrm{rate}^k \frac{e^{-\mathrm{rate}}}{k!}
Example::
>>> # xdoctest: +SKIP("poisson_cpu not implemented for 'Long'")
>>> m = Poisson(torch.tensor([4]))
>>> m.sample()
tensor([ 3.])
Args:
rate (Number, Tensor): the rate parameter
"""
arg_constraints = {'rate': constraints.nonnegative}
support = constraints.nonnegative_integer
@property
def mean(self):
return self.rate
@property
def mode(self):
return self.rate.floor()
@property
def variance(self):
return self.rate
def __init__(self, rate, validate_args=None):
self.rate, = broadcast_all(rate)
if isinstance(rate, Number):
batch_shape = torch.Size()
else:
batch_shape = self.rate.size()
super(Poisson, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Poisson, _instance)
batch_shape = torch.Size(batch_shape)
new.rate = self.rate.expand(batch_shape)
super(Poisson, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
return torch.poisson(self.rate.expand(shape))
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
rate, value = broadcast_all(self.rate, value)
return value.xlogy(rate) - rate - (value + 1).lgamma()
@property
def _natural_params(self):
return (torch.log(self.rate), )
def _log_normalizer(self, x):
return torch.exp(x)
| pytorch-master | torch/distributions/poisson.py |
from numbers import Real, Number
import torch
from torch.distributions import constraints
from torch.distributions.dirichlet import Dirichlet
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all
__all__ = ['Beta']
class Beta(ExponentialFamily):
r"""
Beta distribution parameterized by :attr:`concentration1` and :attr:`concentration0`.
Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Beta(torch.tensor([0.5]), torch.tensor([0.5]))
>>> m.sample() # Beta distributed with concentration concentration1 and concentration0
tensor([ 0.1046])
Args:
concentration1 (float or Tensor): 1st concentration parameter of the distribution
(often referred to as alpha)
concentration0 (float or Tensor): 2nd concentration parameter of the distribution
(often referred to as beta)
"""
arg_constraints = {'concentration1': constraints.positive, 'concentration0': constraints.positive}
support = constraints.unit_interval
has_rsample = True
def __init__(self, concentration1, concentration0, validate_args=None):
if isinstance(concentration1, Real) and isinstance(concentration0, Real):
concentration1_concentration0 = torch.tensor([float(concentration1), float(concentration0)])
else:
concentration1, concentration0 = broadcast_all(concentration1, concentration0)
concentration1_concentration0 = torch.stack([concentration1, concentration0], -1)
self._dirichlet = Dirichlet(concentration1_concentration0, validate_args=validate_args)
super(Beta, self).__init__(self._dirichlet._batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Beta, _instance)
batch_shape = torch.Size(batch_shape)
new._dirichlet = self._dirichlet.expand(batch_shape)
super(Beta, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
@property
def mean(self):
return self.concentration1 / (self.concentration1 + self.concentration0)
@property
def mode(self):
return self._dirichlet.mode[..., 0]
@property
def variance(self):
total = self.concentration1 + self.concentration0
return (self.concentration1 * self.concentration0 /
(total.pow(2) * (total + 1)))
def rsample(self, sample_shape=()):
return self._dirichlet.rsample(sample_shape).select(-1, 0)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
heads_tails = torch.stack([value, 1.0 - value], -1)
return self._dirichlet.log_prob(heads_tails)
def entropy(self):
return self._dirichlet.entropy()
@property
def concentration1(self):
result = self._dirichlet.concentration[..., 0]
if isinstance(result, Number):
return torch.tensor([result])
else:
return result
@property
def concentration0(self):
result = self._dirichlet.concentration[..., 1]
if isinstance(result, Number):
return torch.tensor([result])
else:
return result
@property
def _natural_params(self):
return (self.concentration1, self.concentration0)
def _log_normalizer(self, x, y):
return torch.lgamma(x) + torch.lgamma(y) - torch.lgamma(x + y)
| pytorch-master | torch/distributions/beta.py |
import torch
from torch._six import nan
from torch.distributions import constraints
from torch.distributions.uniform import Uniform
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import AffineTransform, PowerTransform
from torch.distributions.utils import broadcast_all, euler_constant
__all__ = ['Kumaraswamy']
def _moments(a, b, n):
"""
    Computes the nth moment of the Kumaraswamy distribution using torch.lgamma.
"""
arg1 = 1 + n / a
log_value = torch.lgamma(arg1) + torch.lgamma(b) - torch.lgamma(arg1 + b)
return b * torch.exp(log_value)
class Kumaraswamy(TransformedDistribution):
r"""
Samples from a Kumaraswamy distribution.
Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Kumaraswamy(torch.tensor([1.0]), torch.tensor([1.0]))
>>> m.sample() # sample from a Kumaraswamy distribution with concentration alpha=1 and beta=1
tensor([ 0.1729])
Args:
concentration1 (float or Tensor): 1st concentration parameter of the distribution
(often referred to as alpha)
concentration0 (float or Tensor): 2nd concentration parameter of the distribution
(often referred to as beta)
"""
arg_constraints = {'concentration1': constraints.positive, 'concentration0': constraints.positive}
support = constraints.unit_interval
has_rsample = True
def __init__(self, concentration1, concentration0, validate_args=None):
self.concentration1, self.concentration0 = broadcast_all(concentration1, concentration0)
finfo = torch.finfo(self.concentration0.dtype)
base_dist = Uniform(torch.full_like(self.concentration0, 0),
torch.full_like(self.concentration0, 1),
validate_args=validate_args)
transforms = [PowerTransform(exponent=self.concentration0.reciprocal()),
AffineTransform(loc=1., scale=-1.),
PowerTransform(exponent=self.concentration1.reciprocal())]
super(Kumaraswamy, self).__init__(base_dist, transforms, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Kumaraswamy, _instance)
new.concentration1 = self.concentration1.expand(batch_shape)
new.concentration0 = self.concentration0.expand(batch_shape)
return super(Kumaraswamy, self).expand(batch_shape, _instance=new)
@property
def mean(self):
return _moments(self.concentration1, self.concentration0, 1)
@property
def mode(self):
# Evaluate in log-space for numerical stability.
log_mode = self.concentration0.reciprocal() * \
(-self.concentration0).log1p() - (-self.concentration0 * self.concentration1).log1p()
log_mode[(self.concentration0 < 1) | (self.concentration1 < 1)] = nan
return log_mode.exp()
@property
def variance(self):
return _moments(self.concentration1, self.concentration0, 2) - torch.pow(self.mean, 2)
def entropy(self):
t1 = (1 - self.concentration1.reciprocal())
t0 = (1 - self.concentration0.reciprocal())
H0 = torch.digamma(self.concentration0 + 1) + euler_constant
return t0 + t1 * H0 - torch.log(self.concentration1) - torch.log(self.concentration0)
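
# Illustrative usage sketch (hypothetical helper added to this dump, not part of
# the upstream file). The Uniform -> PowerTransform -> AffineTransform ->
# PowerTransform chain above should reproduce the textbook Kumaraswamy density
#     f(x) = a * b * x**(a - 1) * (1 - x**a)**(b - 1),
# with a = concentration1 and b = concentration0.
def _example_kumaraswamy_log_prob_matches_closed_form():
    a = torch.tensor(2.0)
    b = torch.tensor(3.0)
    m = Kumaraswamy(a, b)
    x = torch.tensor([0.1, 0.5, 0.9])
    closed_form = torch.log(a) + torch.log(b) + (a - 1) * torch.log(x) + (b - 1) * torch.log1p(-x ** a)
    assert torch.allclose(m.log_prob(x), closed_form, atol=1e-6)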
| pytorch-master | torch/distributions/kumaraswamy.py |
import math
import torch
from torch._six import inf
from torch.distributions import constraints
from torch.distributions.transforms import AbsTransform
from torch.distributions.normal import Normal
from torch.distributions.transformed_distribution import TransformedDistribution
__all__ = ['HalfNormal']
class HalfNormal(TransformedDistribution):
r"""
Creates a half-normal distribution parameterized by `scale` where::
X ~ Normal(0, scale)
Y = |X| ~ HalfNormal(scale)
Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = HalfNormal(torch.tensor([1.0]))
>>> m.sample() # half-normal distributed with scale=1
tensor([ 0.1046])
Args:
scale (float or Tensor): scale of the full Normal distribution
"""
arg_constraints = {'scale': constraints.positive}
support = constraints.nonnegative
has_rsample = True
def __init__(self, scale, validate_args=None):
base_dist = Normal(0, scale, validate_args=False)
super(HalfNormal, self).__init__(base_dist, AbsTransform(),
validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(HalfNormal, _instance)
return super(HalfNormal, self).expand(batch_shape, _instance=new)
@property
def scale(self):
return self.base_dist.scale
@property
def mean(self):
return self.scale * math.sqrt(2 / math.pi)
@property
def mode(self):
return torch.zeros_like(self.scale)
@property
def variance(self):
return self.scale.pow(2) * (1 - 2 / math.pi)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
log_prob = self.base_dist.log_prob(value) + math.log(2)
log_prob[value.expand(log_prob.shape) < 0] = -inf
return log_prob
def cdf(self, value):
if self._validate_args:
self._validate_sample(value)
return 2 * self.base_dist.cdf(value) - 1
def icdf(self, prob):
return self.base_dist.icdf((prob + 1) / 2)
def entropy(self):
return self.base_dist.entropy() - math.log(2)
| pytorch-master | torch/distributions/half_normal.py |
import torch
from torch.distributions import constraints
from torch.distributions.categorical import Categorical
from torch.distributions.utils import clamp_probs, broadcast_all
from torch.distributions.distribution import Distribution
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import ExpTransform
__all__ = ['ExpRelaxedCategorical', 'RelaxedOneHotCategorical']
class ExpRelaxedCategorical(Distribution):
r"""
    Creates an ExpRelaxedCategorical distribution parameterized by
:attr:`temperature`, and either :attr:`probs` or :attr:`logits` (but not both).
Returns the log of a point in the simplex. Based on the interface to
:class:`OneHotCategorical`.
Implementation based on [1].
See also: :func:`torch.distributions.OneHotCategorical`
Args:
temperature (Tensor): relaxation temperature
probs (Tensor): event probabilities
logits (Tensor): unnormalized log probability for each event
[1] The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables
(Maddison et al, 2017)
[2] Categorical Reparametrization with Gumbel-Softmax
(Jang et al, 2017)
"""
arg_constraints = {'probs': constraints.simplex,
'logits': constraints.real_vector}
support = constraints.real_vector # The true support is actually a submanifold of this.
has_rsample = True
def __init__(self, temperature, probs=None, logits=None, validate_args=None):
self._categorical = Categorical(probs, logits)
self.temperature = temperature
batch_shape = self._categorical.batch_shape
event_shape = self._categorical.param_shape[-1:]
super(ExpRelaxedCategorical, self).__init__(batch_shape, event_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(ExpRelaxedCategorical, _instance)
batch_shape = torch.Size(batch_shape)
new.temperature = self.temperature
new._categorical = self._categorical.expand(batch_shape)
super(ExpRelaxedCategorical, new).__init__(batch_shape, self.event_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._categorical._new(*args, **kwargs)
@property
def param_shape(self):
return self._categorical.param_shape
@property
def logits(self):
return self._categorical.logits
@property
def probs(self):
return self._categorical.probs
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
uniforms = clamp_probs(torch.rand(shape, dtype=self.logits.dtype, device=self.logits.device))
gumbels = -((-(uniforms.log())).log())
scores = (self.logits + gumbels) / self.temperature
return scores - scores.logsumexp(dim=-1, keepdim=True)
def log_prob(self, value):
K = self._categorical._num_events
if self._validate_args:
self._validate_sample(value)
logits, value = broadcast_all(self.logits, value)
log_scale = (torch.full_like(self.temperature, float(K)).lgamma() -
self.temperature.log().mul(-(K - 1)))
score = logits - value.mul(self.temperature)
score = (score - score.logsumexp(dim=-1, keepdim=True)).sum(-1)
return score + log_scale
class RelaxedOneHotCategorical(TransformedDistribution):
r"""
    Creates a RelaxedOneHotCategorical distribution parameterized by
    :attr:`temperature`, and either :attr:`probs` or :attr:`logits`.
    This is a relaxed version of the :class:`OneHotCategorical` distribution, so
    its samples lie on the simplex and are reparameterizable.
Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = RelaxedOneHotCategorical(torch.tensor([2.2]),
... torch.tensor([0.1, 0.2, 0.3, 0.4]))
>>> m.sample()
tensor([ 0.1294, 0.2324, 0.3859, 0.2523])
Args:
temperature (Tensor): relaxation temperature
probs (Tensor): event probabilities
logits (Tensor): unnormalized log probability for each event
"""
arg_constraints = {'probs': constraints.simplex,
'logits': constraints.real_vector}
support = constraints.simplex
has_rsample = True
def __init__(self, temperature, probs=None, logits=None, validate_args=None):
base_dist = ExpRelaxedCategorical(temperature, probs, logits, validate_args=validate_args)
super(RelaxedOneHotCategorical, self).__init__(base_dist,
ExpTransform(),
validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(RelaxedOneHotCategorical, _instance)
return super(RelaxedOneHotCategorical, self).expand(batch_shape, _instance=new)
@property
def temperature(self):
return self.base_dist.temperature
@property
def logits(self):
return self.base_dist.logits
@property
def probs(self):
return self.base_dist.probs
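
# Illustrative usage sketch (hypothetical helper added to this dump, not part of
# the upstream file). Relaxed samples lie on the probability simplex and
# gradients flow back to the logits through rsample (the Gumbel-softmax
# reparameterization).
def _example_relaxed_one_hot_is_reparameterizable():
    torch.manual_seed(0)
    logits = torch.randn(4, requires_grad=True)
    m = RelaxedOneHotCategorical(torch.tensor(0.5), logits=logits)
    sample = m.rsample()
    assert torch.allclose(sample.sum(-1), torch.tensor(1.0), atol=1e-6)
    (sample * torch.arange(4, dtype=sample.dtype)).sum().backward()
    assert logits.grad is not None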
| pytorch-master | torch/distributions/relaxed_categorical.py |
import math
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.multivariate_normal import _batch_mahalanobis, _batch_mv
from torch.distributions.utils import _standard_normal, lazy_property
__all__ = ['LowRankMultivariateNormal']
def _batch_capacitance_tril(W, D):
r"""
Computes Cholesky of :math:`I + W.T @ inv(D) @ W` for a batch of matrices :math:`W`
and a batch of vectors :math:`D`.
"""
m = W.size(-1)
Wt_Dinv = W.mT / D.unsqueeze(-2)
K = torch.matmul(Wt_Dinv, W).contiguous()
K.view(-1, m * m)[:, ::m + 1] += 1 # add identity matrix to K
return torch.linalg.cholesky(K)
def _batch_lowrank_logdet(W, D, capacitance_tril):
r"""
Uses "matrix determinant lemma"::
log|W @ W.T + D| = log|C| + log|D|,
where :math:`C` is the capacitance matrix :math:`I + W.T @ inv(D) @ W`, to compute
the log determinant.
"""
return 2 * capacitance_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) + D.log().sum(-1)
def _batch_lowrank_mahalanobis(W, D, x, capacitance_tril):
r"""
Uses "Woodbury matrix identity"::
inv(W @ W.T + D) = inv(D) - inv(D) @ W @ inv(C) @ W.T @ inv(D),
where :math:`C` is the capacitance matrix :math:`I + W.T @ inv(D) @ W`, to compute the squared
Mahalanobis distance :math:`x.T @ inv(W @ W.T + D) @ x`.
"""
Wt_Dinv = W.mT / D.unsqueeze(-2)
Wt_Dinv_x = _batch_mv(Wt_Dinv, x)
mahalanobis_term1 = (x.pow(2) / D).sum(-1)
mahalanobis_term2 = _batch_mahalanobis(capacitance_tril, Wt_Dinv_x)
return mahalanobis_term1 - mahalanobis_term2
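
# Illustrative sanity sketch (hypothetical helper added to this dump, not part of
# the upstream file). The Woodbury-based helper above should agree with directly
# solving against the full matrix W @ W.T + diag(D).
def _example_lowrank_mahalanobis_matches_direct_solve():
    torch.manual_seed(0)
    W = torch.randn(4, 2)
    D = torch.rand(4) + 0.5
    x = torch.randn(4)
    capacitance_tril = _batch_capacitance_tril(W, D)
    full = W @ W.mT + torch.diag(D)
    direct = x @ torch.linalg.solve(full, x)
    woodbury = _batch_lowrank_mahalanobis(W, D, x, capacitance_tril)
    assert torch.allclose(woodbury, direct, atol=1e-5)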
class LowRankMultivariateNormal(Distribution):
r"""
Creates a multivariate normal distribution with covariance matrix having a low-rank form
parameterized by :attr:`cov_factor` and :attr:`cov_diag`::
covariance_matrix = cov_factor @ cov_factor.T + cov_diag
    Example::
>>> # xdoctest: +REQUIRES(--lapack)
>>> m = LowRankMultivariateNormal(torch.zeros(2), torch.tensor([[1.], [0.]]), torch.ones(2))
>>> m.sample() # normally distributed with mean=`[0,0]`, cov_factor=`[[1],[0]]`, cov_diag=`[1,1]`
tensor([-0.2102, -0.5429])
Args:
loc (Tensor): mean of the distribution with shape `batch_shape + event_shape`
cov_factor (Tensor): factor part of low-rank form of covariance matrix with shape
`batch_shape + event_shape + (rank,)`
cov_diag (Tensor): diagonal part of low-rank form of covariance matrix with shape
`batch_shape + event_shape`
Note:
The computation for determinant and inverse of covariance matrix is avoided when
`cov_factor.shape[1] << cov_factor.shape[0]` thanks to `Woodbury matrix identity
<https://en.wikipedia.org/wiki/Woodbury_matrix_identity>`_ and
`matrix determinant lemma <https://en.wikipedia.org/wiki/Matrix_determinant_lemma>`_.
Thanks to these formulas, we just need to compute the determinant and inverse of
the small size "capacitance" matrix::
capacitance = I + cov_factor.T @ inv(cov_diag) @ cov_factor
"""
arg_constraints = {"loc": constraints.real_vector,
"cov_factor": constraints.independent(constraints.real, 2),
"cov_diag": constraints.independent(constraints.positive, 1)}
support = constraints.real_vector
has_rsample = True
def __init__(self, loc, cov_factor, cov_diag, validate_args=None):
if loc.dim() < 1:
raise ValueError("loc must be at least one-dimensional.")
event_shape = loc.shape[-1:]
if cov_factor.dim() < 2:
raise ValueError("cov_factor must be at least two-dimensional, "
"with optional leading batch dimensions")
if cov_factor.shape[-2:-1] != event_shape:
raise ValueError("cov_factor must be a batch of matrices with shape {} x m"
.format(event_shape[0]))
if cov_diag.shape[-1:] != event_shape:
raise ValueError("cov_diag must be a batch of vectors with shape {}".format(event_shape))
loc_ = loc.unsqueeze(-1)
cov_diag_ = cov_diag.unsqueeze(-1)
try:
loc_, self.cov_factor, cov_diag_ = torch.broadcast_tensors(loc_, cov_factor, cov_diag_)
except RuntimeError as e:
raise ValueError("Incompatible batch shapes: loc {}, cov_factor {}, cov_diag {}"
.format(loc.shape, cov_factor.shape, cov_diag.shape)) from e
self.loc = loc_[..., 0]
self.cov_diag = cov_diag_[..., 0]
batch_shape = self.loc.shape[:-1]
self._unbroadcasted_cov_factor = cov_factor
self._unbroadcasted_cov_diag = cov_diag
self._capacitance_tril = _batch_capacitance_tril(cov_factor, cov_diag)
super(LowRankMultivariateNormal, self).__init__(batch_shape, event_shape,
validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(LowRankMultivariateNormal, _instance)
batch_shape = torch.Size(batch_shape)
loc_shape = batch_shape + self.event_shape
new.loc = self.loc.expand(loc_shape)
new.cov_diag = self.cov_diag.expand(loc_shape)
new.cov_factor = self.cov_factor.expand(loc_shape + self.cov_factor.shape[-1:])
new._unbroadcasted_cov_factor = self._unbroadcasted_cov_factor
new._unbroadcasted_cov_diag = self._unbroadcasted_cov_diag
new._capacitance_tril = self._capacitance_tril
super(LowRankMultivariateNormal, new).__init__(batch_shape,
self.event_shape,
validate_args=False)
new._validate_args = self._validate_args
return new
@property
def mean(self):
return self.loc
@property
def mode(self):
return self.loc
@lazy_property
def variance(self):
return (self._unbroadcasted_cov_factor.pow(2).sum(-1)
+ self._unbroadcasted_cov_diag).expand(self._batch_shape + self._event_shape)
@lazy_property
def scale_tril(self):
        # The following identity is used to increase the numerical stability of the computation
# for Cholesky decomposition (see http://www.gaussianprocess.org/gpml/, Section 3.4.3):
# W @ W.T + D = D1/2 @ (I + D-1/2 @ W @ W.T @ D-1/2) @ D1/2
# The matrix "I + D-1/2 @ W @ W.T @ D-1/2" has eigenvalues bounded from below by 1,
# hence it is well-conditioned and safe to take Cholesky decomposition.
n = self._event_shape[0]
cov_diag_sqrt_unsqueeze = self._unbroadcasted_cov_diag.sqrt().unsqueeze(-1)
Dinvsqrt_W = self._unbroadcasted_cov_factor / cov_diag_sqrt_unsqueeze
K = torch.matmul(Dinvsqrt_W, Dinvsqrt_W.mT).contiguous()
K.view(-1, n * n)[:, ::n + 1] += 1 # add identity matrix to K
scale_tril = cov_diag_sqrt_unsqueeze * torch.linalg.cholesky(K)
return scale_tril.expand(self._batch_shape + self._event_shape + self._event_shape)
@lazy_property
def covariance_matrix(self):
covariance_matrix = (torch.matmul(self._unbroadcasted_cov_factor,
self._unbroadcasted_cov_factor.mT)
+ torch.diag_embed(self._unbroadcasted_cov_diag))
return covariance_matrix.expand(self._batch_shape + self._event_shape +
self._event_shape)
@lazy_property
def precision_matrix(self):
# We use "Woodbury matrix identity" to take advantage of low rank form::
# inv(W @ W.T + D) = inv(D) - inv(D) @ W @ inv(C) @ W.T @ inv(D)
# where :math:`C` is the capacitance matrix.
Wt_Dinv = (self._unbroadcasted_cov_factor.mT
/ self._unbroadcasted_cov_diag.unsqueeze(-2))
A = torch.linalg.solve_triangular(self._capacitance_tril, Wt_Dinv, upper=False)
precision_matrix = torch.diag_embed(self._unbroadcasted_cov_diag.reciprocal()) - A.mT @ A
return precision_matrix.expand(self._batch_shape + self._event_shape +
self._event_shape)
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
W_shape = shape[:-1] + self.cov_factor.shape[-1:]
eps_W = _standard_normal(W_shape, dtype=self.loc.dtype, device=self.loc.device)
eps_D = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
return (self.loc + _batch_mv(self._unbroadcasted_cov_factor, eps_W)
+ self._unbroadcasted_cov_diag.sqrt() * eps_D)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
diff = value - self.loc
M = _batch_lowrank_mahalanobis(self._unbroadcasted_cov_factor,
self._unbroadcasted_cov_diag,
diff,
self._capacitance_tril)
log_det = _batch_lowrank_logdet(self._unbroadcasted_cov_factor,
self._unbroadcasted_cov_diag,
self._capacitance_tril)
return -0.5 * (self._event_shape[0] * math.log(2 * math.pi) + log_det + M)
def entropy(self):
log_det = _batch_lowrank_logdet(self._unbroadcasted_cov_factor,
self._unbroadcasted_cov_diag,
self._capacitance_tril)
H = 0.5 * (self._event_shape[0] * (1.0 + math.log(2 * math.pi)) + log_det)
if len(self._batch_shape) == 0:
return H
else:
return H.expand(self._batch_shape)
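
# Illustrative usage sketch (hypothetical helper added to this dump, not part of
# the upstream file). A low-rank parameterization (cov_factor=W, cov_diag=D)
# should match a full MultivariateNormal with covariance W @ W.T + diag(D).
def _example_lowrank_matches_full_multivariate_normal():
    from torch.distributions.multivariate_normal import MultivariateNormal
    torch.manual_seed(0)
    loc = torch.randn(4)
    W = torch.randn(4, 2)
    D = torch.rand(4) + 0.5
    x = torch.randn(4)
    low_rank = LowRankMultivariateNormal(loc, W, D)
    full = MultivariateNormal(loc, covariance_matrix=W @ W.mT + torch.diag(D))
    assert torch.allclose(low_rank.log_prob(x), full.log_prob(x), atol=1e-5)
    assert torch.allclose(low_rank.entropy(), full.entropy(), atol=1e-5)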
| pytorch-master | torch/distributions/lowrank_multivariate_normal.py |
import math
import torch
from torch._six import inf
from torch.distributions import constraints
from torch.distributions.transforms import AbsTransform
from torch.distributions.cauchy import Cauchy
from torch.distributions.transformed_distribution import TransformedDistribution
__all__ = ['HalfCauchy']
class HalfCauchy(TransformedDistribution):
r"""
Creates a half-Cauchy distribution parameterized by `scale` where::
X ~ Cauchy(0, scale)
Y = |X| ~ HalfCauchy(scale)
Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = HalfCauchy(torch.tensor([1.0]))
>>> m.sample() # half-cauchy distributed with scale=1
tensor([ 2.3214])
Args:
scale (float or Tensor): scale of the full Cauchy distribution
"""
arg_constraints = {'scale': constraints.positive}
support = constraints.nonnegative
has_rsample = True
def __init__(self, scale, validate_args=None):
base_dist = Cauchy(0, scale, validate_args=False)
super(HalfCauchy, self).__init__(base_dist, AbsTransform(),
validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(HalfCauchy, _instance)
return super(HalfCauchy, self).expand(batch_shape, _instance=new)
@property
def scale(self):
return self.base_dist.scale
@property
def mean(self):
return torch.full(self._extended_shape(), math.inf, dtype=self.scale.dtype, device=self.scale.device)
@property
def mode(self):
return torch.zeros_like(self.scale)
@property
def variance(self):
return self.base_dist.variance
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
value = torch.as_tensor(value, dtype=self.base_dist.scale.dtype,
device=self.base_dist.scale.device)
log_prob = self.base_dist.log_prob(value) + math.log(2)
log_prob[value.expand(log_prob.shape) < 0] = -inf
return log_prob
def cdf(self, value):
if self._validate_args:
self._validate_sample(value)
return 2 * self.base_dist.cdf(value) - 1
def icdf(self, prob):
return self.base_dist.icdf((prob + 1) / 2)
def entropy(self):
return self.base_dist.entropy() - math.log(2)
| pytorch-master | torch/distributions/half_cauchy.py |
r"""
The ``distributions`` package contains parameterizable probability distributions
and sampling functions. This allows the construction of stochastic computation
graphs and stochastic gradient estimators for optimization. This package
generally follows the design of the `TensorFlow Distributions`_ package.
.. _`TensorFlow Distributions`:
https://arxiv.org/abs/1711.10604
It is not possible to directly backpropagate through random samples. However,
there are two main methods for creating surrogate functions that can be
backpropagated through. These are the score function estimator/likelihood ratio
estimator/REINFORCE and the pathwise derivative estimator. REINFORCE is commonly
seen as the basis for policy gradient methods in reinforcement learning, and the
pathwise derivative estimator is commonly seen in the reparameterization trick
in variational autoencoders. Whilst the score function only requires the value
of samples :math:`f(x)`, the pathwise derivative requires the derivative
:math:`f'(x)`. The next sections discuss these two in a reinforcement learning
example. For more details see
`Gradient Estimation Using Stochastic Computation Graphs`_ .
.. _`Gradient Estimation Using Stochastic Computation Graphs`:
https://arxiv.org/abs/1506.05254
Score function
^^^^^^^^^^^^^^
When the probability density function is differentiable with respect to its
parameters, we only need :meth:`~torch.distributions.Distribution.sample` and
:meth:`~torch.distributions.Distribution.log_prob` to implement REINFORCE:
.. math::
\Delta\theta = \alpha r \frac{\partial\log p(a|\pi^\theta(s))}{\partial\theta}
where :math:`\theta` are the parameters, :math:`\alpha` is the learning rate,
:math:`r` is the reward and :math:`p(a|\pi^\theta(s))` is the probability of
taking action :math:`a` in state :math:`s` given policy :math:`\pi^\theta`.
In practice we would sample an action from the output of a network, apply this
action in an environment, and then use ``log_prob`` to construct an equivalent
loss function. Note that we use a negative because optimizers use gradient
descent, whilst the rule above assumes gradient ascent. With a categorical
policy, the code for implementing REINFORCE would be as follows::
probs = policy_network(state)
# Note that this is equivalent to what used to be called multinomial
m = Categorical(probs)
action = m.sample()
next_state, reward = env.step(action)
loss = -m.log_prob(action) * reward
loss.backward()
Pathwise derivative
^^^^^^^^^^^^^^^^^^^
The other way to implement these stochastic/policy gradients would be to use the
reparameterization trick from the
:meth:`~torch.distributions.Distribution.rsample` method, where the
parameterized random variable can be constructed via a parameterized
deterministic function of a parameter-free random variable. The reparameterized
sample therefore becomes differentiable. The code for implementing the pathwise
derivative would be as follows::
params = policy_network(state)
m = Normal(*params)
# Any distribution with .has_rsample == True could work based on the application
action = m.rsample()
next_state, reward = env.step(action) # Assuming that reward is differentiable
loss = -reward
loss.backward()
"""
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .chi2 import Chi2
from .constraint_registry import biject_to, transform_to
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exp_family import ExponentialFamily
from .exponential import Exponential
from .fishersnedecor import FisherSnedecor
from .gamma import Gamma
from .geometric import Geometric
from .gumbel import Gumbel
from .half_cauchy import HalfCauchy
from .half_normal import HalfNormal
from .independent import Independent
from .kl import kl_divergence, register_kl, _add_kl_info
from .kumaraswamy import Kumaraswamy
from .laplace import Laplace
from .lkj_cholesky import LKJCholesky
from .log_normal import LogNormal
from .logistic_normal import LogisticNormal
from .lowrank_multivariate_normal import LowRankMultivariateNormal
from .mixture_same_family import MixtureSameFamily
from .multinomial import Multinomial
from .multivariate_normal import MultivariateNormal
from .negative_binomial import NegativeBinomial
from .normal import Normal
from .one_hot_categorical import OneHotCategorical, OneHotCategoricalStraightThrough
from .pareto import Pareto
from .poisson import Poisson
from .relaxed_bernoulli import RelaxedBernoulli
from .relaxed_categorical import RelaxedOneHotCategorical
from .studentT import StudentT
from .transformed_distribution import TransformedDistribution
from .transforms import * # noqa: F403
from .uniform import Uniform
from .von_mises import VonMises
from .weibull import Weibull
from .wishart import Wishart
from . import transforms
_add_kl_info()
del _add_kl_info
__all__ = [
'Bernoulli',
'Beta',
'Binomial',
'Categorical',
'Cauchy',
'Chi2',
'ContinuousBernoulli',
'Dirichlet',
'Distribution',
'Exponential',
'ExponentialFamily',
'FisherSnedecor',
'Gamma',
'Geometric',
'Gumbel',
'HalfCauchy',
'HalfNormal',
'Independent',
'Kumaraswamy',
'LKJCholesky',
'Laplace',
'LogNormal',
'LogisticNormal',
'LowRankMultivariateNormal',
'MixtureSameFamily',
'Multinomial',
'MultivariateNormal',
'NegativeBinomial',
'Normal',
'OneHotCategorical',
'OneHotCategoricalStraightThrough',
'Pareto',
'RelaxedBernoulli',
'RelaxedOneHotCategorical',
'StudentT',
'Poisson',
'Uniform',
'VonMises',
'Weibull',
'Wishart',
'TransformedDistribution',
'biject_to',
'kl_divergence',
'register_kl',
'transform_to',
]
__all__.extend(transforms.__all__)
| pytorch-master | torch/distributions/__init__.py |
"""
This closely follows the implementation in NumPyro (https://github.com/pyro-ppl/numpyro).
Original copyright notice:
# Copyright: Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
import math
import torch
from torch.distributions import constraints, Beta
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all
__all__ = ['LKJCholesky']
class LKJCholesky(Distribution):
r"""
LKJ distribution for lower Cholesky factor of correlation matrices.
The distribution is controlled by ``concentration`` parameter :math:`\eta`
to make the probability of the correlation matrix :math:`M` generated from
    a Cholesky factor proportional to :math:`\det(M)^{\eta - 1}`. Because of that,
when ``concentration == 1``, we have a uniform distribution over Cholesky
factors of correlation matrices. Note that this distribution samples the
Cholesky factor of correlation matrices and not the correlation matrices
themselves and thereby differs slightly from the derivations in [1] for
the `LKJCorr` distribution. For sampling, this uses the Onion method from
[1] Section 3.
L ~ LKJCholesky(dim, concentration)
X = L @ L' ~ LKJCorr(dim, concentration)
Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> l = LKJCholesky(3, 0.5)
>>> l.sample() # l @ l.T is a sample of a correlation 3x3 matrix
tensor([[ 1.0000, 0.0000, 0.0000],
[ 0.3516, 0.9361, 0.0000],
[-0.1899, 0.4748, 0.8593]])
Args:
        dim (int): dimension of the matrices
concentration (float or Tensor): concentration/shape parameter of the
distribution (often referred to as eta)
**References**
[1] `Generating random correlation matrices based on vines and extended onion method`,
Daniel Lewandowski, Dorota Kurowicka, Harry Joe.
"""
arg_constraints = {'concentration': constraints.positive}
support = constraints.corr_cholesky
def __init__(self, dim, concentration=1., validate_args=None):
if dim < 2:
raise ValueError(f'Expected dim to be an integer greater than or equal to 2. Found dim={dim}.')
self.dim = dim
self.concentration, = broadcast_all(concentration)
batch_shape = self.concentration.size()
event_shape = torch.Size((dim, dim))
# This is used to draw vectorized samples from the beta distribution in Sec. 3.2 of [1].
marginal_conc = self.concentration + 0.5 * (self.dim - 2)
offset = torch.arange(self.dim - 1, dtype=self.concentration.dtype, device=self.concentration.device)
offset = torch.cat([offset.new_zeros((1,)), offset])
beta_conc1 = offset + 0.5
beta_conc0 = marginal_conc.unsqueeze(-1) - 0.5 * offset
self._beta = Beta(beta_conc1, beta_conc0)
super(LKJCholesky, self).__init__(batch_shape, event_shape, validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(LKJCholesky, _instance)
batch_shape = torch.Size(batch_shape)
new.dim = self.dim
new.concentration = self.concentration.expand(batch_shape)
new._beta = self._beta.expand(batch_shape + (self.dim,))
super(LKJCholesky, new).__init__(batch_shape, self.event_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def sample(self, sample_shape=torch.Size()):
# This uses the Onion method, but there are a few differences from [1] Sec. 3.2:
# - This vectorizes the for loop and also works for heterogeneous eta.
# - Same algorithm generalizes to n=1.
# - The procedure is simplified since we are sampling the cholesky factor of
# the correlation matrix instead of the correlation matrix itself. As such,
# we only need to generate `w`.
y = self._beta.sample(sample_shape).unsqueeze(-1)
u_normal = torch.randn(self._extended_shape(sample_shape),
dtype=y.dtype,
device=y.device).tril(-1)
u_hypersphere = u_normal / u_normal.norm(dim=-1, keepdim=True)
# Replace NaNs in first row
u_hypersphere[..., 0, :].fill_(0.)
w = torch.sqrt(y) * u_hypersphere
# Fill diagonal elements; clamp for numerical stability
eps = torch.finfo(w.dtype).tiny
diag_elems = torch.clamp(1 - torch.sum(w**2, dim=-1), min=eps).sqrt()
w += torch.diag_embed(diag_elems)
return w
def log_prob(self, value):
# See: https://mc-stan.org/docs/2_25/functions-reference/cholesky-lkj-correlation-distribution.html
# The probability of a correlation matrix is proportional to
# determinant ** (concentration - 1) = prod(L_ii ^ 2(concentration - 1))
# Additionally, the Jacobian of the transformation from Cholesky factor to
# correlation matrix is:
# prod(L_ii ^ (D - i))
# So the probability of a Cholesky factor is proportional to
# prod(L_ii ^ (2 * concentration - 2 + D - i)) = prod(L_ii ^ order_i)
# with order_i = 2 * concentration - 2 + D - i
if self._validate_args:
self._validate_sample(value)
diag_elems = value.diagonal(dim1=-1, dim2=-2)[..., 1:]
order = torch.arange(2, self.dim + 1, device=self.concentration.device)
order = 2 * (self.concentration - 1).unsqueeze(-1) + self.dim - order
unnormalized_log_pdf = torch.sum(order * diag_elems.log(), dim=-1)
# Compute normalization constant (page 1999 of [1])
dm1 = self.dim - 1
alpha = self.concentration + 0.5 * dm1
denominator = torch.lgamma(alpha) * dm1
numerator = torch.mvlgamma(alpha - 0.5, dm1)
# pi_constant in [1] is D * (D - 1) / 4 * log(pi)
# pi_constant in multigammaln is (D - 1) * (D - 2) / 4 * log(pi)
# hence, we need to add a pi_constant = (D - 1) * log(pi) / 2
pi_constant = 0.5 * dm1 * math.log(math.pi)
normalize_term = pi_constant + numerator - denominator
return unnormalized_log_pdf - normalize_term
| pytorch-master | torch/distributions/lkj_cholesky.py |
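# Illustrative usage sketch (not part of the repository sources); it assumes only the
# public torch.distributions API defined above. It draws one Cholesky factor and
# recovers the correlation matrix it parameterizes.
import torch
from torch.distributions import LKJCholesky

lkj = LKJCholesky(dim=3, concentration=0.5)
L = lkj.sample()                                   # lower-triangular Cholesky factor
corr = L @ L.transpose(-1, -2)                     # symmetric correlation matrix
print(torch.allclose(corr.diagonal(), torch.ones(3), atol=1e-6))  # True: unit diagonal
print(lkj.log_prob(L))                             # scalar log-density of the factor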
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import _sum_rightmost
from typing import Dict
__all__ = ['Independent']
class Independent(Distribution):
r"""
Reinterprets some of the batch dims of a distribution as event dims.
This is mainly useful for changing the shape of the result of
:meth:`log_prob`. For example to create a diagonal Normal distribution with
the same shape as a Multivariate Normal distribution (so they are
interchangeable), you can::
>>> from torch.distributions.multivariate_normal import MultivariateNormal
>>> from torch.distributions.normal import Normal
>>> loc = torch.zeros(3)
>>> scale = torch.ones(3)
>>> mvn = MultivariateNormal(loc, scale_tril=torch.diag(scale))
>>> [mvn.batch_shape, mvn.event_shape]
[torch.Size([]), torch.Size([3])]
>>> normal = Normal(loc, scale)
>>> [normal.batch_shape, normal.event_shape]
[torch.Size([3]), torch.Size([])]
>>> diagn = Independent(normal, 1)
>>> [diagn.batch_shape, diagn.event_shape]
[torch.Size([]), torch.Size([3])]
Args:
base_distribution (torch.distributions.distribution.Distribution): a
base distribution
reinterpreted_batch_ndims (int): the number of batch dims to
reinterpret as event dims
"""
arg_constraints: Dict[str, constraints.Constraint] = {}
def __init__(self, base_distribution, reinterpreted_batch_ndims, validate_args=None):
if reinterpreted_batch_ndims > len(base_distribution.batch_shape):
raise ValueError("Expected reinterpreted_batch_ndims <= len(base_distribution.batch_shape), "
"actual {} vs {}".format(reinterpreted_batch_ndims,
len(base_distribution.batch_shape)))
shape = base_distribution.batch_shape + base_distribution.event_shape
event_dim = reinterpreted_batch_ndims + len(base_distribution.event_shape)
batch_shape = shape[:len(shape) - event_dim]
event_shape = shape[len(shape) - event_dim:]
self.base_dist = base_distribution
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
super(Independent, self).__init__(batch_shape, event_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Independent, _instance)
batch_shape = torch.Size(batch_shape)
new.base_dist = self.base_dist.expand(batch_shape +
self.event_shape[:self.reinterpreted_batch_ndims])
new.reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
super(Independent, new).__init__(batch_shape, self.event_shape, validate_args=False)
new._validate_args = self._validate_args
return new
@property
def has_rsample(self):
return self.base_dist.has_rsample
@property
def has_enumerate_support(self):
if self.reinterpreted_batch_ndims > 0:
return False
return self.base_dist.has_enumerate_support
@constraints.dependent_property
def support(self):
result = self.base_dist.support
if self.reinterpreted_batch_ndims:
result = constraints.independent(result, self.reinterpreted_batch_ndims)
return result
@property
def mean(self):
return self.base_dist.mean
@property
def mode(self):
return self.base_dist.mode
@property
def variance(self):
return self.base_dist.variance
def sample(self, sample_shape=torch.Size()):
return self.base_dist.sample(sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.base_dist.rsample(sample_shape)
def log_prob(self, value):
log_prob = self.base_dist.log_prob(value)
return _sum_rightmost(log_prob, self.reinterpreted_batch_ndims)
def entropy(self):
entropy = self.base_dist.entropy()
return _sum_rightmost(entropy, self.reinterpreted_batch_ndims)
def enumerate_support(self, expand=True):
if self.reinterpreted_batch_ndims > 0:
raise NotImplementedError("Enumeration over cartesian product is not implemented")
return self.base_dist.enumerate_support(expand=expand)
def __repr__(self):
return self.__class__.__name__ + '({}, {})'.format(self.base_dist, self.reinterpreted_batch_ndims)
| pytorch-master | torch/distributions/independent.py |
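# Illustrative usage sketch (not part of the repository sources): Independent turns a
# batch of three univariate Normals into a single 3-dimensional event, so log_prob
# sums over the last dimension instead of returning one value per component.
import torch
from torch.distributions import Independent, Normal

base = Normal(torch.zeros(3), torch.ones(3))       # batch_shape=(3,), event_shape=()
diag = Independent(base, 1)                        # batch_shape=(),  event_shape=(3,)
x = torch.zeros(3)
print(base.log_prob(x).shape)                      # torch.Size([3])
print(diag.log_prob(x).shape)                      # torch.Size([])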
import torch
from torch._six import inf
from torch.distributions.binomial import Binomial
from torch.distributions.distribution import Distribution
from torch.distributions import Categorical
from torch.distributions import constraints
from torch.distributions.utils import broadcast_all
__all__ = ['Multinomial']
class Multinomial(Distribution):
r"""
Creates a Multinomial distribution parameterized by :attr:`total_count` and
either :attr:`probs` or :attr:`logits` (but not both). The innermost dimension of
:attr:`probs` indexes over categories. All other dimensions index over batches.
Note that :attr:`total_count` need not be specified if only :meth:`log_prob` is
called (see example below).
.. note:: The `probs` argument must be non-negative, finite and have a non-zero sum,
and it will be normalized to sum to 1 along the last dimension. :attr:`probs`
will return this normalized value.
The `logits` argument will be interpreted as unnormalized log probabilities
and can therefore be any real number. It will likewise be normalized so that
the resulting probabilities sum to 1 along the last dimension. :attr:`logits`
will return this normalized value.
- :meth:`sample` requires a single shared `total_count` for all
parameters and samples.
- :meth:`log_prob` allows different `total_count` for each parameter and
sample.
Example::
>>> # xdoctest: +SKIP("FIXME: found invalid values")
>>> m = Multinomial(100, torch.tensor([ 1., 1., 1., 1.]))
>>> x = m.sample() # equal probability of 0, 1, 2, 3
tensor([ 21., 24., 30., 25.])
>>> Multinomial(probs=torch.tensor([1., 1., 1., 1.])).log_prob(x)
tensor([-4.1338])
Args:
total_count (int): number of trials
probs (Tensor): event probabilities
logits (Tensor): event log probabilities (unnormalized)
"""
arg_constraints = {'probs': constraints.simplex,
'logits': constraints.real_vector}
total_count: int
@property
def mean(self):
return self.probs * self.total_count
@property
def variance(self):
return self.total_count * self.probs * (1 - self.probs)
def __init__(self, total_count=1, probs=None, logits=None, validate_args=None):
if not isinstance(total_count, int):
raise NotImplementedError('inhomogeneous total_count is not supported')
self.total_count = total_count
self._categorical = Categorical(probs=probs, logits=logits)
self._binomial = Binomial(total_count=total_count, probs=self.probs)
batch_shape = self._categorical.batch_shape
event_shape = self._categorical.param_shape[-1:]
super(Multinomial, self).__init__(batch_shape, event_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Multinomial, _instance)
batch_shape = torch.Size(batch_shape)
new.total_count = self.total_count
new._categorical = self._categorical.expand(batch_shape)
super(Multinomial, new).__init__(batch_shape, self.event_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._categorical._new(*args, **kwargs)
@constraints.dependent_property(is_discrete=True, event_dim=1)
def support(self):
return constraints.multinomial(self.total_count)
@property
def logits(self):
return self._categorical.logits
@property
def probs(self):
return self._categorical.probs
@property
def param_shape(self):
return self._categorical.param_shape
def sample(self, sample_shape=torch.Size()):
sample_shape = torch.Size(sample_shape)
samples = self._categorical.sample(torch.Size((self.total_count,)) + sample_shape)
# samples.shape is (total_count, sample_shape, batch_shape), need to change it to
# (sample_shape, batch_shape, total_count)
shifted_idx = list(range(samples.dim()))
shifted_idx.append(shifted_idx.pop(0))
samples = samples.permute(*shifted_idx)
counts = samples.new(self._extended_shape(sample_shape)).zero_()
counts.scatter_add_(-1, samples, torch.ones_like(samples))
return counts.type_as(self.probs)
def entropy(self):
n = torch.tensor(self.total_count)
cat_entropy = self._categorical.entropy()
term1 = n * cat_entropy - torch.lgamma(n + 1)
support = self._binomial.enumerate_support(expand=False)[1:]
binomial_probs = torch.exp(self._binomial.log_prob(support))
weights = torch.lgamma(support + 1)
term2 = (binomial_probs * weights).sum([0, -1])
return term1 + term2
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
logits, value = broadcast_all(self.logits, value)
logits = logits.clone(memory_format=torch.contiguous_format)
log_factorial_n = torch.lgamma(value.sum(-1) + 1)
log_factorial_xs = torch.lgamma(value + 1).sum(-1)
logits[(value == 0) & (logits == -inf)] = 0
log_powers = (logits * value).sum(-1)
return log_factorial_n - log_factorial_xs + log_powers
| pytorch-master | torch/distributions/multinomial.py |
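# Illustrative usage sketch (not part of the repository sources): drawing a vector of
# counts from a Multinomial and scoring it with log_prob, using only the API above.
import torch
from torch.distributions import Multinomial

m = Multinomial(total_count=100, probs=torch.ones(4))   # probs are normalized internally
counts = m.sample()                                      # shape (4,), entries sum to 100
print(counts.sum())                                      # tensor(100.)
print(m.log_prob(counts))                                # log-probability of the draw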
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all
__all__ = ['Exponential']
class Exponential(ExponentialFamily):
r"""
Creates an Exponential distribution parameterized by :attr:`rate`.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Exponential(torch.tensor([1.0]))
>>> m.sample() # Exponential distributed with rate=1
tensor([ 0.1046])
Args:
rate (float or Tensor): rate = 1 / scale of the distribution
"""
arg_constraints = {'rate': constraints.positive}
support = constraints.nonnegative
has_rsample = True
_mean_carrier_measure = 0
@property
def mean(self):
return self.rate.reciprocal()
@property
def mode(self):
return torch.zeros_like(self.rate)
@property
def stddev(self):
return self.rate.reciprocal()
@property
def variance(self):
return self.rate.pow(-2)
def __init__(self, rate, validate_args=None):
self.rate, = broadcast_all(rate)
batch_shape = torch.Size() if isinstance(rate, Number) else self.rate.size()
super(Exponential, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Exponential, _instance)
batch_shape = torch.Size(batch_shape)
new.rate = self.rate.expand(batch_shape)
super(Exponential, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
if torch._C._get_tracing_state():
# [JIT WORKAROUND] lack of support for ._exponential()
u = torch.rand(shape, dtype=self.rate.dtype, device=self.rate.device)
return -(-u).log1p() / self.rate
return self.rate.new(shape).exponential_() / self.rate
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
return self.rate.log() - self.rate * value
def cdf(self, value):
if self._validate_args:
self._validate_sample(value)
return 1 - torch.exp(-self.rate * value)
def icdf(self, value):
return -torch.log(1 - value) / self.rate
def entropy(self):
return 1.0 - torch.log(self.rate)
@property
def _natural_params(self):
return (-self.rate, )
def _log_normalizer(self, x):
return -torch.log(-x)
| pytorch-master | torch/distributions/exponential.py |
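# Illustrative usage sketch (not part of the repository sources): Exponential has
# has_rsample = True, so gradients of a Monte Carlo loss flow back into the rate.
import torch
from torch.distributions import Exponential

rate = torch.tensor([2.0], requires_grad=True)
d = Exponential(rate)
loss = d.rsample((1000,)).mean()        # reparameterized, differentiable w.r.t. rate
loss.backward()
print(rate.grad)                        # roughly -1 / rate**2 = -0.25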
from torch.distributions import constraints
from torch.distributions.exponential import Exponential
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import AffineTransform, ExpTransform
from torch.distributions.utils import broadcast_all
__all__ = ['Pareto']
class Pareto(TransformedDistribution):
r"""
Samples from a Pareto Type 1 distribution.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Pareto(torch.tensor([1.0]), torch.tensor([1.0]))
>>> m.sample() # sample from a Pareto distribution with scale=1 and alpha=1
tensor([ 1.5623])
Args:
scale (float or Tensor): Scale parameter of the distribution
alpha (float or Tensor): Shape parameter of the distribution
"""
arg_constraints = {'alpha': constraints.positive, 'scale': constraints.positive}
def __init__(self, scale, alpha, validate_args=None):
self.scale, self.alpha = broadcast_all(scale, alpha)
base_dist = Exponential(self.alpha, validate_args=validate_args)
transforms = [ExpTransform(), AffineTransform(loc=0, scale=self.scale)]
super(Pareto, self).__init__(base_dist, transforms, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Pareto, _instance)
new.scale = self.scale.expand(batch_shape)
new.alpha = self.alpha.expand(batch_shape)
return super(Pareto, self).expand(batch_shape, _instance=new)
@property
def mean(self):
# mean is inf for alpha <= 1
a = self.alpha.clamp(min=1)
return a * self.scale / (a - 1)
@property
def mode(self):
return self.scale
@property
def variance(self):
# var is inf for alpha <= 2
a = self.alpha.clamp(min=2)
return self.scale.pow(2) * a / ((a - 1).pow(2) * (a - 2))
@constraints.dependent_property(is_discrete=False, event_dim=0)
def support(self):
return constraints.greater_than_eq(self.scale)
def entropy(self):
return ((self.scale / self.alpha).log() + (1 + self.alpha.reciprocal()))
| pytorch-master | torch/distributions/pareto.py |
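# Illustrative usage sketch (not part of the repository sources): Pareto is built as a
# TransformedDistribution over Exponential, so sampling and moments come for free.
import torch
from torch.distributions import Pareto

p = Pareto(scale=torch.tensor(1.0), alpha=torch.tensor(3.0))
x = p.sample((5,))
print(x.min() >= 1.0)                   # True: the support is [scale, inf)
print(p.mean, p.variance)               # finite here because alpha > 2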
import torch
import torch.nn.functional as F
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, probs_to_logits, lazy_property, logits_to_probs
__all__ = ['NegativeBinomial']
class NegativeBinomial(Distribution):
r"""
Creates a Negative Binomial distribution, i.e. distribution
of the number of successful independent and identical Bernoulli trials
before :attr:`total_count` failures are achieved. The probability
of success of each Bernoulli trial is :attr:`probs`.
Args:
total_count (float or Tensor): non-negative number of negative Bernoulli
trials to stop, although the distribution is still valid for real
valued count
probs (Tensor): Event probabilities of success in the half open interval [0, 1)
logits (Tensor): Event log-odds for probabilities of success
"""
arg_constraints = {'total_count': constraints.greater_than_eq(0),
'probs': constraints.half_open_interval(0., 1.),
'logits': constraints.real}
support = constraints.nonnegative_integer
def __init__(self, total_count, probs=None, logits=None, validate_args=None):
if (probs is None) == (logits is None):
raise ValueError("Either `probs` or `logits` must be specified, but not both.")
if probs is not None:
self.total_count, self.probs, = broadcast_all(total_count, probs)
self.total_count = self.total_count.type_as(self.probs)
else:
self.total_count, self.logits, = broadcast_all(total_count, logits)
self.total_count = self.total_count.type_as(self.logits)
self._param = self.probs if probs is not None else self.logits
batch_shape = self._param.size()
super(NegativeBinomial, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(NegativeBinomial, _instance)
batch_shape = torch.Size(batch_shape)
new.total_count = self.total_count.expand(batch_shape)
if 'probs' in self.__dict__:
new.probs = self.probs.expand(batch_shape)
new._param = new.probs
if 'logits' in self.__dict__:
new.logits = self.logits.expand(batch_shape)
new._param = new.logits
super(NegativeBinomial, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._param.new(*args, **kwargs)
@property
def mean(self):
return self.total_count * torch.exp(self.logits)
@property
def mode(self):
return ((self.total_count - 1) * self.logits.exp()).floor().clamp(min=0.)
@property
def variance(self):
return self.mean / torch.sigmoid(-self.logits)
@lazy_property
def logits(self):
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
@property
def param_shape(self):
return self._param.size()
@lazy_property
def _gamma(self):
# Note we avoid validating because self.total_count can be zero.
return torch.distributions.Gamma(concentration=self.total_count,
rate=torch.exp(-self.logits),
validate_args=False)
def sample(self, sample_shape=torch.Size()):
with torch.no_grad():
rate = self._gamma.sample(sample_shape=sample_shape)
return torch.poisson(rate)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
log_unnormalized_prob = (self.total_count * F.logsigmoid(-self.logits) +
value * F.logsigmoid(self.logits))
log_normalization = (-torch.lgamma(self.total_count + value) + torch.lgamma(1. + value) +
torch.lgamma(self.total_count))
log_normalization[self.total_count + value == 0.] = 0.
return log_unnormalized_prob - log_normalization
| pytorch-master | torch/distributions/negative_binomial.py |
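# Illustrative usage sketch (not part of the repository sources): NegativeBinomial
# counts successes before `total_count` failures; mean and variance follow the
# closed forms implemented above.
import torch
from torch.distributions import NegativeBinomial

nb = NegativeBinomial(total_count=torch.tensor(10.0), probs=torch.tensor(0.25))
x = nb.sample((3,))
print(nb.mean)                          # total_count * p / (1 - p) = 10/3
print(nb.variance)                      # mean / (1 - p)
print(nb.log_prob(x))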
import math
from torch._six import inf, nan
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all
__all__ = ['Cauchy']
class Cauchy(Distribution):
r"""
Samples from a Cauchy (Lorentz) distribution. The distribution of the ratio of
independent normally distributed random variables with means `0` follows a
Cauchy distribution.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Cauchy(torch.tensor([0.0]), torch.tensor([1.0]))
>>> m.sample() # sample from a Cauchy distribution with loc=0 and scale=1
tensor([ 2.3214])
Args:
loc (float or Tensor): mode or median of the distribution.
scale (float or Tensor): half width at half maximum.
"""
arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
support = constraints.real
has_rsample = True
def __init__(self, loc, scale, validate_args=None):
self.loc, self.scale = broadcast_all(loc, scale)
if isinstance(loc, Number) and isinstance(scale, Number):
batch_shape = torch.Size()
else:
batch_shape = self.loc.size()
super(Cauchy, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Cauchy, _instance)
batch_shape = torch.Size(batch_shape)
new.loc = self.loc.expand(batch_shape)
new.scale = self.scale.expand(batch_shape)
super(Cauchy, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
@property
def mean(self):
return torch.full(self._extended_shape(), nan, dtype=self.loc.dtype, device=self.loc.device)
@property
def mode(self):
return self.loc
@property
def variance(self):
return torch.full(self._extended_shape(), inf, dtype=self.loc.dtype, device=self.loc.device)
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
eps = self.loc.new(shape).cauchy_()
return self.loc + eps * self.scale
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
return -math.log(math.pi) - self.scale.log() - (1 + ((value - self.loc) / self.scale)**2).log()
def cdf(self, value):
if self._validate_args:
self._validate_sample(value)
return torch.atan((value - self.loc) / self.scale) / math.pi + 0.5
def icdf(self, value):
return torch.tan(math.pi * (value - 0.5)) * self.scale + self.loc
def entropy(self):
return math.log(4 * math.pi) + self.scale.log()
| pytorch-master | torch/distributions/cauchy.py |
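# Illustrative usage sketch (not part of the repository sources): the Cauchy mean and
# variance are undefined, which the class reports as nan / inf, while cdf and icdf
# are exact inverses of each other.
import torch
from torch.distributions import Cauchy

c = Cauchy(loc=torch.tensor(0.0), scale=torch.tensor(1.0))
print(c.mean, c.variance)               # tensor(nan) tensor(inf)
q = torch.tensor(0.75)
x = c.icdf(q)                           # quantile function
print(torch.allclose(c.cdf(x), q))      # True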
import math
import torch
import torch.jit
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, lazy_property
__all__ = ['VonMises']
def _eval_poly(y, coef):
coef = list(coef)
result = coef.pop()
while coef:
result = coef.pop() + y * result
return result
_I0_COEF_SMALL = [1.0, 3.5156229, 3.0899424, 1.2067492, 0.2659732, 0.360768e-1, 0.45813e-2]
_I0_COEF_LARGE = [0.39894228, 0.1328592e-1, 0.225319e-2, -0.157565e-2, 0.916281e-2,
-0.2057706e-1, 0.2635537e-1, -0.1647633e-1, 0.392377e-2]
_I1_COEF_SMALL = [0.5, 0.87890594, 0.51498869, 0.15084934, 0.2658733e-1, 0.301532e-2, 0.32411e-3]
_I1_COEF_LARGE = [0.39894228, -0.3988024e-1, -0.362018e-2, 0.163801e-2, -0.1031555e-1,
0.2282967e-1, -0.2895312e-1, 0.1787654e-1, -0.420059e-2]
_COEF_SMALL = [_I0_COEF_SMALL, _I1_COEF_SMALL]
_COEF_LARGE = [_I0_COEF_LARGE, _I1_COEF_LARGE]
def _log_modified_bessel_fn(x, order=0):
"""
Returns ``log(I_order(x))`` for ``x > 0``,
where `order` is either 0 or 1.
"""
assert order == 0 or order == 1
# compute small solution
y = (x / 3.75)
y = y * y
small = _eval_poly(y, _COEF_SMALL[order])
if order == 1:
small = x.abs() * small
small = small.log()
# compute large solution
y = 3.75 / x
large = x - 0.5 * x.log() + _eval_poly(y, _COEF_LARGE[order]).log()
result = torch.where(x < 3.75, small, large)
return result
@torch.jit.script_if_tracing
def _rejection_sample(loc, concentration, proposal_r, x):
done = torch.zeros(x.shape, dtype=torch.bool, device=loc.device)
while not done.all():
u = torch.rand((3,) + x.shape, dtype=loc.dtype, device=loc.device)
u1, u2, u3 = u.unbind()
z = torch.cos(math.pi * u1)
f = (1 + proposal_r * z) / (proposal_r + z)
c = concentration * (proposal_r - f)
accept = ((c * (2 - c) - u2) > 0) | ((c / u2).log() + 1 - c >= 0)
if accept.any():
x = torch.where(accept, (u3 - 0.5).sign() * f.acos(), x)
done = done | accept
return (x + math.pi + loc) % (2 * math.pi) - math.pi
class VonMises(Distribution):
"""
A circular von Mises distribution.
This implementation uses polar coordinates. The ``loc`` and ``value`` args
can be any real number (to facilitate unconstrained optimization), but are
interpreted as angles modulo 2 pi.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = VonMises(torch.tensor([1.0]), torch.tensor([1.0]))
>>> m.sample() # von Mises distributed with loc=1 and concentration=1
tensor([1.9777])
:param torch.Tensor loc: an angle in radians.
:param torch.Tensor concentration: concentration parameter
"""
arg_constraints = {'loc': constraints.real, 'concentration': constraints.positive}
support = constraints.real
has_rsample = False
def __init__(self, loc, concentration, validate_args=None):
self.loc, self.concentration = broadcast_all(loc, concentration)
batch_shape = self.loc.shape
event_shape = torch.Size()
# Parameters for sampling
tau = 1 + (1 + 4 * self.concentration ** 2).sqrt()
rho = (tau - (2 * tau).sqrt()) / (2 * self.concentration)
self._proposal_r = (1 + rho ** 2) / (2 * rho)
super(VonMises, self).__init__(batch_shape, event_shape, validate_args)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
log_prob = self.concentration * torch.cos(value - self.loc)
log_prob = log_prob - math.log(2 * math.pi) - _log_modified_bessel_fn(self.concentration, order=0)
return log_prob
@torch.no_grad()
def sample(self, sample_shape=torch.Size()):
"""
The sampling algorithm for the von Mises distribution is based on the following paper:
Best, D. J., and Nicholas I. Fisher.
"Efficient simulation of the von Mises distribution." Applied Statistics (1979): 152-157.
"""
shape = self._extended_shape(sample_shape)
x = torch.empty(shape, dtype=self.loc.dtype, device=self.loc.device)
return _rejection_sample(self.loc, self.concentration, self._proposal_r, x)
def expand(self, batch_shape):
try:
return super(VonMises, self).expand(batch_shape)
except NotImplementedError:
validate_args = self.__dict__.get('_validate_args')
loc = self.loc.expand(batch_shape)
concentration = self.concentration.expand(batch_shape)
return type(self)(loc, concentration, validate_args=validate_args)
@property
def mean(self):
"""
The provided mean is the circular one.
"""
return self.loc
@property
def mode(self):
return self.loc
@lazy_property
def variance(self):
"""
The provided variance is the circular one.
"""
return 1 - (_log_modified_bessel_fn(self.concentration, order=1) -
_log_modified_bessel_fn(self.concentration, order=0)).exp()
| pytorch-master | torch/distributions/von_mises.py |
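# Illustrative usage sketch (not part of the repository sources): VonMises samples by
# rejection, so only `sample` (not `rsample`) is available, and draws are angles
# wrapped to [-pi, pi).
import math
import torch
from torch.distributions import VonMises

vm = VonMises(loc=torch.tensor(0.0), concentration=torch.tensor(4.0))
theta = vm.sample((1000,))
print(theta.abs().max() <= math.pi)     # True: values are wrapped to [-pi, pi)
print(vm.log_prob(torch.tensor(0.0)))   # density is highest at the location parameter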
import torch
import warnings
from torch.distributions import constraints
from torch.distributions.utils import lazy_property
from typing import Dict, Optional, Any
__all__ = ['Distribution']
class Distribution(object):
r"""
Distribution is the abstract base class for probability distributions.
"""
has_rsample = False
has_enumerate_support = False
_validate_args = __debug__
@staticmethod
def set_default_validate_args(value):
"""
Sets whether validation is enabled or disabled.
The default behavior mimics Python's ``assert`` statement: validation
is on by default, but is disabled if Python is run in optimized mode
(via ``python -O``). Validation may be expensive, so you may want to
disable it once a model is working.
Args:
value (bool): Whether to enable validation.
"""
if value not in [True, False]:
raise ValueError
Distribution._validate_args = value
def __init__(self, batch_shape=torch.Size(), event_shape=torch.Size(), validate_args=None):
self._batch_shape = batch_shape
self._event_shape = event_shape
if validate_args is not None:
self._validate_args = validate_args
if self._validate_args:
try:
arg_constraints = self.arg_constraints
except NotImplementedError:
arg_constraints = {}
warnings.warn(f'{self.__class__} does not define `arg_constraints`. ' +
'Please set `arg_constraints = {}` or initialize the distribution ' +
'with `validate_args=False` to turn off validation.')
for param, constraint in arg_constraints.items():
if constraints.is_dependent(constraint):
continue # skip constraints that cannot be checked
if param not in self.__dict__ and isinstance(getattr(type(self), param), lazy_property):
continue # skip checking lazily-constructed args
value = getattr(self, param)
valid = constraint.check(value)
if not valid.all():
raise ValueError(
f"Expected parameter {param} "
f"({type(value).__name__} of shape {tuple(value.shape)}) "
f"of distribution {repr(self)} "
f"to satisfy the constraint {repr(constraint)}, "
f"but found invalid values:\n{value}"
)
super(Distribution, self).__init__()
def expand(self, batch_shape, _instance=None):
"""
Returns a new distribution instance (or populates an existing instance
provided by a derived class) with batch dimensions expanded to
`batch_shape`. This method calls :class:`~torch.Tensor.expand` on
the distribution's parameters. As such, this does not allocate new
memory for the expanded distribution instance. Additionally,
this does not repeat any args checking or parameter broadcasting in
`__init__.py`, when an instance is first created.
Args:
batch_shape (torch.Size): the desired expanded size.
_instance: new instance provided by subclasses that
need to override `.expand`.
Returns:
New distribution instance with batch dimensions expanded to
`batch_size`.
"""
raise NotImplementedError
@property
def batch_shape(self):
"""
Returns the shape over which parameters are batched.
"""
return self._batch_shape
@property
def event_shape(self):
"""
Returns the shape of a single sample (without batching).
"""
return self._event_shape
@property
def arg_constraints(self) -> Dict[str, constraints.Constraint]:
"""
Returns a dictionary from argument names to
:class:`~torch.distributions.constraints.Constraint` objects that
should be satisfied by each argument of this distribution. Args that
are not tensors need not appear in this dict.
"""
raise NotImplementedError
@property
def support(self) -> Optional[Any]:
"""
Returns a :class:`~torch.distributions.constraints.Constraint` object
representing this distribution's support.
"""
raise NotImplementedError
@property
def mean(self):
"""
Returns the mean of the distribution.
"""
raise NotImplementedError
@property
def mode(self):
"""
Returns the mode of the distribution.
"""
raise NotImplementedError(f"{self.__class__} does not implement mode")
@property
def variance(self):
"""
Returns the variance of the distribution.
"""
raise NotImplementedError
@property
def stddev(self):
"""
Returns the standard deviation of the distribution.
"""
return self.variance.sqrt()
def sample(self, sample_shape=torch.Size()):
"""
Generates a sample_shape shaped sample or sample_shape shaped batch of
samples if the distribution parameters are batched.
"""
with torch.no_grad():
return self.rsample(sample_shape)
def rsample(self, sample_shape=torch.Size()):
"""
Generates a sample_shape shaped reparameterized sample or sample_shape
shaped batch of reparameterized samples if the distribution parameters
are batched.
"""
raise NotImplementedError
def sample_n(self, n):
"""
Generates n samples or n batches of samples if the distribution
parameters are batched.
"""
warnings.warn('sample_n will be deprecated. Use .sample((n,)) instead', UserWarning)
return self.sample(torch.Size((n,)))
def log_prob(self, value):
"""
Returns the log of the probability density/mass function evaluated at
`value`.
Args:
value (Tensor):
"""
raise NotImplementedError
def cdf(self, value):
"""
Returns the cumulative density/mass function evaluated at
`value`.
Args:
value (Tensor):
"""
raise NotImplementedError
def icdf(self, value):
"""
Returns the inverse cumulative density/mass function evaluated at
`value`.
Args:
value (Tensor):
"""
raise NotImplementedError
def enumerate_support(self, expand=True):
"""
Returns tensor containing all values supported by a discrete
distribution. The result will enumerate over dimension 0, so the shape
of the result will be `(cardinality,) + batch_shape + event_shape`
(where `event_shape = ()` for univariate distributions).
Note that this enumerates over all batched tensors in lock-step
`[[0, 0], [1, 1], ...]`. With `expand=False`, enumeration happens
along dim 0, but with the remaining batch dimensions being
singleton dimensions, `[[0], [1], ...]`.
To iterate over the full Cartesian product use
`itertools.product(m.enumerate_support())`.
Args:
expand (bool): whether to expand the support over the
batch dims to match the distribution's `batch_shape`.
Returns:
Tensor iterating over dimension 0.
"""
raise NotImplementedError
def entropy(self):
"""
Returns entropy of distribution, batched over batch_shape.
Returns:
Tensor of shape batch_shape.
"""
raise NotImplementedError
def perplexity(self):
"""
Returns perplexity of distribution, batched over batch_shape.
Returns:
Tensor of shape batch_shape.
"""
return torch.exp(self.entropy())
def _extended_shape(self, sample_shape=torch.Size()):
"""
Returns the size of the sample returned by the distribution, given
a `sample_shape`. Note, that the batch and event shapes of a distribution
instance are fixed at the time of construction. If this is empty, the
returned shape is upcast to (1,).
Args:
sample_shape (torch.Size): the size of the sample to be drawn.
"""
if not isinstance(sample_shape, torch.Size):
sample_shape = torch.Size(sample_shape)
return sample_shape + self._batch_shape + self._event_shape
def _validate_sample(self, value):
"""
Argument validation for distribution methods such as `log_prob`,
`cdf` and `icdf`. The rightmost dimensions of a value to be
scored via these methods must agree with the distribution's batch
and event shapes.
Args:
value (Tensor): the tensor whose log probability is to be
computed by the `log_prob` method.
Raises:
ValueError: when the rightmost dimensions of `value` do not match the
distribution's batch and event shapes.
"""
if not isinstance(value, torch.Tensor):
raise ValueError('The value argument to log_prob must be a Tensor')
event_dim_start = len(value.size()) - len(self._event_shape)
if value.size()[event_dim_start:] != self._event_shape:
raise ValueError('The right-most size of value must match event_shape: {} vs {}.'.
format(value.size(), self._event_shape))
actual_shape = value.size()
expected_shape = self._batch_shape + self._event_shape
for i, j in zip(reversed(actual_shape), reversed(expected_shape)):
if i != 1 and j != 1 and i != j:
raise ValueError('Value is not broadcastable with batch_shape+event_shape: {} vs {}.'.
format(actual_shape, expected_shape))
try:
support = self.support
except NotImplementedError:
warnings.warn(f'{self.__class__} does not define `support` to enable ' +
'sample validation. Please initialize the distribution with ' +
'`validate_args=False` to turn off validation.')
return
assert support is not None
valid = support.check(value)
if not valid.all():
raise ValueError(
"Expected value argument "
f"({type(value).__name__} of shape {tuple(value.shape)}) "
f"to be within the support ({repr(support)}) "
f"of the distribution {repr(self)}, "
f"but found invalid values:\n{value}"
)
def _get_checked_instance(self, cls, _instance=None):
if _instance is None and type(self).__init__ != cls.__init__:
raise NotImplementedError("Subclass {} of {} that defines a custom __init__ method "
"must also define a custom .expand() method.".
format(self.__class__.__name__, cls.__name__))
return self.__new__(type(self)) if _instance is None else _instance
def __repr__(self):
param_names = [k for k, _ in self.arg_constraints.items() if k in self.__dict__]
args_string = ', '.join(['{}: {}'.format(p, self.__dict__[p]
if self.__dict__[p].numel() == 1
else self.__dict__[p].size()) for p in param_names])
return self.__class__.__name__ + '(' + args_string + ')'
| pytorch-master | torch/distributions/distribution.py |
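# Illustrative usage sketch (not part of the repository sources): the argument
# validation implemented in Distribution.__init__ above, shown with Normal. Invalid
# parameters raise immediately when validation is on and are accepted when it is off.
import torch
from torch.distributions import Normal

try:
    Normal(loc=0.0, scale=-1.0, validate_args=True)    # scale violates constraints.positive
except ValueError as err:
    print("rejected:", err)
ok = Normal(loc=0.0, scale=-1.0, validate_args=False)  # validation skipped per instance
print(ok.batch_shape, ok.event_shape)                  # torch.Size([]) torch.Size([])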
from numbers import Number
import math
import torch
from torch.distributions import constraints
from torch.distributions.uniform import Uniform
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import AffineTransform, ExpTransform
from torch.distributions.utils import broadcast_all, euler_constant
__all__ = ['Gumbel']
class Gumbel(TransformedDistribution):
r"""
Samples from a Gumbel Distribution.
Examples::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Gumbel(torch.tensor([1.0]), torch.tensor([2.0]))
>>> m.sample() # sample from Gumbel distribution with loc=1, scale=2
tensor([ 1.0124])
Args:
loc (float or Tensor): Location parameter of the distribution
scale (float or Tensor): Scale parameter of the distribution
"""
arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
support = constraints.real
def __init__(self, loc, scale, validate_args=None):
self.loc, self.scale = broadcast_all(loc, scale)
finfo = torch.finfo(self.loc.dtype)
if isinstance(loc, Number) and isinstance(scale, Number):
base_dist = Uniform(finfo.tiny, 1 - finfo.eps)
else:
base_dist = Uniform(torch.full_like(self.loc, finfo.tiny),
torch.full_like(self.loc, 1 - finfo.eps))
transforms = [ExpTransform().inv, AffineTransform(loc=0, scale=-torch.ones_like(self.scale)),
ExpTransform().inv, AffineTransform(loc=loc, scale=-self.scale)]
super(Gumbel, self).__init__(base_dist, transforms, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Gumbel, _instance)
new.loc = self.loc.expand(batch_shape)
new.scale = self.scale.expand(batch_shape)
return super(Gumbel, self).expand(batch_shape, _instance=new)
# Explicitly defining the log probability function for Gumbel due to precision issues
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
y = (self.loc - value) / self.scale
return (y - y.exp()) - self.scale.log()
@property
def mean(self):
return self.loc + self.scale * euler_constant
@property
def mode(self):
return self.loc
@property
def stddev(self):
return (math.pi / math.sqrt(6)) * self.scale
@property
def variance(self):
return self.stddev.pow(2)
def entropy(self):
return self.scale.log() + (1 + euler_constant)
| pytorch-master | torch/distributions/gumbel.py |
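# Illustrative usage sketch (not part of the repository sources): the Gumbel mean and
# standard deviation follow the closed forms defined above.
import torch
from torch.distributions import Gumbel

g = Gumbel(loc=torch.tensor(1.0), scale=torch.tensor(2.0))
print(g.mean)                           # loc + scale * Euler-Mascheroni constant
print(g.stddev)                         # scale * pi / sqrt(6)
print(g.log_prob(g.sample((4,))).shape) # torch.Size([4])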
r"""
PyTorch provides two global :class:`ConstraintRegistry` objects that link
:class:`~torch.distributions.constraints.Constraint` objects to
:class:`~torch.distributions.transforms.Transform` objects. These objects both
input constraints and return transforms, but they have different guarantees on
bijectivity.
1. ``biject_to(constraint)`` looks up a bijective
:class:`~torch.distributions.transforms.Transform` from ``constraints.real``
to the given ``constraint``. The returned transform is guaranteed to have
``.bijective = True`` and should implement ``.log_abs_det_jacobian()``.
2. ``transform_to(constraint)`` looks up a not-necessarily bijective
:class:`~torch.distributions.transforms.Transform` from ``constraints.real``
to the given ``constraint``. The returned transform is not guaranteed to
implement ``.log_abs_det_jacobian()``.
The ``transform_to()`` registry is useful for performing unconstrained
optimization on constrained parameters of probability distributions, which are
indicated by each distribution's ``.arg_constraints`` dict. These transforms often
overparameterize a space in order to avoid rotation; they are thus more
suitable for coordinate-wise optimization algorithms like Adam::
loc = torch.zeros(100, requires_grad=True)
unconstrained = torch.zeros(100, requires_grad=True)
scale = transform_to(Normal.arg_constraints['scale'])(unconstrained)
loss = -Normal(loc, scale).log_prob(data).sum()
The ``biject_to()`` registry is useful for Hamiltonian Monte Carlo, where
samples from a probability distribution with constrained ``.support`` are
propagated in an unconstrained space, and algorithms are typically rotation
invariant.::
dist = Exponential(rate)
unconstrained = torch.zeros(100, requires_grad=True)
sample = biject_to(dist.support)(unconstrained)
potential_energy = -dist.log_prob(sample).sum()
.. note::
An example where ``transform_to`` and ``biject_to`` differ is
``constraints.simplex``: ``transform_to(constraints.simplex)`` returns a
:class:`~torch.distributions.transforms.SoftmaxTransform` that simply
exponentiates and normalizes its inputs; this is a cheap and mostly
coordinate-wise operation appropriate for algorithms like SVI. In
contrast, ``biject_to(constraints.simplex)`` returns a
:class:`~torch.distributions.transforms.StickBreakingTransform` that
bijects its input down to a one-fewer-dimensional space; this is a more
expensive, less numerically stable transform but is needed for algorithms
like HMC.
The ``biject_to`` and ``transform_to`` objects can be extended by user-defined
constraints and transforms using their ``.register()`` method either as a
function on singleton constraints::
transform_to.register(my_constraint, my_transform)
or as a decorator on parameterized constraints::
@transform_to.register(MyConstraintClass)
def my_factory(constraint):
assert isinstance(constraint, MyConstraintClass)
return MyTransform(constraint.param1, constraint.param2)
You can create your own registry by creating a new :class:`ConstraintRegistry`
object.
"""
import numbers
from torch.distributions import constraints, transforms
__all__ = [
'ConstraintRegistry',
'biject_to',
'transform_to',
]
class ConstraintRegistry(object):
"""
Registry to link constraints to transforms.
"""
def __init__(self):
self._registry = {}
super(ConstraintRegistry, self).__init__()
def register(self, constraint, factory=None):
"""
Registers a :class:`~torch.distributions.constraints.Constraint`
subclass in this registry. Usage::
@my_registry.register(MyConstraintClass)
def construct_transform(constraint):
assert isinstance(constraint, MyConstraint)
return MyTransform(constraint.arg_constraints)
Args:
constraint (subclass of :class:`~torch.distributions.constraints.Constraint`):
A subclass of :class:`~torch.distributions.constraints.Constraint`, or
a singleton object of the desired class.
factory (Callable): A callable that inputs a constraint object and returns
a :class:`~torch.distributions.transforms.Transform` object.
"""
# Support use as decorator.
if factory is None:
return lambda factory: self.register(constraint, factory)
# Support calling on singleton instances.
if isinstance(constraint, constraints.Constraint):
constraint = type(constraint)
if not isinstance(constraint, type) or not issubclass(constraint, constraints.Constraint):
raise TypeError('Expected constraint to be either a Constraint subclass or instance, '
'but got {}'.format(constraint))
self._registry[constraint] = factory
return factory
def __call__(self, constraint):
"""
Looks up a transform to constrained space, given a constraint object.
Usage::
constraint = Normal.arg_constraints['scale']
scale = transform_to(constraint)(torch.zeros(1)) # constrained
u = transform_to(constraint).inv(scale) # unconstrained
Args:
constraint (:class:`~torch.distributions.constraints.Constraint`):
A constraint object.
Returns:
A :class:`~torch.distributions.transforms.Transform` object.
Raises:
`NotImplementedError` if no transform has been registered.
"""
# Look up by Constraint subclass.
try:
factory = self._registry[type(constraint)]
except KeyError:
raise NotImplementedError(
f'Cannot transform {type(constraint).__name__} constraints') from None
return factory(constraint)
biject_to = ConstraintRegistry()
transform_to = ConstraintRegistry()
################################################################################
# Registration Table
################################################################################
@biject_to.register(constraints.real)
@transform_to.register(constraints.real)
def _transform_to_real(constraint):
return transforms.identity_transform
@biject_to.register(constraints.independent)
def _biject_to_independent(constraint):
base_transform = biject_to(constraint.base_constraint)
return transforms.IndependentTransform(
base_transform, constraint.reinterpreted_batch_ndims)
@transform_to.register(constraints.independent)
def _transform_to_independent(constraint):
base_transform = transform_to(constraint.base_constraint)
return transforms.IndependentTransform(
base_transform, constraint.reinterpreted_batch_ndims)
@biject_to.register(constraints.positive)
@biject_to.register(constraints.nonnegative)
@transform_to.register(constraints.positive)
@transform_to.register(constraints.nonnegative)
def _transform_to_positive(constraint):
return transforms.ExpTransform()
@biject_to.register(constraints.greater_than)
@biject_to.register(constraints.greater_than_eq)
@transform_to.register(constraints.greater_than)
@transform_to.register(constraints.greater_than_eq)
def _transform_to_greater_than(constraint):
return transforms.ComposeTransform([transforms.ExpTransform(),
transforms.AffineTransform(constraint.lower_bound, 1)])
@biject_to.register(constraints.less_than)
@transform_to.register(constraints.less_than)
def _transform_to_less_than(constraint):
return transforms.ComposeTransform([transforms.ExpTransform(),
transforms.AffineTransform(constraint.upper_bound, -1)])
@biject_to.register(constraints.interval)
@biject_to.register(constraints.half_open_interval)
@transform_to.register(constraints.interval)
@transform_to.register(constraints.half_open_interval)
def _transform_to_interval(constraint):
# Handle the special case of the unit interval.
lower_is_0 = isinstance(constraint.lower_bound, numbers.Number) and constraint.lower_bound == 0
upper_is_1 = isinstance(constraint.upper_bound, numbers.Number) and constraint.upper_bound == 1
if lower_is_0 and upper_is_1:
return transforms.SigmoidTransform()
loc = constraint.lower_bound
scale = constraint.upper_bound - constraint.lower_bound
return transforms.ComposeTransform([transforms.SigmoidTransform(),
transforms.AffineTransform(loc, scale)])
@biject_to.register(constraints.simplex)
def _biject_to_simplex(constraint):
return transforms.StickBreakingTransform()
@transform_to.register(constraints.simplex)
def _transform_to_simplex(constraint):
return transforms.SoftmaxTransform()
# TODO define a bijection for LowerCholeskyTransform
@transform_to.register(constraints.lower_cholesky)
def _transform_to_lower_cholesky(constraint):
return transforms.LowerCholeskyTransform()
@biject_to.register(constraints.corr_cholesky)
@transform_to.register(constraints.corr_cholesky)
def _transform_to_corr_cholesky(constraint):
return transforms.CorrCholeskyTransform()
@biject_to.register(constraints.cat)
def _biject_to_cat(constraint):
return transforms.CatTransform([biject_to(c)
for c in constraint.cseq],
constraint.dim,
constraint.lengths)
@transform_to.register(constraints.cat)
def _transform_to_cat(constraint):
return transforms.CatTransform([transform_to(c)
for c in constraint.cseq],
constraint.dim,
constraint.lengths)
@biject_to.register(constraints.stack)
def _biject_to_stack(constraint):
return transforms.StackTransform(
[biject_to(c)
for c in constraint.cseq], constraint.dim)
@transform_to.register(constraints.stack)
def _transform_to_stack(constraint):
return transforms.StackTransform(
[transform_to(c)
for c in constraint.cseq], constraint.dim)
| pytorch-master | torch/distributions/constraint_registry.py |
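# Illustrative usage sketch (not part of the repository sources): mapping an
# unconstrained parameter into a constrained space with transform_to, and
# round-tripping a constrained value back with biject_to(...).inv.
import torch
from torch.distributions import Normal, constraints, transform_to, biject_to

unconstrained = torch.zeros(5)
scale = transform_to(Normal.arg_constraints['scale'])(unconstrained)
print(scale)                            # exp(0) = 1 for every element, i.e. positive

t = biject_to(constraints.interval(-1.0, 1.0))
y = t(torch.randn(5))                   # values in (-1, 1)
print(t.inv(y))                         # recovers the unconstrained input (up to fp error)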
from functools import update_wrapper
from numbers import Number
import torch
import torch.nn.functional as F
from typing import Dict, Any
from torch.overrides import is_tensor_like
euler_constant = 0.57721566490153286060 # Euler Mascheroni Constant
def broadcast_all(*values):
r"""
Given a list of values (possibly containing numbers), returns a list where each
value is broadcasted based on the following rules:
- `torch.*Tensor` instances are broadcasted as per :ref:`_broadcasting-semantics`.
- numbers.Number instances (scalars) are upcast to tensors having
the same size and type as the first tensor passed to `values`. If all the
values are scalars, then they are upcasted to scalar Tensors.
Args:
values (list of `numbers.Number`, `torch.*Tensor` or objects implementing __torch_function__)
Raises:
ValueError: if any of the values is not a `numbers.Number` instance,
a `torch.*Tensor` instance, or an instance implementing __torch_function__
"""
if not all(is_tensor_like(v) or isinstance(v, Number)
for v in values):
raise ValueError('Input arguments must all be instances of numbers.Number, '
'torch.Tensor or objects implementing __torch_function__.')
if not all(is_tensor_like(v) for v in values):
options: Dict[str, Any] = dict(dtype=torch.get_default_dtype())
for value in values:
if isinstance(value, torch.Tensor):
options = dict(dtype=value.dtype, device=value.device)
break
new_values = [v if is_tensor_like(v) else torch.tensor(v, **options)
for v in values]
return torch.broadcast_tensors(*new_values)
return torch.broadcast_tensors(*values)
def _standard_normal(shape, dtype, device):
if torch._C._get_tracing_state():
# [JIT WORKAROUND] lack of support for .normal_()
return torch.normal(torch.zeros(shape, dtype=dtype, device=device),
torch.ones(shape, dtype=dtype, device=device))
return torch.empty(shape, dtype=dtype, device=device).normal_()
def _sum_rightmost(value, dim):
r"""
Sum out ``dim`` many rightmost dimensions of a given tensor.
Args:
value (Tensor): A tensor of ``.dim()`` at least ``dim``.
dim (int): The number of rightmost dims to sum out.
"""
if dim == 0:
return value
required_shape = value.shape[:-dim] + (-1,)
return value.reshape(required_shape).sum(-1)
def logits_to_probs(logits, is_binary=False):
r"""
Converts a tensor of logits into probabilities. Note that for the
binary case, each value denotes log odds, whereas for the
multi-dimensional case, the values along the last dimension denote
the log probabilities (possibly unnormalized) of the events.
"""
if is_binary:
return torch.sigmoid(logits)
return F.softmax(logits, dim=-1)
def clamp_probs(probs):
eps = torch.finfo(probs.dtype).eps
return probs.clamp(min=eps, max=1 - eps)
def probs_to_logits(probs, is_binary=False):
r"""
Converts a tensor of probabilities into logits. For the binary case,
this denotes the probability of occurrence of the event indexed by `1`.
For the multi-dimensional case, the values along the last dimension
denote the probabilities of occurrence of each of the events.
"""
ps_clamped = clamp_probs(probs)
if is_binary:
return torch.log(ps_clamped) - torch.log1p(-ps_clamped)
return torch.log(ps_clamped)
class lazy_property:
r"""
Used as a decorator for lazy loading of class attributes. This uses a
non-data descriptor that calls the wrapped method to compute the property on
first call; thereafter replacing the wrapped method into an instance
attribute.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
update_wrapper(self, wrapped)
def __get__(self, instance, obj_type=None):
if instance is None:
return _lazy_property_and_property(self.wrapped)
with torch.enable_grad():
value = self.wrapped(instance)
setattr(instance, self.wrapped.__name__, value)
return value
class _lazy_property_and_property(lazy_property, property):
"""We want lazy properties to look like multiple things.
* property when Sphinx autodoc looks
* lazy_property when Distribution validate_args looks
"""
def __init__(self, wrapped):
return property.__init__(self, wrapped)
def tril_matrix_to_vec(mat, diag=0):
r"""
Convert a `D x D` matrix or a batch of matrices into a (batched) vector
which comprises of lower triangular elements from the matrix in row order.
"""
n = mat.shape[-1]
if not torch._C._get_tracing_state() and (diag < -n or diag >= n):
raise ValueError(f'diag ({diag}) provided is outside [{-n}, {n-1}].')
arange = torch.arange(n, device=mat.device)
tril_mask = arange < arange.view(-1, 1) + (diag + 1)
vec = mat[..., tril_mask]
return vec
def vec_to_tril_matrix(vec, diag=0):
r"""
Convert a vector or a batch of vectors into a batched `D x D`
lower triangular matrix containing elements from the vector in row order.
"""
# +ve root of D**2 + (1+2*diag)*D - |diag| * (diag+1) - 2*vec.shape[-1] = 0
n = (-(1 + 2 * diag) + ((1 + 2 * diag)**2 + 8 * vec.shape[-1] + 4 * abs(diag) * (diag + 1))**0.5) / 2
eps = torch.finfo(vec.dtype).eps
if not torch._C._get_tracing_state() and (round(n) - n > eps):
raise ValueError(f'The size of last dimension is {vec.shape[-1]} which cannot be expressed as ' +
'the lower triangular part of a square D x D matrix.')
n = torch.round(n).long() if isinstance(n, torch.Tensor) else round(n)
mat = vec.new_zeros(vec.shape[:-1] + torch.Size((n, n)))
arange = torch.arange(n, device=vec.device)
tril_mask = arange < arange.view(-1, 1) + (diag + 1)
mat[..., tril_mask] = vec
return mat
| pytorch-master | torch/distributions/utils.py |
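# Illustrative usage sketch (not part of the repository sources): broadcast_all
# promotes scalars to tensors matching the tensor arguments, and vec_to_tril_matrix /
# tril_matrix_to_vec are inverse packings of the lower triangle.
import torch
from torch.distributions.utils import broadcast_all, tril_matrix_to_vec, vec_to_tril_matrix

loc, scale = broadcast_all(0.0, torch.ones(3))
print(loc.shape, scale.shape)                       # torch.Size([3]) torch.Size([3])

vec = torch.arange(6.)                              # 6 = 3*(3+1)/2 lower-triangular entries
mat = vec_to_tril_matrix(vec)                       # 3 x 3 lower-triangular matrix
print(torch.equal(tril_matrix_to_vec(mat), vec))    # True: round-trip is exact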
import math
import warnings
from functools import total_ordering
from typing import Type, Dict, Callable, Tuple
import torch
from torch._six import inf
from .bernoulli import Bernoulli
from .beta import Beta
from .binomial import Binomial
from .categorical import Categorical
from .cauchy import Cauchy
from .continuous_bernoulli import ContinuousBernoulli
from .dirichlet import Dirichlet
from .distribution import Distribution
from .exponential import Exponential
from .exp_family import ExponentialFamily
from .gamma import Gamma
from .geometric import Geometric
from .gumbel import Gumbel
from .half_normal import HalfNormal
from .independent import Independent
from .laplace import Laplace
from .lowrank_multivariate_normal import (LowRankMultivariateNormal, _batch_lowrank_logdet,
_batch_lowrank_mahalanobis)
from .multivariate_normal import (MultivariateNormal, _batch_mahalanobis)
from .normal import Normal
from .one_hot_categorical import OneHotCategorical
from .pareto import Pareto
from .poisson import Poisson
from .transformed_distribution import TransformedDistribution
from .uniform import Uniform
from .utils import _sum_rightmost, euler_constant as _euler_gamma
_KL_REGISTRY = {} # Source of truth mapping a few general (type, type) pairs to functions.
_KL_MEMOIZE: Dict[Tuple[Type, Type], Callable] = {} # Memoized version mapping many specific (type, type) pairs to functions.
def register_kl(type_p, type_q):
"""
Decorator to register a pairwise function with :meth:`kl_divergence`.
Usage::
@register_kl(Normal, Normal)
def kl_normal_normal(p, q):
# insert implementation here
Lookup returns the most specific (type,type) match ordered by subclass. If
the match is ambiguous, a `RuntimeWarning` is raised. For example to
resolve the ambiguous situation::
@register_kl(BaseP, DerivedQ)
def kl_version1(p, q): ...
@register_kl(DerivedP, BaseQ)
def kl_version2(p, q): ...
you should register a third most-specific implementation, e.g.::
register_kl(DerivedP, DerivedQ)(kl_version1) # Break the tie.
Args:
type_p (type): A subclass of :class:`~torch.distributions.Distribution`.
type_q (type): A subclass of :class:`~torch.distributions.Distribution`.
"""
if not (isinstance(type_p, type) and issubclass(type_p, Distribution)):
raise TypeError('Expected type_p to be a Distribution subclass but got {}'.format(type_p))
if not (isinstance(type_q, type) and issubclass(type_q, Distribution)):
raise TypeError('Expected type_q to be a Distribution subclass but got {}'.format(type_q))
def decorator(fun):
_KL_REGISTRY[type_p, type_q] = fun
_KL_MEMOIZE.clear() # reset since lookup order may have changed
return fun
return decorator
@total_ordering
class _Match(object):
__slots__ = ['types']
def __init__(self, *types):
self.types = types
def __eq__(self, other):
return self.types == other.types
def __le__(self, other):
for x, y in zip(self.types, other.types):
if not issubclass(x, y):
return False
if x is not y:
break
return True
def _dispatch_kl(type_p, type_q):
"""
Find the most specific approximate match, assuming single inheritance.
"""
matches = [(super_p, super_q) for super_p, super_q in _KL_REGISTRY
if issubclass(type_p, super_p) and issubclass(type_q, super_q)]
if not matches:
return NotImplemented
# Check that the left- and right- lexicographic orders agree.
# mypy isn't smart enough to know that _Match implements __lt__
# see: https://github.com/python/typing/issues/760#issuecomment-710670503
left_p, left_q = min(_Match(*m) for m in matches).types # type: ignore[type-var]
right_q, right_p = min(_Match(*reversed(m)) for m in matches).types # type: ignore[type-var]
left_fun = _KL_REGISTRY[left_p, left_q]
right_fun = _KL_REGISTRY[right_p, right_q]
if left_fun is not right_fun:
warnings.warn('Ambiguous kl_divergence({}, {}). Please register_kl({}, {})'.format(
type_p.__name__, type_q.__name__, left_p.__name__, right_q.__name__),
RuntimeWarning)
return left_fun
def _infinite_like(tensor):
"""
Helper function for obtaining infinite KL Divergence throughout
"""
return torch.full_like(tensor, inf)
def _x_log_x(tensor):
"""
Utility function for calculating x log x
"""
return tensor * tensor.log()
def _batch_trace_XXT(bmat):
"""
Utility function for calculating the trace of XX^{T} with X having arbitrary trailing batch dimensions
"""
n = bmat.size(-1)
m = bmat.size(-2)
flat_trace = bmat.reshape(-1, m * n).pow(2).sum(-1)
return flat_trace.reshape(bmat.shape[:-2])
def kl_divergence(p: Distribution, q: Distribution) -> torch.Tensor:
r"""
Compute Kullback-Leibler divergence :math:`KL(p \| q)` between two distributions.
.. math::
KL(p \| q) = \int p(x) \log\frac {p(x)} {q(x)} \,dx
Args:
p (Distribution): A :class:`~torch.distributions.Distribution` object.
q (Distribution): A :class:`~torch.distributions.Distribution` object.
Returns:
Tensor: A batch of KL divergences of shape `batch_shape`.
Raises:
NotImplementedError: If the distribution types have not been registered via
:meth:`register_kl`.
"""
try:
fun = _KL_MEMOIZE[type(p), type(q)]
except KeyError:
fun = _dispatch_kl(type(p), type(q))
_KL_MEMOIZE[type(p), type(q)] = fun
if fun is NotImplemented:
raise NotImplementedError("No KL(p || q) is implemented for p type {} and q type {}"
.format(p.__class__.__name__, q.__class__.__name__))
return fun(p, q)
################################################################################
# KL Divergence Implementations
################################################################################
# Same distributions
@register_kl(Bernoulli, Bernoulli)
def _kl_bernoulli_bernoulli(p, q):
t1 = p.probs * (torch.nn.functional.softplus(-q.logits) - torch.nn.functional.softplus(-p.logits))
t1[q.probs == 0] = inf
t1[p.probs == 0] = 0
t2 = (1 - p.probs) * (torch.nn.functional.softplus(q.logits) - torch.nn.functional.softplus(p.logits))
t2[q.probs == 1] = inf
t2[p.probs == 1] = 0
return t1 + t2
@register_kl(Beta, Beta)
def _kl_beta_beta(p, q):
sum_params_p = p.concentration1 + p.concentration0
sum_params_q = q.concentration1 + q.concentration0
t1 = q.concentration1.lgamma() + q.concentration0.lgamma() + (sum_params_p).lgamma()
t2 = p.concentration1.lgamma() + p.concentration0.lgamma() + (sum_params_q).lgamma()
t3 = (p.concentration1 - q.concentration1) * torch.digamma(p.concentration1)
t4 = (p.concentration0 - q.concentration0) * torch.digamma(p.concentration0)
t5 = (sum_params_q - sum_params_p) * torch.digamma(sum_params_p)
return t1 - t2 + t3 + t4 + t5
@register_kl(Binomial, Binomial)
def _kl_binomial_binomial(p, q):
# from https://math.stackexchange.com/questions/2214993/
# kullback-leibler-divergence-for-binomial-distributions-p-and-q
if (p.total_count < q.total_count).any():
raise NotImplementedError('KL between Binomials where q.total_count > p.total_count is not implemented')
kl = p.total_count * (p.probs * (p.logits - q.logits) + (-p.probs).log1p() - (-q.probs).log1p())
inf_idxs = p.total_count > q.total_count
kl[inf_idxs] = _infinite_like(kl[inf_idxs])
return kl
@register_kl(Categorical, Categorical)
def _kl_categorical_categorical(p, q):
t = p.probs * (p.logits - q.logits)
t[(q.probs == 0).expand_as(t)] = inf
t[(p.probs == 0).expand_as(t)] = 0
return t.sum(-1)
@register_kl(ContinuousBernoulli, ContinuousBernoulli)
def _kl_continuous_bernoulli_continuous_bernoulli(p, q):
t1 = p.mean * (p.logits - q.logits)
t2 = p._cont_bern_log_norm() + torch.log1p(-p.probs)
t3 = - q._cont_bern_log_norm() - torch.log1p(-q.probs)
return t1 + t2 + t3
@register_kl(Dirichlet, Dirichlet)
def _kl_dirichlet_dirichlet(p, q):
# From http://bariskurt.com/kullback-leibler-divergence-between-two-dirichlet-and-beta-distributions/
sum_p_concentration = p.concentration.sum(-1)
sum_q_concentration = q.concentration.sum(-1)
t1 = sum_p_concentration.lgamma() - sum_q_concentration.lgamma()
t2 = (p.concentration.lgamma() - q.concentration.lgamma()).sum(-1)
t3 = p.concentration - q.concentration
t4 = p.concentration.digamma() - sum_p_concentration.digamma().unsqueeze(-1)
return t1 - t2 + (t3 * t4).sum(-1)
@register_kl(Exponential, Exponential)
def _kl_exponential_exponential(p, q):
rate_ratio = q.rate / p.rate
t1 = -rate_ratio.log()
return t1 + rate_ratio - 1
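# For two members of the same exponential family with natural parameters eta and
# log-normalizer A(eta), the KL divergence is the Bregman divergence of A:
#     KL(p || q) = A(eta_q) - A(eta_p) - <eta_q - eta_p, grad A(eta_p)>,
# which the implementation below evaluates with autograd rather than a closed form.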
@register_kl(ExponentialFamily, ExponentialFamily)
def _kl_expfamily_expfamily(p, q):
if type(p) is not type(q):
raise NotImplementedError("The cross KL-divergence between different exponential families cannot be computed using Bregman divergences")
p_nparams = [np.detach().requires_grad_() for np in p._natural_params]
q_nparams = q._natural_params
lg_normal = p._log_normalizer(*p_nparams)
gradients = torch.autograd.grad(lg_normal.sum(), p_nparams, create_graph=True)
result = q._log_normalizer(*q_nparams) - lg_normal
for pnp, qnp, g in zip(p_nparams, q_nparams, gradients):
term = (qnp - pnp) * g
result -= _sum_rightmost(term, len(q.event_shape))
return result
@register_kl(Gamma, Gamma)
def _kl_gamma_gamma(p, q):
t1 = q.concentration * (p.rate / q.rate).log()
t2 = torch.lgamma(q.concentration) - torch.lgamma(p.concentration)
t3 = (p.concentration - q.concentration) * torch.digamma(p.concentration)
t4 = (q.rate - p.rate) * (p.concentration / p.rate)
return t1 + t2 + t3 + t4
@register_kl(Gumbel, Gumbel)
def _kl_gumbel_gumbel(p, q):
ct1 = p.scale / q.scale
ct2 = q.loc / q.scale
ct3 = p.loc / q.scale
t1 = -ct1.log() - ct2 + ct3
t2 = ct1 * _euler_gamma
t3 = torch.exp(ct2 + (1 + ct1).lgamma() - ct3)
return t1 + t2 + t3 - (1 + _euler_gamma)
@register_kl(Geometric, Geometric)
def _kl_geometric_geometric(p, q):
return -p.entropy() - torch.log1p(-q.probs) / p.probs - q.logits
@register_kl(HalfNormal, HalfNormal)
def _kl_halfnormal_halfnormal(p, q):
return _kl_normal_normal(p.base_dist, q.base_dist)
@register_kl(Laplace, Laplace)
def _kl_laplace_laplace(p, q):
# From http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf
scale_ratio = p.scale / q.scale
loc_abs_diff = (p.loc - q.loc).abs()
t1 = -scale_ratio.log()
t2 = loc_abs_diff / q.scale
t3 = scale_ratio * torch.exp(-loc_abs_diff / p.scale)
return t1 + t2 + t3 - 1
@register_kl(LowRankMultivariateNormal, LowRankMultivariateNormal)
def _kl_lowrankmultivariatenormal_lowrankmultivariatenormal(p, q):
if p.event_shape != q.event_shape:
raise ValueError("KL-divergence between two Low Rank Multivariate Normals with different event shapes cannot be computed")
term1 = (_batch_lowrank_logdet(q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag,
q._capacitance_tril) -
_batch_lowrank_logdet(p._unbroadcasted_cov_factor, p._unbroadcasted_cov_diag,
p._capacitance_tril))
term3 = _batch_lowrank_mahalanobis(q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag,
q.loc - p.loc,
q._capacitance_tril)
# Expands term2 according to
# inv(qcov) @ pcov = [inv(qD) - inv(qD) @ qW @ inv(qC) @ qW.T @ inv(qD)] @ (pW @ pW.T + pD)
# = [inv(qD) - A.T @ A] @ (pD + pW @ pW.T)
qWt_qDinv = (q._unbroadcasted_cov_factor.mT /
q._unbroadcasted_cov_diag.unsqueeze(-2))
A = torch.linalg.solve_triangular(q._capacitance_tril, qWt_qDinv, upper=False)
term21 = (p._unbroadcasted_cov_diag / q._unbroadcasted_cov_diag).sum(-1)
term22 = _batch_trace_XXT(p._unbroadcasted_cov_factor *
q._unbroadcasted_cov_diag.rsqrt().unsqueeze(-1))
term23 = _batch_trace_XXT(A * p._unbroadcasted_cov_diag.sqrt().unsqueeze(-2))
term24 = _batch_trace_XXT(A.matmul(p._unbroadcasted_cov_factor))
term2 = term21 + term22 - term23 - term24
return 0.5 * (term1 + term2 + term3 - p.event_shape[0])
@register_kl(MultivariateNormal, LowRankMultivariateNormal)
def _kl_multivariatenormal_lowrankmultivariatenormal(p, q):
if p.event_shape != q.event_shape:
raise ValueError("KL-divergence between two (Low Rank) Multivariate Normals with different event shapes cannot be computed")
term1 = (_batch_lowrank_logdet(q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag,
q._capacitance_tril) -
2 * p._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1))
term3 = _batch_lowrank_mahalanobis(q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag,
q.loc - p.loc,
q._capacitance_tril)
# Expands term2 according to
# inv(qcov) @ pcov = [inv(qD) - inv(qD) @ qW @ inv(qC) @ qW.T @ inv(qD)] @ p_tril @ p_tril.T
# = [inv(qD) - A.T @ A] @ p_tril @ p_tril.T
qWt_qDinv = (q._unbroadcasted_cov_factor.mT /
q._unbroadcasted_cov_diag.unsqueeze(-2))
A = torch.linalg.solve_triangular(q._capacitance_tril, qWt_qDinv, upper=False)
term21 = _batch_trace_XXT(p._unbroadcasted_scale_tril *
q._unbroadcasted_cov_diag.rsqrt().unsqueeze(-1))
term22 = _batch_trace_XXT(A.matmul(p._unbroadcasted_scale_tril))
term2 = term21 - term22
return 0.5 * (term1 + term2 + term3 - p.event_shape[0])
@register_kl(LowRankMultivariateNormal, MultivariateNormal)
def _kl_lowrankmultivariatenormal_multivariatenormal(p, q):
if p.event_shape != q.event_shape:
raise ValueError("KL-divergence between two (Low Rank) Multivariate Normals with different event shapes cannot be computed")
term1 = (2 * q._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) -
_batch_lowrank_logdet(p._unbroadcasted_cov_factor, p._unbroadcasted_cov_diag,
p._capacitance_tril))
term3 = _batch_mahalanobis(q._unbroadcasted_scale_tril, (q.loc - p.loc))
# Expands term2 according to
# inv(qcov) @ pcov = inv(q_tril @ q_tril.T) @ (pW @ pW.T + pD)
combined_batch_shape = torch._C._infer_size(q._unbroadcasted_scale_tril.shape[:-2],
p._unbroadcasted_cov_factor.shape[:-2])
n = p.event_shape[0]
q_scale_tril = q._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n))
p_cov_factor = p._unbroadcasted_cov_factor.expand(combined_batch_shape +
(n, p.cov_factor.size(-1)))
p_cov_diag = (torch.diag_embed(p._unbroadcasted_cov_diag.sqrt())
.expand(combined_batch_shape + (n, n)))
term21 = _batch_trace_XXT(torch.linalg.solve_triangular(q_scale_tril, p_cov_factor, upper=False))
term22 = _batch_trace_XXT(torch.linalg.solve_triangular(q_scale_tril, p_cov_diag, upper=False))
term2 = term21 + term22
return 0.5 * (term1 + term2 + term3 - p.event_shape[0])
@register_kl(MultivariateNormal, MultivariateNormal)
def _kl_multivariatenormal_multivariatenormal(p, q):
# From https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Kullback%E2%80%93Leibler_divergence
if p.event_shape != q.event_shape:
raise ValueError("KL-divergence between two Multivariate Normals with different event shapes cannot be computed")
half_term1 = (q._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) -
p._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1))
combined_batch_shape = torch._C._infer_size(q._unbroadcasted_scale_tril.shape[:-2],
p._unbroadcasted_scale_tril.shape[:-2])
n = p.event_shape[0]
q_scale_tril = q._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n))
p_scale_tril = p._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n))
term2 = _batch_trace_XXT(torch.linalg.solve_triangular(q_scale_tril, p_scale_tril, upper=False))
term3 = _batch_mahalanobis(q._unbroadcasted_scale_tril, (q.loc - p.loc))
return half_term1 + 0.5 * (term2 + term3 - n)
@register_kl(Normal, Normal)
def _kl_normal_normal(p, q):
var_ratio = (p.scale / q.scale).pow(2)
t1 = ((p.loc - q.loc) / q.scale).pow(2)
return 0.5 * (var_ratio + t1 - 1 - var_ratio.log())
@register_kl(OneHotCategorical, OneHotCategorical)
def _kl_onehotcategorical_onehotcategorical(p, q):
return _kl_categorical_categorical(p._categorical, q._categorical)
@register_kl(Pareto, Pareto)
def _kl_pareto_pareto(p, q):
# From http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf
scale_ratio = p.scale / q.scale
alpha_ratio = q.alpha / p.alpha
t1 = q.alpha * scale_ratio.log()
t2 = -alpha_ratio.log()
result = t1 + t2 + alpha_ratio - 1
result[p.support.lower_bound < q.support.lower_bound] = inf
return result
@register_kl(Poisson, Poisson)
def _kl_poisson_poisson(p, q):
return p.rate * (p.rate.log() - q.rate.log()) - (p.rate - q.rate)
@register_kl(TransformedDistribution, TransformedDistribution)
def _kl_transformed_transformed(p, q):
if p.transforms != q.transforms:
raise NotImplementedError
if p.event_shape != q.event_shape:
raise NotImplementedError
return kl_divergence(p.base_dist, q.base_dist)
@register_kl(Uniform, Uniform)
def _kl_uniform_uniform(p, q):
result = ((q.high - q.low) / (p.high - p.low)).log()
result[(q.low > p.low) | (q.high < p.high)] = inf
return result
# Different distributions
@register_kl(Bernoulli, Poisson)
def _kl_bernoulli_poisson(p, q):
return -p.entropy() - (p.probs * q.rate.log() - q.rate)
@register_kl(Beta, ContinuousBernoulli)
def _kl_beta_continuous_bernoulli(p, q):
return -p.entropy() - p.mean * q.logits - torch.log1p(-q.probs) - q._cont_bern_log_norm()
@register_kl(Beta, Pareto)
def _kl_beta_infinity(p, q):
return _infinite_like(p.concentration1)
@register_kl(Beta, Exponential)
def _kl_beta_exponential(p, q):
return -p.entropy() - q.rate.log() + q.rate * (p.concentration1 / (p.concentration1 + p.concentration0))
@register_kl(Beta, Gamma)
def _kl_beta_gamma(p, q):
t1 = -p.entropy()
t2 = q.concentration.lgamma() - q.concentration * q.rate.log()
t3 = (q.concentration - 1) * (p.concentration1.digamma() - (p.concentration1 + p.concentration0).digamma())
t4 = q.rate * p.concentration1 / (p.concentration1 + p.concentration0)
return t1 + t2 - t3 + t4
# TODO: Add Beta-Laplace KL Divergence
@register_kl(Beta, Normal)
def _kl_beta_normal(p, q):
E_beta = p.concentration1 / (p.concentration1 + p.concentration0)
var_normal = q.scale.pow(2)
t1 = -p.entropy()
t2 = 0.5 * (var_normal * 2 * math.pi).log()
t3 = (E_beta * (1 - E_beta) / (p.concentration1 + p.concentration0 + 1) + E_beta.pow(2)) * 0.5
t4 = q.loc * E_beta
t5 = q.loc.pow(2) * 0.5
return t1 + t2 + (t3 - t4 + t5) / var_normal
@register_kl(Beta, Uniform)
def _kl_beta_uniform(p, q):
result = -p.entropy() + (q.high - q.low).log()
result[(q.low > p.support.lower_bound) | (q.high < p.support.upper_bound)] = inf
return result
# Note that the KL between a ContinuousBernoulli and Beta has no closed form
@register_kl(ContinuousBernoulli, Pareto)
def _kl_continuous_bernoulli_infinity(p, q):
return _infinite_like(p.probs)
@register_kl(ContinuousBernoulli, Exponential)
def _kl_continuous_bernoulli_exponential(p, q):
return -p.entropy() - torch.log(q.rate) + q.rate * p.mean
# Note that the KL between a ContinuousBernoulli and Gamma has no closed form
# TODO: Add ContinuousBernoulli-Laplace KL Divergence
@register_kl(ContinuousBernoulli, Normal)
def _kl_continuous_bernoulli_normal(p, q):
t1 = -p.entropy()
t2 = 0.5 * (math.log(2. * math.pi) + torch.square(q.loc / q.scale)) + torch.log(q.scale)
t3 = (p.variance + torch.square(p.mean) - 2. * q.loc * p.mean) / (2.0 * torch.square(q.scale))
return t1 + t2 + t3
@register_kl(ContinuousBernoulli, Uniform)
def _kl_continuous_bernoulli_uniform(p, q):
result = -p.entropy() + (q.high - q.low).log()
return torch.where(torch.max(torch.ge(q.low, p.support.lower_bound),
torch.le(q.high, p.support.upper_bound)),
torch.ones_like(result) * inf, result)
@register_kl(Exponential, Beta)
@register_kl(Exponential, ContinuousBernoulli)
@register_kl(Exponential, Pareto)
@register_kl(Exponential, Uniform)
def _kl_exponential_infinity(p, q):
return _infinite_like(p.rate)
@register_kl(Exponential, Gamma)
def _kl_exponential_gamma(p, q):
ratio = q.rate / p.rate
t1 = -q.concentration * torch.log(ratio)
return t1 + ratio + q.concentration.lgamma() + q.concentration * _euler_gamma - (1 + _euler_gamma)
@register_kl(Exponential, Gumbel)
def _kl_exponential_gumbel(p, q):
scale_rate_prod = p.rate * q.scale
loc_scale_ratio = q.loc / q.scale
t1 = scale_rate_prod.log() - 1
t2 = torch.exp(loc_scale_ratio) * scale_rate_prod / (scale_rate_prod + 1)
t3 = scale_rate_prod.reciprocal()
return t1 - loc_scale_ratio + t2 + t3
# TODO: Add Exponential-Laplace KL Divergence
@register_kl(Exponential, Normal)
def _kl_exponential_normal(p, q):
var_normal = q.scale.pow(2)
rate_sqr = p.rate.pow(2)
t1 = 0.5 * torch.log(rate_sqr * var_normal * 2 * math.pi)
t2 = rate_sqr.reciprocal()
t3 = q.loc / p.rate
t4 = q.loc.pow(2) * 0.5
return t1 - 1 + (t2 - t3 + t4) / var_normal
@register_kl(Gamma, Beta)
@register_kl(Gamma, ContinuousBernoulli)
@register_kl(Gamma, Pareto)
@register_kl(Gamma, Uniform)
def _kl_gamma_infinity(p, q):
return _infinite_like(p.concentration)
@register_kl(Gamma, Exponential)
def _kl_gamma_exponential(p, q):
return -p.entropy() - q.rate.log() + q.rate * p.concentration / p.rate
@register_kl(Gamma, Gumbel)
def _kl_gamma_gumbel(p, q):
beta_scale_prod = p.rate * q.scale
loc_scale_ratio = q.loc / q.scale
t1 = (p.concentration - 1) * p.concentration.digamma() - p.concentration.lgamma() - p.concentration
t2 = beta_scale_prod.log() + p.concentration / beta_scale_prod
t3 = torch.exp(loc_scale_ratio) * (1 + beta_scale_prod.reciprocal()).pow(-p.concentration) - loc_scale_ratio
return t1 + t2 + t3
# TODO: Add Gamma-Laplace KL Divergence
@register_kl(Gamma, Normal)
def _kl_gamma_normal(p, q):
var_normal = q.scale.pow(2)
beta_sqr = p.rate.pow(2)
t1 = 0.5 * torch.log(beta_sqr * var_normal * 2 * math.pi) - p.concentration - p.concentration.lgamma()
t2 = 0.5 * (p.concentration.pow(2) + p.concentration) / beta_sqr
t3 = q.loc * p.concentration / p.rate
t4 = 0.5 * q.loc.pow(2)
return t1 + (p.concentration - 1) * p.concentration.digamma() + (t2 - t3 + t4) / var_normal
@register_kl(Gumbel, Beta)
@register_kl(Gumbel, ContinuousBernoulli)
@register_kl(Gumbel, Exponential)
@register_kl(Gumbel, Gamma)
@register_kl(Gumbel, Pareto)
@register_kl(Gumbel, Uniform)
def _kl_gumbel_infinity(p, q):
return _infinite_like(p.loc)
# TODO: Add Gumbel-Laplace KL Divergence
@register_kl(Gumbel, Normal)
def _kl_gumbel_normal(p, q):
param_ratio = p.scale / q.scale
t1 = (param_ratio / math.sqrt(2 * math.pi)).log()
t2 = (math.pi * param_ratio * 0.5).pow(2) / 3
t3 = ((p.loc + p.scale * _euler_gamma - q.loc) / q.scale).pow(2) * 0.5
return -t1 + t2 + t3 - (_euler_gamma + 1)
@register_kl(Laplace, Beta)
@register_kl(Laplace, ContinuousBernoulli)
@register_kl(Laplace, Exponential)
@register_kl(Laplace, Gamma)
@register_kl(Laplace, Pareto)
@register_kl(Laplace, Uniform)
def _kl_laplace_infinity(p, q):
return _infinite_like(p.loc)
@register_kl(Laplace, Normal)
def _kl_laplace_normal(p, q):
var_normal = q.scale.pow(2)
scale_sqr_var_ratio = p.scale.pow(2) / var_normal
t1 = 0.5 * torch.log(2 * scale_sqr_var_ratio / math.pi)
t2 = 0.5 * p.loc.pow(2)
t3 = p.loc * q.loc
t4 = 0.5 * q.loc.pow(2)
return -t1 + scale_sqr_var_ratio + (t2 - t3 + t4) / var_normal - 1
@register_kl(Normal, Beta)
@register_kl(Normal, ContinuousBernoulli)
@register_kl(Normal, Exponential)
@register_kl(Normal, Gamma)
@register_kl(Normal, Pareto)
@register_kl(Normal, Uniform)
def _kl_normal_infinity(p, q):
return _infinite_like(p.loc)
@register_kl(Normal, Gumbel)
def _kl_normal_gumbel(p, q):
mean_scale_ratio = p.loc / q.scale
var_scale_sqr_ratio = (p.scale / q.scale).pow(2)
loc_scale_ratio = q.loc / q.scale
t1 = var_scale_sqr_ratio.log() * 0.5
t2 = mean_scale_ratio - loc_scale_ratio
t3 = torch.exp(-mean_scale_ratio + 0.5 * var_scale_sqr_ratio + loc_scale_ratio)
return -t1 + t2 + t3 - (0.5 * (1 + math.log(2 * math.pi)))
@register_kl(Normal, Laplace)
def _kl_normal_laplace(p, q):
loc_diff = p.loc - q.loc
scale_ratio = p.scale / q.scale
loc_diff_scale_ratio = loc_diff / p.scale
t1 = torch.log(scale_ratio)
t2 = math.sqrt(2 / math.pi) * p.scale * torch.exp(-0.5 * loc_diff_scale_ratio.pow(2))
t3 = loc_diff * torch.erf(math.sqrt(0.5) * loc_diff_scale_ratio)
return -t1 + (t2 + t3) / q.scale - (0.5 * (1 + math.log(0.5 * math.pi)))
@register_kl(Pareto, Beta)
@register_kl(Pareto, ContinuousBernoulli)
@register_kl(Pareto, Uniform)
def _kl_pareto_infinity(p, q):
return _infinite_like(p.scale)
@register_kl(Pareto, Exponential)
def _kl_pareto_exponential(p, q):
scale_rate_prod = p.scale * q.rate
t1 = (p.alpha / scale_rate_prod).log()
t2 = p.alpha.reciprocal()
t3 = p.alpha * scale_rate_prod / (p.alpha - 1)
result = t1 - t2 + t3 - 1
result[p.alpha <= 1] = inf
return result
@register_kl(Pareto, Gamma)
def _kl_pareto_gamma(p, q):
common_term = p.scale.log() + p.alpha.reciprocal()
t1 = p.alpha.log() - common_term
t2 = q.concentration.lgamma() - q.concentration * q.rate.log()
t3 = (1 - q.concentration) * common_term
t4 = q.rate * p.alpha * p.scale / (p.alpha - 1)
result = t1 + t2 + t3 + t4 - 1
result[p.alpha <= 1] = inf
return result
# TODO: Add Pareto-Laplace KL Divergence
@register_kl(Pareto, Normal)
def _kl_pareto_normal(p, q):
var_normal = 2 * q.scale.pow(2)
common_term = p.scale / (p.alpha - 1)
t1 = (math.sqrt(2 * math.pi) * q.scale * p.alpha / p.scale).log()
t2 = p.alpha.reciprocal()
t3 = p.alpha * common_term.pow(2) / (p.alpha - 2)
t4 = (p.alpha * common_term - q.loc).pow(2)
result = t1 - t2 + (t3 + t4) / var_normal - 1
result[p.alpha <= 2] = inf
return result
@register_kl(Poisson, Bernoulli)
@register_kl(Poisson, Binomial)
def _kl_poisson_infinity(p, q):
return _infinite_like(p.rate)
@register_kl(Uniform, Beta)
def _kl_uniform_beta(p, q):
common_term = p.high - p.low
t1 = torch.log(common_term)
t2 = (q.concentration1 - 1) * (_x_log_x(p.high) - _x_log_x(p.low) - common_term) / common_term
t3 = (q.concentration0 - 1) * (_x_log_x((1 - p.high)) - _x_log_x((1 - p.low)) + common_term) / common_term
t4 = q.concentration1.lgamma() + q.concentration0.lgamma() - (q.concentration1 + q.concentration0).lgamma()
result = t3 + t4 - t1 - t2
result[(p.high > q.support.upper_bound) | (p.low < q.support.lower_bound)] = inf
return result
@register_kl(Uniform, ContinuousBernoulli)
def _kl_uniform_continuous_bernoulli(p, q):
result = -p.entropy() - p.mean * q.logits - torch.log1p(-q.probs) - q._cont_bern_log_norm()
return torch.where(torch.max(torch.ge(p.high, q.support.upper_bound),
torch.le(p.low, q.support.lower_bound)),
torch.ones_like(result) * inf, result)
@register_kl(Uniform, Exponential)
def _kl_uniform_exponential(p, q):
result = q.rate * (p.high + p.low) / 2 - ((p.high - p.low) * q.rate).log()
result[p.low < q.support.lower_bound] = inf
return result
@register_kl(Uniform, Gamma)
def _kl_uniform_gamma(p, q):
common_term = p.high - p.low
t1 = common_term.log()
t2 = q.concentration.lgamma() - q.concentration * q.rate.log()
t3 = (1 - q.concentration) * (_x_log_x(p.high) - _x_log_x(p.low) - common_term) / common_term
t4 = q.rate * (p.high + p.low) / 2
result = -t1 + t2 + t3 + t4
result[p.low < q.support.lower_bound] = inf
return result
@register_kl(Uniform, Gumbel)
def _kl_uniform_gumbel(p, q):
common_term = q.scale / (p.high - p.low)
high_loc_diff = (p.high - q.loc) / q.scale
low_loc_diff = (p.low - q.loc) / q.scale
t1 = common_term.log() + 0.5 * (high_loc_diff + low_loc_diff)
t2 = common_term * (torch.exp(-high_loc_diff) - torch.exp(-low_loc_diff))
return t1 - t2
# TODO: Uniform-Laplace KL Divergence
@register_kl(Uniform, Normal)
def _kl_uniform_normal(p, q):
common_term = p.high - p.low
t1 = (math.sqrt(math.pi * 2) * q.scale / common_term).log()
t2 = (common_term).pow(2) / 12
t3 = ((p.high + p.low - 2 * q.loc) / 2).pow(2)
return t1 + 0.5 * (t2 + t3) / q.scale.pow(2)
@register_kl(Uniform, Pareto)
def _kl_uniform_pareto(p, q):
support_uniform = p.high - p.low
t1 = (q.alpha * q.scale.pow(q.alpha) * (support_uniform)).log()
t2 = (_x_log_x(p.high) - _x_log_x(p.low) - support_uniform) / support_uniform
result = t2 * (q.alpha + 1) - t1
result[p.low < q.support.lower_bound] = inf
return result
@register_kl(Independent, Independent)
def _kl_independent_independent(p, q):
if p.reinterpreted_batch_ndims != q.reinterpreted_batch_ndims:
raise NotImplementedError
result = kl_divergence(p.base_dist, q.base_dist)
return _sum_rightmost(result, p.reinterpreted_batch_ndims)
@register_kl(Cauchy, Cauchy)
def _kl_cauchy_cauchy(p, q):
# From https://arxiv.org/abs/1905.10965
t1 = ((p.scale + q.scale).pow(2) + (p.loc - q.loc).pow(2)).log()
t2 = (4 * p.scale * q.scale).log()
return t1 - t2
def _add_kl_info():
"""Appends a list of implemented KL functions to the doc for kl_divergence."""
rows = ["KL divergence is currently implemented for the following distribution pairs:"]
for p, q in sorted(_KL_REGISTRY,
key=lambda p_q: (p_q[0].__name__, p_q[1].__name__)):
rows.append("* :class:`~torch.distributions.{}` and :class:`~torch.distributions.{}`"
.format(p.__name__, q.__name__))
kl_info = '\n\t'.join(rows)
if kl_divergence.__doc__:
kl_divergence.__doc__ += kl_info # type: ignore[operator]
| pytorch-master | torch/distributions/kl.py |
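A minimal sketch of registering a custom KL implementation with the registry above (the `MyNormal` subclass and `_kl_mynormal_normal` helper are hypothetical names introduced here for illustration; `register_kl`, `kl_divergence`, and `Normal` are the public `torch.distributions` entry points):

import torch
from torch.distributions import Normal, kl_divergence, register_kl

class MyNormal(Normal):
    """A trivial subclass, standing in for a user-defined distribution."""

@register_kl(MyNormal, Normal)
def _kl_mynormal_normal(p, q):
    # Reuse the closed-form Normal/Normal expression for the subclass.
    var_ratio = (p.scale / q.scale).pow(2)
    t1 = ((p.loc - q.loc) / q.scale).pow(2)
    return 0.5 * (var_ratio + t1 - 1 - var_ratio.log())

p = MyNormal(torch.zeros(2), torch.ones(2))
q = Normal(torch.ones(2), torch.ones(2))
print(kl_divergence(p, q))  # dispatches to the most specific registration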
import torch
from torch.distributions.distribution import Distribution
from torch.distributions import Categorical
from torch.distributions import constraints
from typing import Dict
__all__ = ['MixtureSameFamily']
class MixtureSameFamily(Distribution):
r"""
The `MixtureSameFamily` distribution implements a (batch of) mixture
distribution where all components are from different parameterizations of
the same distribution type. It is parameterized by a `Categorical`
"selecting distribution" (over `k` components) and a component
distribution, i.e., a `Distribution` with a rightmost batch shape
(equal to `[k]`) which indexes each (batch of) component.
Examples::
>>> # xdoctest: +SKIP("undefined vars")
>>> # Construct Gaussian Mixture Model in 1D consisting of 5 equally
>>> # weighted normal distributions
>>> mix = D.Categorical(torch.ones(5,))
>>> comp = D.Normal(torch.randn(5,), torch.rand(5,))
>>> gmm = MixtureSameFamily(mix, comp)
>>> # Construct Gaussian Mixture Model in 2D consisting of 5 equally
>>> # weighted bivariate normal distributions
>>> mix = D.Categorical(torch.ones(5,))
>>> comp = D.Independent(D.Normal(
... torch.randn(5,2), torch.rand(5,2)), 1)
>>> gmm = MixtureSameFamily(mix, comp)
>>> # Construct a batch of 3 Gaussian Mixture Models in 2D each
>>> # consisting of 5 random weighted bivariate normal distributions
>>> mix = D.Categorical(torch.rand(3,5))
>>> comp = D.Independent(D.Normal(
... torch.randn(3,5,2), torch.rand(3,5,2)), 1)
>>> gmm = MixtureSameFamily(mix, comp)
Args:
mixture_distribution: `torch.distributions.Categorical`-like
instance. Manages the probability of selecting components.
The number of categories must match the rightmost batch
dimension of the `component_distribution`. Must have either
scalar `batch_shape` or `batch_shape` matching
`component_distribution.batch_shape[:-1]`
component_distribution: `torch.distributions.Distribution`-like
instance. Right-most batch dimension indexes components.
"""
arg_constraints: Dict[str, constraints.Constraint] = {}
has_rsample = False
def __init__(self,
mixture_distribution,
component_distribution,
validate_args=None):
self._mixture_distribution = mixture_distribution
self._component_distribution = component_distribution
if not isinstance(self._mixture_distribution, Categorical):
raise ValueError("The Mixture distribution needs to be an "
"instance of torch.distributions.Categorical")
if not isinstance(self._component_distribution, Distribution):
raise ValueError("The Component distribution needs to be an "
"instance of torch.distributions.Distribution")
# Check that batch size matches
mdbs = self._mixture_distribution.batch_shape
cdbs = self._component_distribution.batch_shape[:-1]
for size1, size2 in zip(reversed(mdbs), reversed(cdbs)):
if size1 != 1 and size2 != 1 and size1 != size2:
raise ValueError("`mixture_distribution.batch_shape` ({0}) is not "
"compatible with `component_distribution."
"batch_shape`({1})".format(mdbs, cdbs))
# Check that the number of mixture components matches
km = self._mixture_distribution.logits.shape[-1]
kc = self._component_distribution.batch_shape[-1]
if km is not None and kc is not None and km != kc:
raise ValueError("`mixture_distribution` component count ({0}) does not"
" equal `component_distribution.batch_shape[-1]`"
" ({1})".format(km, kc))
self._num_component = km
event_shape = self._component_distribution.event_shape
self._event_ndims = len(event_shape)
super(MixtureSameFamily, self).__init__(batch_shape=cdbs,
event_shape=event_shape,
validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
batch_shape = torch.Size(batch_shape)
batch_shape_comp = batch_shape + (self._num_component,)
new = self._get_checked_instance(MixtureSameFamily, _instance)
new._component_distribution = \
self._component_distribution.expand(batch_shape_comp)
new._mixture_distribution = \
self._mixture_distribution.expand(batch_shape)
new._num_component = self._num_component
new._event_ndims = self._event_ndims
event_shape = new._component_distribution.event_shape
super(MixtureSameFamily, new).__init__(batch_shape=batch_shape,
event_shape=event_shape,
validate_args=False)
new._validate_args = self._validate_args
return new
@constraints.dependent_property
def support(self):
# FIXME this may have the wrong shape when support contains batched
# parameters
return self._component_distribution.support
@property
def mixture_distribution(self):
return self._mixture_distribution
@property
def component_distribution(self):
return self._component_distribution
@property
def mean(self):
probs = self._pad_mixture_dimensions(self.mixture_distribution.probs)
return torch.sum(probs * self.component_distribution.mean,
dim=-1 - self._event_ndims) # [B, E]
@property
def variance(self):
# Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
probs = self._pad_mixture_dimensions(self.mixture_distribution.probs)
mean_cond_var = torch.sum(probs * self.component_distribution.variance,
dim=-1 - self._event_ndims)
var_cond_mean = torch.sum(probs * (self.component_distribution.mean -
self._pad(self.mean)).pow(2.0),
dim=-1 - self._event_ndims)
return mean_cond_var + var_cond_mean
def cdf(self, x):
x = self._pad(x)
cdf_x = self.component_distribution.cdf(x)
mix_prob = self.mixture_distribution.probs
return torch.sum(cdf_x * mix_prob, dim=-1)
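# Marginal log-density of the mixture: log p(x) = logsumexp_k(log pi_k + log p_k(x)),
# computed below by broadcasting the padded sample against the component batch.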
def log_prob(self, x):
if self._validate_args:
self._validate_sample(x)
x = self._pad(x)
log_prob_x = self.component_distribution.log_prob(x) # [S, B, k]
log_mix_prob = torch.log_softmax(self.mixture_distribution.logits,
dim=-1) # [B, k]
return torch.logsumexp(log_prob_x + log_mix_prob, dim=-1) # [S, B]
def sample(self, sample_shape=torch.Size()):
with torch.no_grad():
sample_len = len(sample_shape)
batch_len = len(self.batch_shape)
gather_dim = sample_len + batch_len
es = self.event_shape
# mixture samples [n, B]
mix_sample = self.mixture_distribution.sample(sample_shape)
mix_shape = mix_sample.shape
# component samples [n, B, k, E]
comp_samples = self.component_distribution.sample(sample_shape)
# Gather along the k dimension
mix_sample_r = mix_sample.reshape(
mix_shape + torch.Size([1] * (len(es) + 1)))
mix_sample_r = mix_sample_r.repeat(
torch.Size([1] * len(mix_shape)) + torch.Size([1]) + es)
samples = torch.gather(comp_samples, gather_dim, mix_sample_r)
return samples.squeeze(gather_dim)
def _pad(self, x):
return x.unsqueeze(-1 - self._event_ndims)
def _pad_mixture_dimensions(self, x):
dist_batch_ndims = self.batch_shape.numel()
cat_batch_ndims = self.mixture_distribution.batch_shape.numel()
pad_ndims = 0 if cat_batch_ndims == 1 else \
dist_batch_ndims - cat_batch_ndims
xs = x.shape
x = x.reshape(xs[:-1] + torch.Size(pad_ndims * [1]) +
xs[-1:] + torch.Size(self._event_ndims * [1]))
return x
def __repr__(self):
args_string = '\n {},\n {}'.format(self.mixture_distribution,
self.component_distribution)
return 'MixtureSameFamily' + '(' + args_string + ')'
| pytorch-master | torch/distributions/mixture_same_family.py |
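A runnable counterpart to the docstring examples above, assuming a standard `torch` install:

import torch
import torch.distributions as D

# 1D Gaussian mixture with 5 equally weighted components
mix = D.Categorical(torch.ones(5))
comp = D.Normal(torch.randn(5), torch.rand(5) + 0.1)
gmm = D.MixtureSameFamily(mix, comp)

x = gmm.sample((4,))           # 4 scalar draws
print(gmm.log_prob(x).shape)   # torch.Size([4])
print(gmm.mean, gmm.variance)  # moments via the law of total variance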
from numbers import Number
import math
import torch
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, clamp_probs
from torch.nn.functional import binary_cross_entropy_with_logits
__all__ = ['ContinuousBernoulli']
class ContinuousBernoulli(ExponentialFamily):
r"""
Creates a continuous Bernoulli distribution parameterized by :attr:`probs`
or :attr:`logits` (but not both).
The distribution is supported in [0, 1] and parameterized by 'probs' (in
(0,1)) or 'logits' (real-valued). Note that, unlike the Bernoulli, 'probs'
does not correspond to a probability and 'logits' does not correspond to
log-odds, but the same names are used due to the similarity with the
Bernoulli. See [1] for more details.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = ContinuousBernoulli(torch.tensor([0.3]))
>>> m.sample()
tensor([ 0.2538])
Args:
probs (Number, Tensor): (0,1) valued parameters
logits (Number, Tensor): real valued parameters whose sigmoid matches 'probs'
[1] The continuous Bernoulli: fixing a pervasive error in variational
autoencoders, Loaiza-Ganem G and Cunningham JP, NeurIPS 2019.
https://arxiv.org/abs/1907.06845
"""
arg_constraints = {'probs': constraints.unit_interval,
'logits': constraints.real}
support = constraints.unit_interval
_mean_carrier_measure = 0
has_rsample = True
def __init__(self, probs=None, logits=None, lims=(0.499, 0.501), validate_args=None):
if (probs is None) == (logits is None):
raise ValueError("Either `probs` or `logits` must be specified, but not both.")
if probs is not None:
is_scalar = isinstance(probs, Number)
self.probs, = broadcast_all(probs)
# validate 'probs' here if necessary, as it is later clamped for numerical stability
# close to 0 and 1; otherwise the clamped 'probs' would always pass the check
if validate_args is not None:
if not self.arg_constraints['probs'].check(getattr(self, 'probs')).all():
raise ValueError("The parameter {} has invalid values".format('probs'))
self.probs = clamp_probs(self.probs)
else:
is_scalar = isinstance(logits, Number)
self.logits, = broadcast_all(logits)
self._param = self.probs if probs is not None else self.logits
if is_scalar:
batch_shape = torch.Size()
else:
batch_shape = self._param.size()
self._lims = lims
super(ContinuousBernoulli, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(ContinuousBernoulli, _instance)
new._lims = self._lims
batch_shape = torch.Size(batch_shape)
if 'probs' in self.__dict__:
new.probs = self.probs.expand(batch_shape)
new._param = new.probs
if 'logits' in self.__dict__:
new.logits = self.logits.expand(batch_shape)
new._param = new.logits
super(ContinuousBernoulli, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._param.new(*args, **kwargs)
def _outside_unstable_region(self):
return torch.max(torch.le(self.probs, self._lims[0]),
torch.gt(self.probs, self._lims[1]))
def _cut_probs(self):
return torch.where(self._outside_unstable_region(),
self.probs,
self._lims[0] * torch.ones_like(self.probs))
def _cont_bern_log_norm(self):
'''computes the log normalizing constant as a function of the 'probs' parameter'''
cut_probs = self._cut_probs()
cut_probs_below_half = torch.where(torch.le(cut_probs, 0.5),
cut_probs,
torch.zeros_like(cut_probs))
cut_probs_above_half = torch.where(torch.ge(cut_probs, 0.5),
cut_probs,
torch.ones_like(cut_probs))
log_norm = torch.log(torch.abs(torch.log1p(-cut_probs) - torch.log(cut_probs))) - torch.where(
torch.le(cut_probs, 0.5),
torch.log1p(-2.0 * cut_probs_below_half),
torch.log(2.0 * cut_probs_above_half - 1.0))
x = torch.pow(self.probs - 0.5, 2)
taylor = math.log(2.0) + (4.0 / 3.0 + 104.0 / 45.0 * x) * x
return torch.where(self._outside_unstable_region(), log_norm, taylor)
@property
def mean(self):
cut_probs = self._cut_probs()
mus = cut_probs / (2.0 * cut_probs - 1.0) + 1.0 / (torch.log1p(-cut_probs) - torch.log(cut_probs))
x = self.probs - 0.5
taylor = 0.5 + (1.0 / 3.0 + 16.0 / 45.0 * torch.pow(x, 2)) * x
return torch.where(self._outside_unstable_region(), mus, taylor)
@property
def stddev(self):
return torch.sqrt(self.variance)
@property
def variance(self):
cut_probs = self._cut_probs()
vars = cut_probs * (cut_probs - 1.0) / torch.pow(1.0 - 2.0 * cut_probs, 2) + 1.0 / torch.pow(
torch.log1p(-cut_probs) - torch.log(cut_probs), 2)
x = torch.pow(self.probs - 0.5, 2)
taylor = 1.0 / 12.0 - (1.0 / 15.0 - 128. / 945.0 * x) * x
return torch.where(self._outside_unstable_region(), vars, taylor)
@lazy_property
def logits(self):
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return clamp_probs(logits_to_probs(self.logits, is_binary=True))
@property
def param_shape(self):
return self._param.size()
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device)
with torch.no_grad():
return self.icdf(u)
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device)
return self.icdf(u)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
logits, value = broadcast_all(self.logits, value)
return -binary_cross_entropy_with_logits(logits, value, reduction='none') + self._cont_bern_log_norm()
def cdf(self, value):
if self._validate_args:
self._validate_sample(value)
cut_probs = self._cut_probs()
cdfs = (torch.pow(cut_probs, value) * torch.pow(1.0 - cut_probs, 1.0 - value)
+ cut_probs - 1.0) / (2.0 * cut_probs - 1.0)
unbounded_cdfs = torch.where(self._outside_unstable_region(), cdfs, value)
return torch.where(
torch.le(value, 0.0),
torch.zeros_like(value),
torch.where(torch.ge(value, 1.0), torch.ones_like(value), unbounded_cdfs))
def icdf(self, value):
cut_probs = self._cut_probs()
return torch.where(
self._outside_unstable_region(),
(torch.log1p(-cut_probs + value * (2.0 * cut_probs - 1.0))
- torch.log1p(-cut_probs)) / (torch.log(cut_probs) - torch.log1p(-cut_probs)),
value)
def entropy(self):
log_probs0 = torch.log1p(-self.probs)
log_probs1 = torch.log(self.probs)
return self.mean * (log_probs0 - log_probs1) - self._cont_bern_log_norm() - log_probs0
@property
def _natural_params(self):
return (self.logits, )
def _log_normalizer(self, x):
"""computes the log normalizing constant as a function of the natural parameter"""
out_unst_reg = torch.max(torch.le(x, self._lims[0] - 0.5),
torch.gt(x, self._lims[1] - 0.5))
cut_nat_params = torch.where(out_unst_reg,
x,
(self._lims[0] - 0.5) * torch.ones_like(x))
log_norm = torch.log(torch.abs(torch.exp(cut_nat_params) - 1.0)) - torch.log(torch.abs(cut_nat_params))
taylor = 0.5 * x + torch.pow(x, 2) / 24.0 - torch.pow(x, 4) / 2880.0
return torch.where(out_unst_reg, log_norm, taylor)
| pytorch-master | torch/distributions/continuous_bernoulli.py |
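A short sketch of the reparameterized sampling path above (`rsample` maps uniform noise through `icdf`, so gradients reach the parameters), assuming a standard `torch` install:

import torch
from torch.distributions import ContinuousBernoulli

probs = torch.tensor([0.3, 0.7], requires_grad=True)
d = ContinuousBernoulli(probs=probs)
x = d.rsample((10,))             # pathwise samples in (0, 1)
loss = d.log_prob(x).mean()
loss.backward()                  # gradients flow back through icdf and log_prob
print(probs.grad)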
from numbers import Number
import torch
from torch._six import nan
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.gamma import Gamma
from torch.distributions.utils import broadcast_all
__all__ = ['FisherSnedecor']
class FisherSnedecor(Distribution):
r"""
Creates a Fisher-Snedecor distribution parameterized by :attr:`df1` and :attr:`df2`.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = FisherSnedecor(torch.tensor([1.0]), torch.tensor([2.0]))
>>> m.sample() # Fisher-Snedecor-distributed with df1=1 and df2=2
tensor([ 0.2453])
Args:
df1 (float or Tensor): degrees of freedom parameter 1
df2 (float or Tensor): degrees of freedom parameter 2
"""
arg_constraints = {'df1': constraints.positive, 'df2': constraints.positive}
support = constraints.positive
has_rsample = True
def __init__(self, df1, df2, validate_args=None):
self.df1, self.df2 = broadcast_all(df1, df2)
self._gamma1 = Gamma(self.df1 * 0.5, self.df1)
self._gamma2 = Gamma(self.df2 * 0.5, self.df2)
if isinstance(df1, Number) and isinstance(df2, Number):
batch_shape = torch.Size()
else:
batch_shape = self.df1.size()
super(FisherSnedecor, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(FisherSnedecor, _instance)
batch_shape = torch.Size(batch_shape)
new.df1 = self.df1.expand(batch_shape)
new.df2 = self.df2.expand(batch_shape)
new._gamma1 = self._gamma1.expand(batch_shape)
new._gamma2 = self._gamma2.expand(batch_shape)
super(FisherSnedecor, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
@property
def mean(self):
df2 = self.df2.clone(memory_format=torch.contiguous_format)
df2[df2 <= 2] = nan
return df2 / (df2 - 2)
@property
def mode(self):
mode = (self.df1 - 2) / self.df1 * self.df2 / (self.df2 + 2)
mode[self.df1 <= 2] = nan
return mode
@property
def variance(self):
df2 = self.df2.clone(memory_format=torch.contiguous_format)
df2[df2 <= 4] = nan
return 2 * df2.pow(2) * (self.df1 + df2 - 2) / (self.df1 * (df2 - 2).pow(2) * (df2 - 4))
def rsample(self, sample_shape=torch.Size(())):
shape = self._extended_shape(sample_shape)
# X1 ~ Gamma(df1 / 2, 1 / df1), X2 ~ Gamma(df2 / 2, 1 / df2)
# Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2)
X1 = self._gamma1.rsample(sample_shape).view(shape)
X2 = self._gamma2.rsample(sample_shape).view(shape)
tiny = torch.finfo(X2.dtype).tiny
X2.clamp_(min=tiny)
Y = X1 / X2
Y.clamp_(min=tiny)
return Y
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
ct1 = self.df1 * 0.5
ct2 = self.df2 * 0.5
ct3 = self.df1 / self.df2
t1 = (ct1 + ct2).lgamma() - ct1.lgamma() - ct2.lgamma()
t2 = ct1 * ct3.log() + (ct1 - 1) * torch.log(value)
t3 = (ct1 + ct2) * torch.log1p(ct3 * value)
return t1 + t2 - t3
| pytorch-master | torch/distributions/fishersnedecor.py |
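A quick Monte Carlo sanity check against the analytic mean `df2 / (df2 - 2)` (finite only for `df2 > 2`), assuming a standard `torch` install:

import torch
from torch.distributions import FisherSnedecor

m = FisherSnedecor(torch.tensor([4.0]), torch.tensor([10.0]))
samples = m.sample((100000,))
print(samples.mean())   # empirical mean, roughly 1.25
print(m.mean)           # analytic mean: tensor([1.2500])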
r"""
The following constraints are implemented:
- ``constraints.boolean``
- ``constraints.cat``
- ``constraints.corr_cholesky``
- ``constraints.dependent``
- ``constraints.greater_than(lower_bound)``
- ``constraints.greater_than_eq(lower_bound)``
- ``constraints.independent(constraint, reinterpreted_batch_ndims)``
- ``constraints.integer_interval(lower_bound, upper_bound)``
- ``constraints.interval(lower_bound, upper_bound)``
- ``constraints.less_than(upper_bound)``
- ``constraints.lower_cholesky``
- ``constraints.lower_triangular``
- ``constraints.multinomial``
- ``constraints.nonnegative_integer``
- ``constraints.one_hot``
- ``constraints.positive_integer``
- ``constraints.positive``
- ``constraints.positive_semidefinite``
- ``constraints.positive_definite``
- ``constraints.real_vector``
- ``constraints.real``
- ``constraints.simplex``
- ``constraints.stack``
- ``constraints.square``
- ``constraints.symmetric``
- ``constraints.unit_interval``
"""
import torch
__all__ = [
'Constraint',
'boolean',
'cat',
'corr_cholesky',
'dependent',
'dependent_property',
'greater_than',
'greater_than_eq',
'independent',
'integer_interval',
'interval',
'half_open_interval',
'is_dependent',
'less_than',
'lower_cholesky',
'lower_triangular',
'multinomial',
'nonnegative_integer',
'positive',
'positive_semidefinite',
'positive_definite',
'positive_integer',
'real',
'real_vector',
'simplex',
'square',
'stack',
'symmetric',
'unit_interval',
]
class Constraint(object):
"""
Abstract base class for constraints.
A constraint object represents a region over which a variable is valid,
e.g. within which a variable can be optimized.
Attributes:
is_discrete (bool): Whether constrained space is discrete.
Defaults to False.
event_dim (int): Number of rightmost dimensions that together define
an event. The :meth:`check` method will remove this many dimensions
when computing validity.
"""
is_discrete = False # Default to continuous.
event_dim = 0 # Default to univariate.
def check(self, value):
"""
Returns a byte tensor of ``sample_shape + batch_shape`` indicating
whether each event in value satisfies this constraint.
"""
raise NotImplementedError
def __repr__(self):
return self.__class__.__name__[1:] + '()'
class _Dependent(Constraint):
"""
Placeholder for variables whose support depends on other variables.
These variables obey no simple coordinate-wise constraints.
Args:
is_discrete (bool): Optional value of ``.is_discrete`` in case this
can be computed statically. If not provided, access to the
``.is_discrete`` attribute will raise a NotImplementedError.
event_dim (int): Optional value of ``.event_dim`` in case this
can be computed statically. If not provided, access to the
``.event_dim`` attribute will raise a NotImplementedError.
"""
def __init__(self, *, is_discrete=NotImplemented, event_dim=NotImplemented):
self._is_discrete = is_discrete
self._event_dim = event_dim
super().__init__()
@property
def is_discrete(self):
if self._is_discrete is NotImplemented:
raise NotImplementedError(".is_discrete cannot be determined statically")
return self._is_discrete
@property
def event_dim(self):
if self._event_dim is NotImplemented:
raise NotImplementedError(".event_dim cannot be determined statically")
return self._event_dim
def __call__(self, *, is_discrete=NotImplemented, event_dim=NotImplemented):
"""
Support for syntax to customize static attributes::
constraints.dependent(is_discrete=True, event_dim=1)
"""
if is_discrete is NotImplemented:
is_discrete = self._is_discrete
if event_dim is NotImplemented:
event_dim = self._event_dim
return _Dependent(is_discrete=is_discrete, event_dim=event_dim)
def check(self, x):
raise ValueError('Cannot determine validity of dependent constraint')
def is_dependent(constraint):
return isinstance(constraint, _Dependent)
class _DependentProperty(property, _Dependent):
"""
Decorator that extends @property to act like a `Dependent` constraint when
called on a class and act like a property when called on an object.
Example::
class Uniform(Distribution):
def __init__(self, low, high):
self.low = low
self.high = high
@constraints.dependent_property(is_discrete=False, event_dim=0)
def support(self):
return constraints.interval(self.low, self.high)
Args:
fn (Callable): The function to be decorated.
is_discrete (bool): Optional value of ``.is_discrete`` in case this
can be computed statically. If not provided, access to the
``.is_discrete`` attribute will raise a NotImplementedError.
event_dim (int): Optional value of ``.event_dim`` in case this
can be computed statically. If not provided, access to the
``.event_dim`` attribute will raise a NotImplementedError.
"""
def __init__(self, fn=None, *, is_discrete=NotImplemented, event_dim=NotImplemented):
super().__init__(fn)
self._is_discrete = is_discrete
self._event_dim = event_dim
def __call__(self, fn):
"""
Support for syntax to customize static attributes::
@constraints.dependent_property(is_discrete=True, event_dim=1)
def support(self):
...
"""
return _DependentProperty(fn, is_discrete=self._is_discrete, event_dim=self._event_dim)
class _IndependentConstraint(Constraint):
"""
Wraps a constraint by aggregating over ``reinterpreted_batch_ndims``-many
dims in :meth:`check`, so that an event is valid only if all its
independent entries are valid.
"""
def __init__(self, base_constraint, reinterpreted_batch_ndims):
assert isinstance(base_constraint, Constraint)
assert isinstance(reinterpreted_batch_ndims, int)
assert reinterpreted_batch_ndims >= 0
self.base_constraint = base_constraint
self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
super().__init__()
@property
def is_discrete(self):
return self.base_constraint.is_discrete
@property
def event_dim(self):
return self.base_constraint.event_dim + self.reinterpreted_batch_ndims
def check(self, value):
result = self.base_constraint.check(value)
if result.dim() < self.reinterpreted_batch_ndims:
expected = self.base_constraint.event_dim + self.reinterpreted_batch_ndims
raise ValueError(f"Expected value.dim() >= {expected} but got {value.dim()}")
result = result.reshape(result.shape[:result.dim() - self.reinterpreted_batch_ndims] + (-1,))
result = result.all(-1)
return result
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__[1:], repr(self.base_constraint),
self.reinterpreted_batch_ndims)
class _Boolean(Constraint):
"""
Constrain to the two values `{0, 1}`.
"""
is_discrete = True
def check(self, value):
return (value == 0) | (value == 1)
class _OneHot(Constraint):
"""
Constrain to one-hot vectors.
"""
is_discrete = True
event_dim = 1
def check(self, value):
is_boolean = (value == 0) | (value == 1)
is_normalized = value.sum(-1).eq(1)
return is_boolean.all(-1) & is_normalized
class _IntegerInterval(Constraint):
"""
Constrain to an integer interval `[lower_bound, upper_bound]`.
"""
is_discrete = True
def __init__(self, lower_bound, upper_bound):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
super().__init__()
def check(self, value):
return (value % 1 == 0) & (self.lower_bound <= value) & (value <= self.upper_bound)
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(lower_bound={}, upper_bound={})'.format(self.lower_bound, self.upper_bound)
return fmt_string
class _IntegerLessThan(Constraint):
"""
Constrain to an integer interval `(-inf, upper_bound]`.
"""
is_discrete = True
def __init__(self, upper_bound):
self.upper_bound = upper_bound
super().__init__()
def check(self, value):
return (value % 1 == 0) & (value <= self.upper_bound)
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(upper_bound={})'.format(self.upper_bound)
return fmt_string
class _IntegerGreaterThan(Constraint):
"""
Constrain to an integer interval `[lower_bound, inf)`.
"""
is_discrete = True
def __init__(self, lower_bound):
self.lower_bound = lower_bound
super().__init__()
def check(self, value):
return (value % 1 == 0) & (value >= self.lower_bound)
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(lower_bound={})'.format(self.lower_bound)
return fmt_string
class _Real(Constraint):
"""
Trivially constrain to the extended real line `[-inf, inf]`.
"""
def check(self, value):
return value == value # False for NANs.
class _GreaterThan(Constraint):
"""
Constrain to a real half line `(lower_bound, inf]`.
"""
def __init__(self, lower_bound):
self.lower_bound = lower_bound
super().__init__()
def check(self, value):
return self.lower_bound < value
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(lower_bound={})'.format(self.lower_bound)
return fmt_string
class _GreaterThanEq(Constraint):
"""
Constrain to a real half line `[lower_bound, inf)`.
"""
def __init__(self, lower_bound):
self.lower_bound = lower_bound
super().__init__()
def check(self, value):
return self.lower_bound <= value
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(lower_bound={})'.format(self.lower_bound)
return fmt_string
class _LessThan(Constraint):
"""
Constrain to a real half line `[-inf, upper_bound)`.
"""
def __init__(self, upper_bound):
self.upper_bound = upper_bound
super().__init__()
def check(self, value):
return value < self.upper_bound
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(upper_bound={})'.format(self.upper_bound)
return fmt_string
class _Interval(Constraint):
"""
Constrain to a real interval `[lower_bound, upper_bound]`.
"""
def __init__(self, lower_bound, upper_bound):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
super().__init__()
def check(self, value):
return (self.lower_bound <= value) & (value <= self.upper_bound)
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(lower_bound={}, upper_bound={})'.format(self.lower_bound, self.upper_bound)
return fmt_string
class _HalfOpenInterval(Constraint):
"""
Constrain to a real interval `[lower_bound, upper_bound)`.
"""
def __init__(self, lower_bound, upper_bound):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
super().__init__()
def check(self, value):
return (self.lower_bound <= value) & (value < self.upper_bound)
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(lower_bound={}, upper_bound={})'.format(self.lower_bound, self.upper_bound)
return fmt_string
class _Simplex(Constraint):
"""
Constrain to the unit simplex in the innermost (rightmost) dimension.
Specifically: `x >= 0` and `x.sum(-1) == 1`.
"""
event_dim = 1
def check(self, value):
return torch.all(value >= 0, dim=-1) & ((value.sum(-1) - 1).abs() < 1e-6)
class _Multinomial(Constraint):
"""
Constrain to nonnegative integer values summing to at most an upper bound.
Note due to limitations of the Multinomial distribution, this currently
checks the weaker condition ``value.sum(-1) <= upper_bound``. In the future
this may be strengthened to ``value.sum(-1) == upper_bound``.
"""
is_discrete = True
event_dim = 1
def __init__(self, upper_bound):
self.upper_bound = upper_bound
def check(self, x):
return (x >= 0).all(dim=-1) & (x.sum(dim=-1) <= self.upper_bound)
class _LowerTriangular(Constraint):
"""
Constrain to lower-triangular square matrices.
"""
event_dim = 2
def check(self, value):
value_tril = value.tril()
return (value_tril == value).view(value.shape[:-2] + (-1,)).min(-1)[0]
class _LowerCholesky(Constraint):
"""
Constrain to lower-triangular square matrices with positive diagonals.
"""
event_dim = 2
def check(self, value):
value_tril = value.tril()
lower_triangular = (value_tril == value).view(value.shape[:-2] + (-1,)).min(-1)[0]
positive_diagonal = (value.diagonal(dim1=-2, dim2=-1) > 0).min(-1)[0]
return lower_triangular & positive_diagonal
class _CorrCholesky(Constraint):
"""
Constrain to lower-triangular square matrices with positive diagonals and each
row vector being of unit length.
"""
event_dim = 2
def check(self, value):
tol = torch.finfo(value.dtype).eps * value.size(-1) * 10 # 10 is an adjustable fudge factor
row_norm = torch.linalg.norm(value.detach(), dim=-1)
unit_row_norm = (row_norm - 1.).abs().le(tol).all(dim=-1)
return _LowerCholesky().check(value) & unit_row_norm
class _Square(Constraint):
"""
Constrain to square matrices.
"""
event_dim = 2
def check(self, value):
return torch.full(
size=value.shape[:-2],
fill_value=(value.shape[-2] == value.shape[-1]),
dtype=torch.bool,
device=value.device
)
class _Symmetric(_Square):
"""
Constrain to Symmetric square matrices.
"""
def check(self, value):
square_check = super().check(value)
if not square_check.all():
return square_check
return torch.isclose(value, value.mT, atol=1e-6).all(-2).all(-1)
class _PositiveSemidefinite(_Symmetric):
"""
Constrain to positive-semidefinite matrices.
"""
def check(self, value):
sym_check = super().check(value)
if not sym_check.all():
return sym_check
return torch.linalg.eigvalsh(value).ge(0).all(-1)
class _PositiveDefinite(_Symmetric):
"""
Constrain to positive-definite matrices.
"""
def check(self, value):
sym_check = super().check(value)
if not sym_check.all():
return sym_check
return torch.linalg.cholesky_ex(value).info.eq(0)
class _Cat(Constraint):
"""
Constraint functor that applies a sequence of constraints
`cseq` at the submatrices at dimension `dim`,
each of size `lengths[dim]`, in a way compatible with :func:`torch.cat`.
"""
def __init__(self, cseq, dim=0, lengths=None):
assert all(isinstance(c, Constraint) for c in cseq)
self.cseq = list(cseq)
if lengths is None:
lengths = [1] * len(self.cseq)
self.lengths = list(lengths)
assert len(self.lengths) == len(self.cseq)
self.dim = dim
super().__init__()
@property
def is_discrete(self):
return any(c.is_discrete for c in self.cseq)
@property
def event_dim(self):
return max(c.event_dim for c in self.cseq)
def check(self, value):
assert -value.dim() <= self.dim < value.dim()
checks = []
start = 0
for constr, length in zip(self.cseq, self.lengths):
v = value.narrow(self.dim, start, length)
checks.append(constr.check(v))
start = start + length # avoid += for jit compat
return torch.cat(checks, self.dim)
class _Stack(Constraint):
"""
Constraint functor that applies a sequence of constraints
`cseq` at the submatrices at dimension `dim`,
in a way compatible with :func:`torch.stack`.
"""
def __init__(self, cseq, dim=0):
assert all(isinstance(c, Constraint) for c in cseq)
self.cseq = list(cseq)
self.dim = dim
super().__init__()
@property
def is_discrete(self):
return any(c.is_discrete for c in self.cseq)
@property
def event_dim(self):
dim = max(c.event_dim for c in self.cseq)
if self.dim + dim < 0:
dim += 1
return dim
def check(self, value):
assert -value.dim() <= self.dim < value.dim()
vs = [value.select(self.dim, i) for i in range(value.size(self.dim))]
return torch.stack([constr.check(v)
for v, constr in zip(vs, self.cseq)], self.dim)
# Public interface.
dependent = _Dependent()
dependent_property = _DependentProperty
independent = _IndependentConstraint
boolean = _Boolean()
one_hot = _OneHot()
nonnegative_integer = _IntegerGreaterThan(0)
positive_integer = _IntegerGreaterThan(1)
integer_interval = _IntegerInterval
real = _Real()
real_vector = independent(real, 1)
positive = _GreaterThan(0.)
nonnegative = _GreaterThanEq(0.)
greater_than = _GreaterThan
greater_than_eq = _GreaterThanEq
less_than = _LessThan
multinomial = _Multinomial
unit_interval = _Interval(0., 1.)
interval = _Interval
half_open_interval = _HalfOpenInterval
simplex = _Simplex()
lower_triangular = _LowerTriangular()
lower_cholesky = _LowerCholesky()
corr_cholesky = _CorrCholesky()
square = _Square()
symmetric = _Symmetric()
positive_semidefinite = _PositiveSemidefinite()
positive_definite = _PositiveDefinite()
cat = _Cat
stack = _Stack
| pytorch-master | torch/distributions/constraints.py |
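A small sketch of how `check` behaves for a few of the constraints defined above, assuming a standard `torch` install:

import torch
from torch.distributions import constraints

# elementwise interval check
print(constraints.unit_interval.check(torch.tensor([0.0, 0.5, 1.2])))
# tensor([ True,  True, False])

# aggregate over the last dimension: an event is valid only if every entry is
vec_positive = constraints.independent(constraints.positive, 1)
print(vec_positive.check(torch.tensor([[1.0, 2.0], [3.0, -1.0]])))
# tensor([ True, False])

# matrix-valued constraint (event_dim == 2)
print(constraints.lower_cholesky.check(torch.eye(3)))
# tensor(True)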
from numbers import Number
import torch
from torch._six import nan
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all
__all__ = ['Uniform']
class Uniform(Distribution):
r"""
Generates uniformly distributed random samples from the half-open interval
``[low, high)``.
Example::
>>> # xdoctest: +SKIP
>>> m = Uniform(torch.tensor([0.0]), torch.tensor([5.0]))
>>> m.sample() # uniformly distributed in the range [0.0, 5.0)
tensor([ 2.3418])
Args:
low (float or Tensor): lower range (inclusive).
high (float or Tensor): upper range (exclusive).
"""
# TODO allow (loc,scale) parameterization to allow independent constraints.
arg_constraints = {'low': constraints.dependent(is_discrete=False, event_dim=0),
'high': constraints.dependent(is_discrete=False, event_dim=0)}
has_rsample = True
@property
def mean(self):
return (self.high + self.low) / 2
@property
def mode(self):
return nan * self.high
@property
def stddev(self):
return (self.high - self.low) / 12**0.5
@property
def variance(self):
return (self.high - self.low).pow(2) / 12
def __init__(self, low, high, validate_args=None):
self.low, self.high = broadcast_all(low, high)
if isinstance(low, Number) and isinstance(high, Number):
batch_shape = torch.Size()
else:
batch_shape = self.low.size()
super(Uniform, self).__init__(batch_shape, validate_args=validate_args)
if self._validate_args and not torch.lt(self.low, self.high).all():
raise ValueError("Uniform is not defined when low>= high")
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Uniform, _instance)
batch_shape = torch.Size(batch_shape)
new.low = self.low.expand(batch_shape)
new.high = self.high.expand(batch_shape)
super(Uniform, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
@constraints.dependent_property(is_discrete=False, event_dim=0)
def support(self):
return constraints.interval(self.low, self.high)
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
rand = torch.rand(shape, dtype=self.low.dtype, device=self.low.device)
return self.low + rand * (self.high - self.low)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
lb = self.low.le(value).type_as(self.low)
ub = self.high.gt(value).type_as(self.low)
return torch.log(lb.mul(ub)) - torch.log(self.high - self.low)
def cdf(self, value):
if self._validate_args:
self._validate_sample(value)
result = (value - self.low) / (self.high - self.low)
return result.clamp(min=0, max=1)
def icdf(self, value):
result = value * (self.high - self.low) + self.low
return result
def entropy(self):
return torch.log(self.high - self.low)
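# Illustrative usage sketch (hedged): `icdf` inverts `cdf` inside the support,
# and `log_prob` equals -log(high - low) on the support.
if __name__ == "__main__":
    d = Uniform(torch.tensor([0.0]), torch.tensor([5.0]))
    x = d.rsample()  # reparameterized draw in [0, 5)
    assert torch.allclose(d.icdf(d.cdf(x)), x)
    assert torch.allclose(d.log_prob(x), -torch.log(torch.tensor(5.0)))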
| pytorch-master | torch/distributions/uniform.py |
from numbers import Number
import torch
from torch._six import nan
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property
from torch.nn.functional import binary_cross_entropy_with_logits
__all__ = ['Bernoulli']
class Bernoulli(ExponentialFamily):
r"""
Creates a Bernoulli distribution parameterized by :attr:`probs`
or :attr:`logits` (but not both).
Samples are binary (0 or 1). They take the value `1` with probability `p`
and `0` with probability `1 - p`.
Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Bernoulli(torch.tensor([0.3]))
>>> m.sample() # 30% chance 1; 70% chance 0
tensor([ 0.])
Args:
probs (Number, Tensor): the probability of sampling `1`
logits (Number, Tensor): the log-odds of sampling `1`
"""
arg_constraints = {'probs': constraints.unit_interval,
'logits': constraints.real}
support = constraints.boolean
has_enumerate_support = True
_mean_carrier_measure = 0
def __init__(self, probs=None, logits=None, validate_args=None):
if (probs is None) == (logits is None):
raise ValueError("Either `probs` or `logits` must be specified, but not both.")
if probs is not None:
is_scalar = isinstance(probs, Number)
self.probs, = broadcast_all(probs)
else:
is_scalar = isinstance(logits, Number)
self.logits, = broadcast_all(logits)
self._param = self.probs if probs is not None else self.logits
if is_scalar:
batch_shape = torch.Size()
else:
batch_shape = self._param.size()
super(Bernoulli, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Bernoulli, _instance)
batch_shape = torch.Size(batch_shape)
if 'probs' in self.__dict__:
new.probs = self.probs.expand(batch_shape)
new._param = new.probs
if 'logits' in self.__dict__:
new.logits = self.logits.expand(batch_shape)
new._param = new.logits
super(Bernoulli, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._param.new(*args, **kwargs)
@property
def mean(self):
return self.probs
@property
def mode(self):
mode = (self.probs >= 0.5).to(self.probs)
mode[self.probs == 0.5] = nan
return mode
@property
def variance(self):
return self.probs * (1 - self.probs)
@lazy_property
def logits(self):
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
@property
def param_shape(self):
return self._param.size()
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
return torch.bernoulli(self.probs.expand(shape))
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
logits, value = broadcast_all(self.logits, value)
return -binary_cross_entropy_with_logits(logits, value, reduction='none')
def entropy(self):
return binary_cross_entropy_with_logits(self.logits, self.probs, reduction='none')
def enumerate_support(self, expand=True):
values = torch.arange(2, dtype=self._param.dtype, device=self._param.device)
values = values.view((-1,) + (1,) * len(self._batch_shape))
if expand:
values = values.expand((-1,) + self._batch_shape)
return values
@property
def _natural_params(self):
return (torch.log(self.probs / (1 - self.probs)), )
def _log_normalizer(self, x):
return torch.log(1 + torch.exp(x))
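# Illustrative sanity check (hedged): `enumerate_support` yields the values
# {0, 1} broadcast over the batch shape, and exp(log_prob) sums to one over it.
if __name__ == "__main__":
    d = Bernoulli(probs=torch.tensor([0.3, 0.9]))
    support = d.enumerate_support()  # shape (2, 2): support values x batch
    assert torch.allclose(d.log_prob(support).exp().sum(0), torch.ones(2))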
| pytorch-master | torch/distributions/bernoulli.py |
import torch
from torch.distributions.distribution import Distribution
__all__ = ['ExponentialFamily']
class ExponentialFamily(Distribution):
r"""
ExponentialFamily is the abstract base class for probability distributions belonging to an
    exponential family, whose probability mass/density function has the form defined below
.. math::
p_{F}(x; \theta) = \exp(\langle t(x), \theta\rangle - F(\theta) + k(x))
where :math:`\theta` denotes the natural parameters, :math:`t(x)` denotes the sufficient statistic,
:math:`F(\theta)` is the log normalizer function for a given family and :math:`k(x)` is the carrier
measure.
Note:
This class is an intermediary between the `Distribution` class and distributions which belong
to an exponential family mainly to check the correctness of the `.entropy()` and analytic KL
divergence methods. We use this class to compute the entropy and KL divergence using the AD
framework and Bregman divergences (courtesy of: Frank Nielsen and Richard Nock, Entropies and
Cross-entropies of Exponential Families).
"""
@property
def _natural_params(self):
"""
Abstract method for natural parameters. Returns a tuple of Tensors based
on the distribution
"""
raise NotImplementedError
def _log_normalizer(self, *natural_params):
"""
Abstract method for log normalizer function. Returns a log normalizer based on
the distribution and input
"""
raise NotImplementedError
@property
def _mean_carrier_measure(self):
"""
Abstract method for expected carrier measure, which is required for computing
entropy.
"""
raise NotImplementedError
def entropy(self):
"""
Method to compute the entropy using Bregman divergence of the log normalizer.
"""
result = -self._mean_carrier_measure
nparams = [p.detach().requires_grad_() for p in self._natural_params]
lg_normal = self._log_normalizer(*nparams)
gradients = torch.autograd.grad(lg_normal.sum(), nparams, create_graph=True)
result += lg_normal
for np, g in zip(nparams, gradients):
result -= (np * g).reshape(self._batch_shape + (-1,)).sum(-1)
return result
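# Illustrative sanity check (hedged): for a concrete exponential-family member
# (Bernoulli), the autograd/Bregman-based entropy above should agree with the
# closed form -p*log(p) - (1 - p)*log(1 - p).
if __name__ == "__main__":
    from torch.distributions import Bernoulli
    p = torch.tensor([0.2, 0.7])
    d = Bernoulli(probs=p)
    closed_form = -(p * p.log() + (1 - p) * (1 - p).log())
    assert torch.allclose(ExponentialFamily.entropy(d), closed_form, atol=1e-6)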
| pytorch-master | torch/distributions/exp_family.py |
from torch.distributions import constraints
from torch.distributions.normal import Normal
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import StickBreakingTransform
__all__ = ['LogisticNormal']
class LogisticNormal(TransformedDistribution):
r"""
Creates a logistic-normal distribution parameterized by :attr:`loc` and :attr:`scale`
that define the base `Normal` distribution transformed with the
`StickBreakingTransform` such that::
X ~ LogisticNormal(loc, scale)
Y = log(X / (1 - X.cumsum(-1)))[..., :-1] ~ Normal(loc, scale)
Args:
loc (float or Tensor): mean of the base distribution
scale (float or Tensor): standard deviation of the base distribution
Example::
>>> # logistic-normal distributed with mean=(0, 0, 0) and stddev=(1, 1, 1)
>>> # of the base Normal distribution
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = LogisticNormal(torch.tensor([0.0] * 3), torch.tensor([1.0] * 3))
>>> m.sample()
tensor([ 0.7653, 0.0341, 0.0579, 0.1427])
"""
arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
support = constraints.simplex
has_rsample = True
def __init__(self, loc, scale, validate_args=None):
base_dist = Normal(loc, scale, validate_args=validate_args)
if not base_dist.batch_shape:
base_dist = base_dist.expand([1])
super(LogisticNormal, self).__init__(base_dist,
StickBreakingTransform(),
validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(LogisticNormal, _instance)
return super(LogisticNormal, self).expand(batch_shape, _instance=new)
@property
def loc(self):
return self.base_dist.base_dist.loc
@property
def scale(self):
return self.base_dist.base_dist.scale
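# Illustrative usage sketch (hedged): samples have one more coordinate than the
# base Normal and lie on the simplex, so their coordinates sum to one.
if __name__ == "__main__":
    import torch
    d = LogisticNormal(torch.zeros(3), torch.ones(3))
    x = d.rsample()  # shape (4,), a point on the simplex
    assert torch.allclose(x.sum(-1), torch.tensor(1.0))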
| pytorch-master | torch/distributions/logistic_normal.py |
import torch
from torch.distributions import constraints
from torch.distributions.categorical import Categorical
from torch.distributions.distribution import Distribution
__all__ = ['OneHotCategorical', 'OneHotCategoricalStraightThrough']
class OneHotCategorical(Distribution):
r"""
Creates a one-hot categorical distribution parameterized by :attr:`probs` or
:attr:`logits`.
Samples are one-hot coded vectors of size ``probs.size(-1)``.
.. note:: The `probs` argument must be non-negative, finite and have a non-zero sum,
and it will be normalized to sum to 1 along the last dimension. :attr:`probs`
will return this normalized value.
The `logits` argument will be interpreted as unnormalized log probabilities
and can therefore be any real number. It will likewise be normalized so that
the resulting probabilities sum to 1 along the last dimension. :attr:`logits`
will return this normalized value.
See also: :func:`torch.distributions.Categorical` for specifications of
:attr:`probs` and :attr:`logits`.
Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = OneHotCategorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
>>> m.sample() # equal probability of 0, 1, 2, 3
tensor([ 0., 0., 0., 1.])
Args:
probs (Tensor): event probabilities
logits (Tensor): event log probabilities (unnormalized)
"""
arg_constraints = {'probs': constraints.simplex,
'logits': constraints.real_vector}
support = constraints.one_hot
has_enumerate_support = True
def __init__(self, probs=None, logits=None, validate_args=None):
self._categorical = Categorical(probs, logits)
batch_shape = self._categorical.batch_shape
event_shape = self._categorical.param_shape[-1:]
super(OneHotCategorical, self).__init__(batch_shape, event_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(OneHotCategorical, _instance)
batch_shape = torch.Size(batch_shape)
new._categorical = self._categorical.expand(batch_shape)
super(OneHotCategorical, new).__init__(batch_shape, self.event_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._categorical._new(*args, **kwargs)
@property
def _param(self):
return self._categorical._param
@property
def probs(self):
return self._categorical.probs
@property
def logits(self):
return self._categorical.logits
@property
def mean(self):
return self._categorical.probs
@property
def mode(self):
probs = self._categorical.probs
mode = probs.argmax(axis=-1)
return torch.nn.functional.one_hot(mode, num_classes=probs.shape[-1]).to(probs)
@property
def variance(self):
return self._categorical.probs * (1 - self._categorical.probs)
@property
def param_shape(self):
return self._categorical.param_shape
def sample(self, sample_shape=torch.Size()):
sample_shape = torch.Size(sample_shape)
probs = self._categorical.probs
num_events = self._categorical._num_events
indices = self._categorical.sample(sample_shape)
return torch.nn.functional.one_hot(indices, num_events).to(probs)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
indices = value.max(-1)[1]
return self._categorical.log_prob(indices)
def entropy(self):
return self._categorical.entropy()
def enumerate_support(self, expand=True):
n = self.event_shape[0]
values = torch.eye(n, dtype=self._param.dtype, device=self._param.device)
values = values.view((n,) + (1,) * len(self.batch_shape) + (n,))
if expand:
values = values.expand((n,) + self.batch_shape + (n,))
return values
class OneHotCategoricalStraightThrough(OneHotCategorical):
r"""
Creates a reparameterizable :class:`OneHotCategorical` distribution based on the straight-
through gradient estimator from [1].
[1] Estimating or Propagating Gradients Through Stochastic Neurons for Conditional Computation
(Bengio et al, 2013)
"""
has_rsample = True
def rsample(self, sample_shape=torch.Size()):
samples = self.sample(sample_shape)
probs = self._categorical.probs # cached via @lazy_property
return samples + (probs - probs.detach())
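# Illustrative usage sketch (hedged): with the straight-through estimator above,
# the forward value is a hard one-hot sample while gradients flow to the logits.
if __name__ == "__main__":
    logits = torch.zeros(4, requires_grad=True)
    d = OneHotCategoricalStraightThrough(logits=logits)
    sample = d.rsample()  # hard one-hot vector in the forward pass
    (sample * torch.arange(4.0)).sum().backward()
    assert logits.grad is not None  # gradient reached the parameters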
| pytorch-master | torch/distributions/one_hot_categorical.py |
import math
import warnings
from numbers import Number
from typing import Union
import torch
from torch._six import nan
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import lazy_property
from torch.distributions.multivariate_normal import _precision_to_scale_tril
__all__ = ['Wishart']
_log_2 = math.log(2)
def _mvdigamma(x: torch.Tensor, p: int) -> torch.Tensor:
assert x.gt((p - 1) / 2).all(), "Wrong domain for multivariate digamma function."
return torch.digamma(
x.unsqueeze(-1)
- torch.arange(p, dtype=x.dtype, device=x.device).div(2).expand(x.shape + (-1,))
).sum(-1)
def _clamp_above_eps(x: torch.Tensor) -> torch.Tensor:
# We assume positive input for this function
return x.clamp(min=torch.finfo(x.dtype).eps)
class Wishart(ExponentialFamily):
r"""
Creates a Wishart distribution parameterized by a symmetric positive definite matrix :math:`\Sigma`,
or its Cholesky decomposition :math:`\mathbf{\Sigma} = \mathbf{L}\mathbf{L}^\top`
Example:
        >>> # xdoctest: +SKIP("non-deterministic")
        >>> m = Wishart(torch.Tensor([2]), covariance_matrix=torch.eye(2))
>>> m.sample() # Wishart distributed with mean=`df * I` and
>>> # variance(x_ij)=`df` for i != j and variance(x_ij)=`2 * df` for i == j
Args:
covariance_matrix (Tensor): positive-definite covariance matrix
precision_matrix (Tensor): positive-definite precision matrix
scale_tril (Tensor): lower-triangular factor of covariance, with positive-valued diagonal
        df (float or Tensor): real-valued parameter larger than the (dimension of square matrix) - 1
Note:
Only one of :attr:`covariance_matrix` or :attr:`precision_matrix` or
:attr:`scale_tril` can be specified.
Using :attr:`scale_tril` will be more efficient: all computations internally
are based on :attr:`scale_tril`. If :attr:`covariance_matrix` or
:attr:`precision_matrix` is passed instead, it is only used to compute
the corresponding lower triangular matrices using a Cholesky decomposition.
'torch.distributions.LKJCholesky' is a restricted Wishart distribution.[1]
**References**
[1] `On equivalence of the LKJ distribution and the restricted Wishart distribution`,
Zhenxun Wang, Yunan Wu, Haitao Chu.
"""
arg_constraints = {
'covariance_matrix': constraints.positive_definite,
'precision_matrix': constraints.positive_definite,
'scale_tril': constraints.lower_cholesky,
'df': constraints.greater_than(0),
}
support = constraints.positive_definite
has_rsample = True
_mean_carrier_measure = 0
def __init__(self,
df: Union[torch.Tensor, Number],
covariance_matrix: torch.Tensor = None,
precision_matrix: torch.Tensor = None,
scale_tril: torch.Tensor = None,
validate_args=None):
assert (covariance_matrix is not None) + (scale_tril is not None) + (precision_matrix is not None) == 1, \
"Exactly one of covariance_matrix or precision_matrix or scale_tril may be specified."
param = next(p for p in (covariance_matrix, precision_matrix, scale_tril) if p is not None)
if param.dim() < 2:
raise ValueError("scale_tril must be at least two-dimensional, with optional leading batch dimensions")
if isinstance(df, Number):
batch_shape = torch.Size(param.shape[:-2])
self.df = torch.tensor(df, dtype=param.dtype, device=param.device)
else:
batch_shape = torch.broadcast_shapes(param.shape[:-2], df.shape)
self.df = df.expand(batch_shape)
event_shape = param.shape[-2:]
if self.df.le(event_shape[-1] - 1).any():
raise ValueError(f"Value of df={df} expected to be greater than ndim - 1 = {event_shape[-1]-1}.")
if scale_tril is not None:
self.scale_tril = param.expand(batch_shape + (-1, -1))
elif covariance_matrix is not None:
self.covariance_matrix = param.expand(batch_shape + (-1, -1))
elif precision_matrix is not None:
self.precision_matrix = param.expand(batch_shape + (-1, -1))
self.arg_constraints['df'] = constraints.greater_than(event_shape[-1] - 1)
if self.df.lt(event_shape[-1]).any():
warnings.warn("Low df values detected. Singular samples are highly likely to occur for ndim - 1 < df < ndim.")
super(Wishart, self).__init__(batch_shape, event_shape, validate_args=validate_args)
self._batch_dims = [-(x + 1) for x in range(len(self._batch_shape))]
if scale_tril is not None:
self._unbroadcasted_scale_tril = scale_tril
elif covariance_matrix is not None:
self._unbroadcasted_scale_tril = torch.linalg.cholesky(covariance_matrix)
else: # precision_matrix is not None
self._unbroadcasted_scale_tril = _precision_to_scale_tril(precision_matrix)
# Chi2 distribution is needed for Bartlett decomposition sampling
self._dist_chi2 = torch.distributions.chi2.Chi2(
df=(
self.df.unsqueeze(-1)
- torch.arange(
self._event_shape[-1],
dtype=self._unbroadcasted_scale_tril.dtype,
device=self._unbroadcasted_scale_tril.device,
).expand(batch_shape + (-1,))
)
)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Wishart, _instance)
batch_shape = torch.Size(batch_shape)
cov_shape = batch_shape + self.event_shape
new._unbroadcasted_scale_tril = self._unbroadcasted_scale_tril.expand(cov_shape)
new.df = self.df.expand(batch_shape)
new._batch_dims = [-(x + 1) for x in range(len(batch_shape))]
if 'covariance_matrix' in self.__dict__:
new.covariance_matrix = self.covariance_matrix.expand(cov_shape)
if 'scale_tril' in self.__dict__:
new.scale_tril = self.scale_tril.expand(cov_shape)
if 'precision_matrix' in self.__dict__:
new.precision_matrix = self.precision_matrix.expand(cov_shape)
# Chi2 distribution is needed for Bartlett decomposition sampling
new._dist_chi2 = torch.distributions.chi2.Chi2(
df=(
new.df.unsqueeze(-1)
- torch.arange(
self.event_shape[-1],
dtype=new._unbroadcasted_scale_tril.dtype,
device=new._unbroadcasted_scale_tril.device,
).expand(batch_shape + (-1,))
)
)
super(Wishart, new).__init__(batch_shape, self.event_shape, validate_args=False)
new._validate_args = self._validate_args
return new
@lazy_property
def scale_tril(self):
return self._unbroadcasted_scale_tril.expand(
self._batch_shape + self._event_shape)
@lazy_property
def covariance_matrix(self):
return (
self._unbroadcasted_scale_tril @ self._unbroadcasted_scale_tril.transpose(-2, -1)
).expand(self._batch_shape + self._event_shape)
@lazy_property
def precision_matrix(self):
identity = torch.eye(
self._event_shape[-1],
device=self._unbroadcasted_scale_tril.device,
dtype=self._unbroadcasted_scale_tril.dtype,
)
return torch.cholesky_solve(
identity, self._unbroadcasted_scale_tril
).expand(self._batch_shape + self._event_shape)
@property
def mean(self):
return self.df.view(self._batch_shape + (1, 1)) * self.covariance_matrix
@property
def mode(self):
factor = self.df - self.covariance_matrix.shape[-1] - 1
factor[factor <= 0] = nan
return factor.view(self._batch_shape + (1, 1)) * self.covariance_matrix
@property
def variance(self):
V = self.covariance_matrix # has shape (batch_shape x event_shape)
diag_V = V.diagonal(dim1=-2, dim2=-1)
return self.df.view(self._batch_shape + (1, 1)) * (V.pow(2) + torch.einsum("...i,...j->...ij", diag_V, diag_V))
def _bartlett_sampling(self, sample_shape=torch.Size()):
p = self._event_shape[-1] # has singleton shape
# Implemented Sampling using Bartlett decomposition
noise = _clamp_above_eps(
self._dist_chi2.rsample(sample_shape).sqrt()
).diag_embed(dim1=-2, dim2=-1)
i, j = torch.tril_indices(p, p, offset=-1)
noise[..., i, j] = torch.randn(
torch.Size(sample_shape) + self._batch_shape + (int(p * (p - 1) / 2),),
dtype=noise.dtype,
device=noise.device,
)
chol = self._unbroadcasted_scale_tril @ noise
return chol @ chol.transpose(-2, -1)
def rsample(self, sample_shape=torch.Size(), max_try_correction=None):
r"""
.. warning::
            In some cases, the sampling algorithm based on Bartlett decomposition may return singular matrix samples.
            Several tries to correct singular samples are performed by default, but it may still end up returning
            singular matrix samples. Singular samples may return `-inf` values in `.log_prob()`.
            In those cases, the user should validate the samples and either fix the value of `df`
            or adjust the `max_try_correction` argument of `.rsample` accordingly.
"""
if max_try_correction is None:
max_try_correction = 3 if torch._C._get_tracing_state() else 10
sample_shape = torch.Size(sample_shape)
sample = self._bartlett_sampling(sample_shape)
        # Below part is a temporary workaround to improve numerical stability and should be removed in the future
        is_singular = ~self.support.check(sample)
if self._batch_shape:
is_singular = is_singular.amax(self._batch_dims)
if torch._C._get_tracing_state():
# Less optimized version for JIT
for _ in range(max_try_correction):
sample_new = self._bartlett_sampling(sample_shape)
sample = torch.where(is_singular, sample_new, sample)
is_singular = ~self.support.check(sample)
if self._batch_shape:
is_singular = is_singular.amax(self._batch_dims)
else:
# More optimized version with data-dependent control flow.
if is_singular.any():
warnings.warn("Singular sample detected.")
for _ in range(max_try_correction):
sample_new = self._bartlett_sampling(is_singular[is_singular].shape)
sample[is_singular] = sample_new
is_singular_new = ~self.support.check(sample_new)
if self._batch_shape:
is_singular_new = is_singular_new.amax(self._batch_dims)
is_singular[is_singular.clone()] = is_singular_new
if not is_singular.any():
break
return sample
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
nu = self.df # has shape (batch_shape)
p = self._event_shape[-1] # has singleton shape
return (
- nu * (p * _log_2 / 2 + self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1))
- torch.mvlgamma(nu / 2, p=p)
+ (nu - p - 1) / 2 * torch.linalg.slogdet(value).logabsdet
- torch.cholesky_solve(value, self._unbroadcasted_scale_tril).diagonal(dim1=-2, dim2=-1).sum(dim=-1) / 2
)
def entropy(self):
nu = self.df # has shape (batch_shape)
p = self._event_shape[-1] # has singleton shape
V = self.covariance_matrix # has shape (batch_shape x event_shape)
return (
(p + 1) * (p * _log_2 / 2 + self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1))
+ torch.mvlgamma(nu / 2, p=p)
- (nu - p - 1) / 2 * _mvdigamma(nu / 2, p=p)
+ nu * p / 2
)
@property
def _natural_params(self):
nu = self.df # has shape (batch_shape)
p = self._event_shape[-1] # has singleton shape
return - self.precision_matrix / 2, (nu - p - 1) / 2
def _log_normalizer(self, x, y):
p = self._event_shape[-1]
return (
(y + (p + 1) / 2) * (- torch.linalg.slogdet(- 2 * x).logabsdet + _log_2 * p)
+ torch.mvlgamma(y + (p + 1) / 2, p=p)
)
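# Illustrative usage sketch (hedged): with df well above the matrix dimension,
# Bartlett sampling is well conditioned and the sample mean approaches
# `df * covariance_matrix`.
if __name__ == "__main__":
    d = Wishart(df=torch.tensor(10.0), covariance_matrix=torch.eye(2))
    x = d.rsample((5000,))
    assert torch.allclose(x.mean(0), d.mean, atol=0.5)
    assert torch.isfinite(d.log_prob(x)).all()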
| pytorch-master | torch/distributions/wishart.py |
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all
__all__ = ['Gamma']
def _standard_gamma(concentration):
return torch._standard_gamma(concentration)
class Gamma(ExponentialFamily):
r"""
Creates a Gamma distribution parameterized by shape :attr:`concentration` and :attr:`rate`.
Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Gamma(torch.tensor([1.0]), torch.tensor([1.0]))
>>> m.sample() # Gamma distributed with concentration=1 and rate=1
tensor([ 0.1046])
Args:
concentration (float or Tensor): shape parameter of the distribution
(often referred to as alpha)
rate (float or Tensor): rate = 1 / scale of the distribution
(often referred to as beta)
"""
arg_constraints = {'concentration': constraints.positive, 'rate': constraints.positive}
support = constraints.nonnegative
has_rsample = True
_mean_carrier_measure = 0
@property
def mean(self):
return self.concentration / self.rate
@property
def mode(self):
return ((self.concentration - 1) / self.rate).clamp(min=0)
@property
def variance(self):
return self.concentration / self.rate.pow(2)
def __init__(self, concentration, rate, validate_args=None):
self.concentration, self.rate = broadcast_all(concentration, rate)
if isinstance(concentration, Number) and isinstance(rate, Number):
batch_shape = torch.Size()
else:
batch_shape = self.concentration.size()
super(Gamma, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Gamma, _instance)
batch_shape = torch.Size(batch_shape)
new.concentration = self.concentration.expand(batch_shape)
new.rate = self.rate.expand(batch_shape)
super(Gamma, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
value = _standard_gamma(self.concentration.expand(shape)) / self.rate.expand(shape)
value.detach().clamp_(min=torch.finfo(value.dtype).tiny) # do not record in autograd graph
return value
def log_prob(self, value):
value = torch.as_tensor(value, dtype=self.rate.dtype, device=self.rate.device)
if self._validate_args:
self._validate_sample(value)
return (torch.xlogy(self.concentration, self.rate) +
torch.xlogy(self.concentration - 1, value) -
self.rate * value - torch.lgamma(self.concentration))
def entropy(self):
return (self.concentration - torch.log(self.rate) + torch.lgamma(self.concentration) +
(1.0 - self.concentration) * torch.digamma(self.concentration))
@property
def _natural_params(self):
return (self.concentration - 1, -self.rate)
def _log_normalizer(self, x, y):
return torch.lgamma(x + 1) + (x + 1) * torch.log(-y.reciprocal())
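# Illustrative usage sketch (hedged): `rsample` is reparameterized through
# `_standard_gamma`, so gradients flow back to both `concentration` and `rate`.
if __name__ == "__main__":
    concentration = torch.tensor(2.0, requires_grad=True)
    rate = torch.tensor(3.0, requires_grad=True)
    Gamma(concentration, rate).rsample((16,)).sum().backward()
    assert concentration.grad is not None and rate.grad is not None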
| pytorch-master | torch/distributions/gamma.py |
from torch.distributions import constraints
from torch.distributions.gamma import Gamma
__all__ = ['Chi2']
class Chi2(Gamma):
r"""
Creates a Chi-squared distribution parameterized by shape parameter :attr:`df`.
This is exactly equivalent to ``Gamma(alpha=0.5*df, beta=0.5)``
Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Chi2(torch.tensor([1.0]))
>>> m.sample() # Chi2 distributed with shape df=1
tensor([ 0.1046])
Args:
df (float or Tensor): shape parameter of the distribution
"""
arg_constraints = {'df': constraints.positive}
def __init__(self, df, validate_args=None):
super(Chi2, self).__init__(0.5 * df, 0.5, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Chi2, _instance)
return super(Chi2, self).expand(batch_shape, new)
@property
def df(self):
return self.concentration * 2
| pytorch-master | torch/distributions/chi2.py |
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, probs_to_logits, lazy_property, logits_to_probs
__all__ = ['Binomial']
def _clamp_by_zero(x):
    # works like clamp(x, min=0) but the gradient at 0 is 0.5
return (x.clamp(min=0) + x - x.clamp(max=0)) / 2
class Binomial(Distribution):
r"""
Creates a Binomial distribution parameterized by :attr:`total_count` and
either :attr:`probs` or :attr:`logits` (but not both). :attr:`total_count` must be
broadcastable with :attr:`probs`/:attr:`logits`.
Example::
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Binomial(100, torch.tensor([0 , .2, .8, 1]))
>>> x = m.sample()
tensor([ 0., 22., 71., 100.])
>>> m = Binomial(torch.tensor([[5.], [10.]]), torch.tensor([0.5, 0.8]))
>>> x = m.sample()
tensor([[ 4., 5.],
[ 7., 6.]])
Args:
total_count (int or Tensor): number of Bernoulli trials
probs (Tensor): Event probabilities
logits (Tensor): Event log-odds
"""
arg_constraints = {'total_count': constraints.nonnegative_integer,
'probs': constraints.unit_interval,
'logits': constraints.real}
has_enumerate_support = True
def __init__(self, total_count=1, probs=None, logits=None, validate_args=None):
if (probs is None) == (logits is None):
raise ValueError("Either `probs` or `logits` must be specified, but not both.")
if probs is not None:
self.total_count, self.probs, = broadcast_all(total_count, probs)
self.total_count = self.total_count.type_as(self.probs)
else:
self.total_count, self.logits, = broadcast_all(total_count, logits)
self.total_count = self.total_count.type_as(self.logits)
self._param = self.probs if probs is not None else self.logits
batch_shape = self._param.size()
super(Binomial, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Binomial, _instance)
batch_shape = torch.Size(batch_shape)
new.total_count = self.total_count.expand(batch_shape)
if 'probs' in self.__dict__:
new.probs = self.probs.expand(batch_shape)
new._param = new.probs
if 'logits' in self.__dict__:
new.logits = self.logits.expand(batch_shape)
new._param = new.logits
super(Binomial, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._param.new(*args, **kwargs)
@constraints.dependent_property(is_discrete=True, event_dim=0)
def support(self):
return constraints.integer_interval(0, self.total_count)
@property
def mean(self):
return self.total_count * self.probs
@property
def mode(self):
return ((self.total_count + 1) * self.probs).floor().clamp(max=self.total_count)
@property
def variance(self):
return self.total_count * self.probs * (1 - self.probs)
@lazy_property
def logits(self):
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
@property
def param_shape(self):
return self._param.size()
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
return torch.binomial(self.total_count.expand(shape), self.probs.expand(shape))
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
log_factorial_n = torch.lgamma(self.total_count + 1)
log_factorial_k = torch.lgamma(value + 1)
log_factorial_nmk = torch.lgamma(self.total_count - value + 1)
# k * log(p) + (n - k) * log(1 - p) = k * (log(p) - log(1 - p)) + n * log(1 - p)
# (case logit < 0) = k * logit - n * log1p(e^logit)
# (case logit > 0) = k * logit - n * (log(p) - log(1 - p)) + n * log(p)
# = k * logit - n * logit - n * log1p(e^-logit)
# (merge two cases) = k * logit - n * max(logit, 0) - n * log1p(e^-|logit|)
normalize_term = (self.total_count * _clamp_by_zero(self.logits)
+ self.total_count * torch.log1p(torch.exp(-torch.abs(self.logits)))
- log_factorial_n)
return value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term
def entropy(self):
total_count = int(self.total_count.max())
if not self.total_count.min() == total_count:
raise NotImplementedError("Inhomogeneous total count not supported by `entropy`.")
log_prob = self.log_prob(self.enumerate_support(False))
return -(torch.exp(log_prob) * log_prob).sum(0)
def enumerate_support(self, expand=True):
total_count = int(self.total_count.max())
if not self.total_count.min() == total_count:
raise NotImplementedError("Inhomogeneous total count not supported by `enumerate_support`.")
values = torch.arange(1 + total_count, dtype=self._param.dtype, device=self._param.device)
values = values.view((-1,) + (1,) * len(self._batch_shape))
if expand:
values = values.expand((-1,) + self._batch_shape)
return values
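# Illustrative sanity check (hedged) of the numerically stable `log_prob` above:
# summing exp(log_prob) over the enumerated support {0, ..., n} gives one.
if __name__ == "__main__":
    d = Binomial(total_count=10, probs=torch.tensor([0.1, 0.5, 0.9]))
    support = d.enumerate_support()  # shape (11, 3)
    assert torch.allclose(d.log_prob(support).exp().sum(0), torch.ones(3))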
| pytorch-master | torch/distributions/binomial.py |
import collections
import importlib.machinery
import io
import linecache
import pickletools
import platform
import types
from collections import defaultdict, OrderedDict
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import (
Any,
BinaryIO,
Callable,
cast,
DefaultDict,
Dict,
List,
Optional,
Sequence,
Set,
Union,
)
import torch
from torch.serialization import location_tag, normalize_storage_type
from torch.types import Storage
from torch.utils.hooks import RemovableHandle
from ._digraph import DiGraph
from ._importlib import _normalize_path
from ._mangling import demangle, is_mangled
from ._package_pickler import create_pickler
from ._stdlib import is_stdlib_module
from .find_file_dependencies import find_files_source_depends_on
from .glob_group import GlobGroup, GlobPattern
from .importer import Importer, OrderedImporter, sys_importer
__all__ = [
"PackagingErrorReason",
"EmptyMatchError",
"PackagingError",
"PackageExporter",
]
_gate_torchscript_serialization = True
ActionHook = Callable[["PackageExporter", str], None]
class _ModuleProviderAction(Enum):
"""Represents one of the actions that :class:`PackageExporter` can take on a module.
See :meth:`PackageExporter.extern` and friends for a description of what the actions do.
"""
INTERN = 1
EXTERN = 2
MOCK = 3
DENY = 4
# Special case: when a module is mocked, PackageExporter writes out a
# `_mock` module that implements our mocking stubs. If we re-package code,
# we may encounter a `_mock` module from the original package. If we do,
# just ignore it and write a `_mock` module once.
REPACKAGED_MOCK_MODULE = 5
# Special case: PackageImporter adds a fake module
# (`torch_package_importer`) that allows packaged code to access it. Don't
# re-export this.
SKIP = 6
class PackagingErrorReason(Enum):
"""Listing of different reasons a dependency may fail to package.
This enum is used to provide good error messages when
:class:`PackagingError` is raised.
"""
def __repr__(self):
return "<%s.%s>" % (self.__class__.__name__, self.name)
IS_EXTENSION_MODULE = (
"Module is a C extension module. torch.package supports Python modules only."
)
NO_DUNDER_FILE = "Module had no __file__ defined."
SOURCE_FILE_NOT_FOUND = (
"Module had a __file__, but we could not find it in your filesystem."
)
DEPENDENCY_RESOLUTION_FAILED = "Dependency resolution failed."
NO_ACTION = (
"Module did not match against any action pattern. Extern, mock, or intern it."
)
DENIED = "Module was denied by a pattern."
MOCKED_BUT_STILL_USED = (
"Module was mocked out, but is still being used in the package. "
"Please intern or extern the mocked modules if objects are supposed to be in "
"the package."
)
@dataclass
class _PatternInfo:
"""Holds :class:`PackageExporter`-specific info about how to execute matches against"""
# What action to take on a module that matches this pattern.
action: _ModuleProviderAction
# The value of `allow_empty` the user gave when specifying the pattern.
allow_empty: bool
# Whether this pattern has been matched during packaging.
was_matched: bool
def __init__(self, action, allow_empty):
self.action = action
self.allow_empty = allow_empty
self.was_matched = False
class EmptyMatchError(Exception):
"""This is an exception that is thrown when a mock or extern is marked as
``allow_empty=False``, and is not matched with any module during packaging.
"""
pass
class PackagingError(Exception):
"""This exception is raised when there is an issue with exporting a package.
``PackageExporter`` will attempt to gather up all the errors and present
them to you at once.
"""
def __init__(self, dependency_graph: DiGraph):
# Group errors by reason.
broken: Dict[PackagingErrorReason, List[str]] = defaultdict(list)
for module_name, attrs in dependency_graph.nodes.items():
error = attrs.get("error")
if error is None:
continue
if error == PackagingErrorReason.NO_ACTION:
assert "action" not in attrs
broken[error].append(module_name)
message = io.StringIO()
message.write("\n")
for reason, module_names in broken.items():
message.write(f"* {reason.value}\n")
for module_name in module_names:
message.write(f" {module_name}\n")
# Print additional context if it's provided.
error_context = dependency_graph.nodes[module_name].get("error_context")
if error_context is not None:
message.write(f" Context: {error_context}\n")
# Save the dependency graph so that tooling can get at it.
self.dependency_graph = dependency_graph
super().__init__(message.getvalue())
class PackageExporter:
"""Exporters allow you to write packages of code, pickled Python data, and
arbitrary binary and text resources into a self-contained package.
Imports can load this code in a hermetic way, such that code is loaded
from the package rather than the normal Python import system. This allows
for the packaging of PyTorch model code and data so that it can be run
on a server or used in the future for transfer learning.
The code contained in packages is copied file-by-file from the original
source when it is created, and the file format is a specially organized
zip file. Future users of the package can unzip the package, and edit the code
in order to perform custom modifications to it.
The importer for packages ensures that code in the module can only be loaded from
within the package, except for modules explicitly listed as external using :meth:`extern`.
The file ``extern_modules`` in the zip archive lists all the modules that a package externally depends on.
This prevents "implicit" dependencies where the package runs locally because it is importing
a locally-installed package, but then fails when the package is copied to another machine.
When source code is added to the package, the exporter can optionally scan it
for further code dependencies (``dependencies=True``). It looks for import statements,
resolves relative references to qualified module names, and performs an action specified by the user
(See: :meth:`extern`, :meth:`mock`, and :meth:`intern`).
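    Example (an illustrative, hedged sketch; ``my_model`` and ``model`` are
    hypothetical names used only for demonstration)::
        with PackageExporter("my_package.pt") as exporter:
            exporter.extern("numpy.**")     # load numpy from the importing environment
            exporter.mock("pandas.**")      # replace pandas with mock stubs
            exporter.intern("my_model.**")  # copy my_model's source into the package
            exporter.save_pickle("model", "model.pkl", model)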
"""
"""A importer that will be searched in order to find the modules referenced by other modules or by
pickled objects. The default module environment just uses sys_importer, which searches the Python environment.
"""
importer: Importer
def __init__(
self,
f: Union[str, Path, BinaryIO],
importer: Union[Importer, Sequence[Importer]] = sys_importer,
):
"""
Create an exporter.
Args:
f: The location to export to. Can be a ``string``/``Path`` object containing a filename
or a binary I/O object.
importer: If a single Importer is passed, use that to search for modules.
                If a sequence of importers is passed, an ``OrderedImporter`` will be constructed out of them.
"""
torch._C._log_api_usage_once("torch.package.PackageExporter")
if isinstance(f, (Path, str)):
f = str(f)
self.buffer: Optional[BinaryIO] = None
else: # is a byte buffer
self.buffer = f
self.zip_file = torch._C.PyTorchFileWriter(f)
self.zip_file.set_min_version(6)
self._written_files: Set[str] = set()
self.serialized_reduces: Dict[int, Any] = {}
# A graph tracking all the modules and pickle objects added to this
# package and the dependencies between them.
# - Each node is a module name (or a pickle name that looks like '<foo.obj.pkl>')
# - Each directed edge (u, v) means u depends on v.
# - Nodes may contain metadata that describe how to write the thing to the zipfile.
self.dependency_graph = DiGraph()
self.script_module_serializer = torch._C.ScriptModuleSerializer(self.zip_file)
self.storage_context = self.script_module_serializer.storage_context()
# These are OrderedDicts for compatibility with RemovableHandle.
# Generic OrderedDict type annotations are not present until 3.7.
# The real type signature is OrderedDict[int, Callable[[PackageExporter, str], None]]
self._extern_hooks: OrderedDict = OrderedDict()
self._mock_hooks: OrderedDict = OrderedDict()
self._intern_hooks: OrderedDict = OrderedDict()
if isinstance(importer, Importer):
self.importer = importer
else:
if not isinstance(importer, collections.abc.Sequence):
raise TypeError(
"importer arg should be an Importer or a sequence of Importers, "
f"got {type(importer)} instead."
)
self.importer = OrderedImporter(*importer)
self.patterns: Dict[GlobGroup, _PatternInfo] = {}
self._unique_id = 0
def save_source_file(
self, module_name: str, file_or_directory: str, dependencies=True
):
"""Adds the local file system ``file_or_directory`` to the source package to provide the code
for ``module_name``.
Args:
module_name (str): e.g. ``"my_package.my_subpackage"``, code will be saved to provide code for this package.
file_or_directory (str): the path to a file or directory of code. When a directory, all python files in the directory
are recursively copied using :meth:`save_source_file`. If a file is named ``"/__init__.py"`` the code is treated
as a package.
dependencies (bool, optional): If ``True``, we scan the source for dependencies.
"""
path = Path(file_or_directory)
if path.is_dir():
to_save = [] # list of tuples with arguments to save_source_string
module_path = module_name.replace(".", "/")
for filename in path.glob("**/*.py"):
relative_path = filename.relative_to(path).as_posix()
archivename = module_path + "/" + relative_path
submodule_name = None
if filename.name == "__init__.py":
submodule_name = archivename[: -len("/__init__.py")].replace(
"/", "."
)
is_package = True
else:
submodule_name = archivename[: -len(".py")].replace("/", ".")
is_package = False
# we delay the call to save_source_string so that we record all the source files
# being provided by this directory structure _before_ attempting to resolve the dependencies
# on the source. This makes sure we don't try to copy over modules that will just get
# overwritten by this directory blob
to_save.append(
(
submodule_name,
_read_file(str(filename)),
is_package,
dependencies,
)
)
for item in to_save:
self.save_source_string(*item)
else:
is_package = path.name == "__init__.py"
self.save_source_string(
module_name,
_read_file(file_or_directory),
is_package,
dependencies,
)
def get_unique_id(self) -> str:
"""Get an id. This id is guaranteed to only be handed out once for this package."""
ret = str(self._unique_id)
self._unique_id += 1
return ret
def _get_dependencies(
self, src: str, module_name: str, is_package: bool
) -> List[str]:
"""Return all modules that this source code depends on.
Dependencies are found by scanning the source code for import-like statements.
Arguments:
src: The Python source code to analyze for dependencies.
module_name: The name of the module that ``src`` corresponds to.
is_package: Whether this module should be treated as a package.
See :py:meth:`save_source_string` for more info.
Returns:
A list containing modules detected as direct dependencies in
``src``. The items in the list are guaranteed to be unique.
"""
package_name = (
module_name if is_package else module_name.rsplit(".", maxsplit=1)[0]
)
try:
dep_pairs = find_files_source_depends_on(src, package_name)
except Exception as e:
self.dependency_graph.add_node(
module_name,
error=PackagingErrorReason.DEPENDENCY_RESOLUTION_FAILED,
error_context=str(e),
)
return []
# Use a dict to get uniquing but also deterministic order
dependencies = {}
for dep_module_name, dep_module_obj in dep_pairs:
# handle the case where someone did something like `from pack import sub`
# where `sub` is a submodule. In this case we don't have to save pack, just sub.
# this ensures we don't pick up additional dependencies on pack.
# However, in the case where `sub` is not a submodule but an object, then we do have
# to save pack.
if dep_module_obj is not None:
possible_submodule = f"{dep_module_name}.{dep_module_obj}"
if self._module_exists(possible_submodule):
dependencies[possible_submodule] = True
# we don't need to save `pack`
continue
if self._module_exists(dep_module_name):
dependencies[dep_module_name] = True
return list(dependencies.keys())
def save_source_string(
self,
module_name: str,
src: str,
is_package: bool = False,
dependencies: bool = True,
):
"""Adds ``src`` as the source code for ``module_name`` in the exported package.
Args:
module_name (str): e.g. ``my_package.my_subpackage``, code will be saved to provide code for this package.
src (str): The Python source code to save for this package.
is_package (bool, optional): If ``True``, this module is treated as a package. Packages are allowed to have submodules
(e.g. ``my_package.my_subpackage.my_subsubpackage``), and resources can be saved inside them. Defaults to ``False``.
dependencies (bool, optional): If ``True``, we scan the source for dependencies.
"""
self.dependency_graph.add_node(
module_name,
source=src,
is_package=is_package,
provided=True,
action=_ModuleProviderAction.INTERN,
)
if dependencies:
deps = self._get_dependencies(src, module_name, is_package)
for dep in deps:
self.dependency_graph.add_edge(module_name, dep)
self.add_dependency(dep)
def _write_source_string(
self,
module_name: str,
src: str,
is_package: bool = False,
):
"""Write ``src`` as the source code for ``module_name`` in the zip archive.
Arguments are otherwise the same as for :meth:`save_source_string`.
"""
extension = "/__init__.py" if is_package else ".py"
filename = module_name.replace(".", "/") + extension
self._write(filename, src)
def _import_module(self, module_name: str):
try:
return self.importer.import_module(module_name)
except ModuleNotFoundError as e:
if not is_mangled(module_name):
raise
msg = (
f"Module not found: '{module_name}'. Make sure the PackageImporter that "
"created this module is present in `self.importer`"
)
raise ModuleNotFoundError(msg) from None
def _module_exists(self, module_name: str) -> bool:
try:
self._import_module(module_name)
return True
except Exception:
return False
def _get_source_of_module(self, module: types.ModuleType) -> Optional[str]:
filename = getattr(module, "__file__", None)
result = (
None
if filename is None or not filename.endswith(".py")
else linecache.getlines(filename, module.__dict__)
)
if result is None:
return None
return "".join(result)
def add_dependency(self, module_name: str, dependencies=True):
"""Given a module, add it to the dependency graph according to patterns
specified by the user.
"""
if (
module_name in self.dependency_graph
and self.dependency_graph.nodes[module_name].get("provided") is True
):
return
# Special case: PackageImporter provides a special module called
# `torch_package_importer` that allows packaged modules to reference
# their PackageImporter. We don't want to re-export this.
if module_name == "torch_package_importer":
self.dependency_graph.add_node(
module_name,
action=_ModuleProviderAction.SKIP,
provided=True,
)
return
if module_name == "_mock":
self.dependency_graph.add_node(
module_name,
action=_ModuleProviderAction.REPACKAGED_MOCK_MODULE,
provided=True,
)
return
if self._can_implicitly_extern(module_name):
self.dependency_graph.add_node(
module_name, action=_ModuleProviderAction.EXTERN, provided=True
)
return
for pattern, pattern_info in self.patterns.items():
if pattern.matches(module_name):
pattern_info.was_matched = True
self.dependency_graph.add_node(
module_name, action=pattern_info.action, provided=True
)
if pattern_info.action == _ModuleProviderAction.DENY:
# Requiring a denied module just adds an error to the graph.
self.dependency_graph.add_node(
module_name, error=PackagingErrorReason.DENIED
)
# If we are interning this module, we need to retrieve its
# dependencies and package those as well.
if pattern_info.action == _ModuleProviderAction.INTERN:
self._intern_module(module_name, dependencies)
return
# No patterns have matched. Explicitly add this as an error.
self.dependency_graph.add_node(
module_name, error=PackagingErrorReason.NO_ACTION
)
def save_module(self, module_name: str, dependencies=True):
"""Save the code for ``module`` into the package. Code for the module is resolved using the ``importers`` path to find the
module object, and then using its ``__file__`` attribute to find the source code.
Args:
module_name (str): e.g. ``my_package.my_subpackage``, code will be saved to provide code
for this package.
dependencies (bool, optional): If ``True``, we scan the source for dependencies.
"""
if not isinstance(module_name, str):
raise TypeError(
"save_module() expects a string input, did you perhaps mean to pass `__name__`?"
)
self._intern_module(module_name, dependencies)
def _intern_module(
self,
module_name: str,
dependencies: bool,
):
"""Adds the module to the dependency graph as an interned module,
along with any metadata needed to write it out to the zipfile at serialization time.
"""
module_obj = self._import_module(module_name)
# Subtle: if the import above succeeded, either:
# 1. The module name is not mangled, and this was just a regular import, or
# 2. The module name is mangled, but one of the importers was able to
# recognize the mangling and import it.
# Either way, it is now safe to demangle this name so that we don't
# serialize the mangled version to the package.
module_name = demangle(module_name)
# Find dependencies of this module and require them as well.
is_package = hasattr(module_obj, "__path__")
source = self._get_source_of_module(module_obj)
if source is None:
# Couldn't find a source! Add it to our dependency graph as broken
# and continue.
filename = getattr(module_obj, "__file__", None)
error_context = None
if filename is None:
packaging_error = PackagingErrorReason.NO_DUNDER_FILE
elif filename.endswith(tuple(importlib.machinery.EXTENSION_SUFFIXES)):
packaging_error = PackagingErrorReason.IS_EXTENSION_MODULE
else:
packaging_error = PackagingErrorReason.SOURCE_FILE_NOT_FOUND
error_context = f"filename: {filename}"
self.dependency_graph.add_node(
module_name,
action=_ModuleProviderAction.INTERN,
is_package=is_package,
error=packaging_error,
error_context=error_context,
provided=True,
)
return
self.dependency_graph.add_node(
module_name,
action=_ModuleProviderAction.INTERN,
is_package=is_package,
source=source,
provided=True,
)
if dependencies:
deps = self._get_dependencies(source, module_name, is_package)
for dep in deps:
self.dependency_graph.add_edge(module_name, dep)
self.add_dependency(dep)
def save_pickle(
self,
package: str,
resource: str,
obj: Any,
dependencies: bool = True,
pickle_protocol: int = 3,
):
"""Save a python object to the archive using pickle. Equivalent to :func:`torch.save` but saving into
        the archive rather than a stand-alone file. Standard pickle does not save the code, only the objects.
If ``dependencies`` is true, this method will also scan the pickled objects for which modules are required
to reconstruct them and save the relevant code.
To be able to save an object where ``type(obj).__name__`` is ``my_module.MyObject``,
``my_module.MyObject`` must resolve to the class of the object according to the ``importer`` order. When saving objects that
have previously been packaged, the importer's ``import_module`` method will need to be present in the ``importer`` list
for this to work.
Args:
package (str): The name of module package this resource should go in (e.g. ``"my_package.my_subpackage"``).
resource (str): A unique name for the resource, used to identify it to load.
obj (Any): The object to save, must be picklable.
dependencies (bool, optional): If ``True``, we scan the source for dependencies.
"""
assert (pickle_protocol == 4) or (
pickle_protocol == 3
), "torch.package only supports pickle protocols 3 and 4"
filename = self._filename(package, resource)
# Write the pickle data for `obj`
data_buf = io.BytesIO()
pickler = create_pickler(data_buf, self.importer, protocol=pickle_protocol)
pickler.persistent_id = self._persistent_id
pickler.dump(obj)
data_value = data_buf.getvalue()
mocked_modules = defaultdict(list)
name_in_dependency_graph = f"<{package}.{resource}>"
self.dependency_graph.add_node(
name_in_dependency_graph,
action=_ModuleProviderAction.INTERN,
provided=True,
is_pickle=True,
)
def _check_mocked_error(module: Optional[str], field: Optional[str]):
"""
checks if an object (field) comes from a mocked module and then adds
the pair to mocked_modules which contains mocked modules paired with their
list of mocked objects present in the pickle.
We also hold the invariant that the first user defined rule that applies
to the module is the one we use.
"""
assert isinstance(module, str)
assert isinstance(field, str)
if self._can_implicitly_extern(module):
return
for pattern, pattern_info in self.patterns.items():
if pattern.matches(module):
if pattern_info.action == _ModuleProviderAction.MOCK:
mocked_modules[module].append(field)
return
if dependencies:
all_dependencies = []
module = None
field = None
memo: DefaultDict[int, str] = defaultdict(None)
memo_count = 0
# pickletools.dis(data_value)
for opcode, arg, pos in pickletools.genops(data_value):
if pickle_protocol == 4:
if (
opcode.name == "SHORT_BINUNICODE"
or opcode.name == "BINUNICODE8"
):
assert isinstance(arg, str)
module = field
field = arg
memo[memo_count] = arg
elif (
opcode.name == "BINGET_LONG"
or opcode.name == "BINGET"
or opcode.name == "GET"
):
assert isinstance(arg, int)
module = field
field = memo.get(arg, None)
elif opcode.name == "MEMOIZE":
memo_count += 1
elif opcode.name == "STACK_GLOBAL":
assert isinstance(module, str)
if module not in all_dependencies:
all_dependencies.append(module)
_check_mocked_error(module, field)
elif (
pickle_protocol == 3 and opcode.name == "GLOBAL"
): # a global reference
assert isinstance(arg, str)
module, field = arg.split(" ")
if module not in all_dependencies:
all_dependencies.append(module)
_check_mocked_error(module, field)
for module_name in all_dependencies:
self.dependency_graph.add_edge(name_in_dependency_graph, module_name)
""" If an object happens to come from a mocked module, then we collect these errors and spit them
out with the other errors found by package exporter.
"""
if module in mocked_modules:
assert isinstance(module, str)
fields = mocked_modules[module]
self.dependency_graph.add_node(
module_name,
action=_ModuleProviderAction.MOCK,
error=PackagingErrorReason.MOCKED_BUT_STILL_USED,
error_context=f"Object(s) '{fields}' from module `{module_name}` was mocked out during packaging "
f"but is being used in resource - `{resource}` in package `{package}`. ",
provided=True,
)
else:
self.add_dependency(module_name)
self._write(filename, data_value)
def save_text(self, package: str, resource: str, text: str):
"""Save text data to the package.
Args:
            package (str): The name of module package this resource should go in (e.g. ``"my_package.my_subpackage"``).
resource (str): A unique name for the resource, used to identify it to load.
text (str): The contents to save.
"""
return self.save_binary(package, resource, text.encode("utf-8"))
def save_binary(self, package, resource, binary: bytes):
"""Save raw bytes to the package.
Args:
            package (str): The name of module package this resource should go in (e.g. ``"my_package.my_subpackage"``).
            resource (str): A unique name for the resource, used to identify it to load.
            binary (bytes): The data to save.
"""
filename = self._filename(package, resource)
self._write(filename, binary)
def register_extern_hook(self, hook: ActionHook) -> RemovableHandle:
"""Registers an extern hook on the exporter.
The hook will be called each time a module matches against an :meth:`extern` pattern.
It should have the following signature::
hook(exporter: PackageExporter, module_name: str) -> None
Hooks will be called in order of registration.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
A handle that can be used to remove the added hook by calling
``handle.remove()``.
"""
handle = RemovableHandle(self._extern_hooks)
self._extern_hooks[handle.id] = hook
return handle
def register_mock_hook(self, hook: ActionHook) -> RemovableHandle:
"""Registers a mock hook on the exporter.
The hook will be called each time a module matches against a :meth:`mock` pattern.
It should have the following signature::
hook(exporter: PackageExporter, module_name: str) -> None
Hooks will be called in order of registration.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
A handle that can be used to remove the added hook by calling
``handle.remove()``.
"""
handle = RemovableHandle(self._mock_hooks)
self._mock_hooks[handle.id] = hook
return handle
def register_intern_hook(self, hook: ActionHook) -> RemovableHandle:
"""Registers an intern hook on the exporter.
The hook will be called each time a module matches against an :meth:`intern` pattern.
It should have the following signature::
hook(exporter: PackageExporter, module_name: str) -> None
Hooks will be called in order of registration.
Returns:
:class:`torch.utils.hooks.RemovableHandle`:
A handle that can be used to remove the added hook by calling
``handle.remove()``.
"""
handle = RemovableHandle(self._intern_hooks)
self._intern_hooks[handle.id] = hook
return handle
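    # Illustrative sketch (comments only) of the hook API shared by
    # register_extern_hook / register_mock_hook / register_intern_hook above;
    # the hook body here is hypothetical.
    #
    #   def log_extern(exporter, module_name):
    #       print(f"externing {module_name}")
    #
    #   handle = exporter.register_extern_hook(log_extern)
    #   ...  # export as usual; the hook fires once per externed module
    #   handle.remove()  # deregister the hook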
def intern(
self,
include: "GlobPattern",
*,
exclude: "GlobPattern" = (),
allow_empty: bool = True,
):
"""Specify modules that should be packaged. A module must match some ``intern`` pattern in order to be
included in the package and have its dependencies processed recursively.
Args:
            include (Union[List[str], str]): A string e.g. ``"my_package.my_subpackage"``, or list of strings
                for the names of the modules to be interned. This can also be a glob-style pattern, as described in :meth:`mock`.
exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string.
allow_empty (bool): An optional flag that specifies whether the intern modules specified by this call
to the ``intern`` method must be matched to some module during packaging. If an ``intern`` module glob
pattern is added with ``allow_empty=False``, and :meth:`close` is called (either explicitly or via ``__exit__``)
before any modules match that pattern, an exception is thrown. If ``allow_empty=True``, no such exception is thrown.
"""
self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo(
_ModuleProviderAction.INTERN, allow_empty
)
def mock(
self,
include: "GlobPattern",
*,
exclude: "GlobPattern" = (),
allow_empty: bool = True,
):
"""Replace some required modules with a mock implementation. Mocked modules will return a fake
object for any attribute accessed from it. Because we copy file-by-file, the dependency resolution will sometimes
find files that are imported by model files but whose functionality is never used
(e.g. custom serialization code or training helpers).
Use this function to mock this functionality out without having to modify the original code.
Args:
include (Union[List[str], str]): A string e.g. ``"my_package.my_subpackage"``, or list of strings
for the names of the modules to be mocked out. Strings can also be a glob-style pattern
string that may match multiple modules. Any required dependencies that match this pattern
string will be mocked out automatically.
                Examples:
``'torch.**'`` -- matches ``torch`` and all submodules of torch, e.g. ``'torch.nn'``
and ``'torch.nn.functional'``
``'torch.*'`` -- matches ``'torch.nn'`` or ``'torch.functional'``, but not
``'torch.nn.functional'``
exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string.
                e.g. ``include='torch.**', exclude='torch.foo'`` will mock all torch packages except ``'torch.foo'``.
                Defaults to ``[]``.
allow_empty (bool): An optional flag that specifies whether the mock implementation(s) specified by this call
to the :meth:`mock` method must be matched to some module during packaging. If a mock is added with
``allow_empty=False``, and :meth:`close` is called (either explicitly or via ``__exit__``) and the mock has
not been matched to a module used by the package being exported, an exception is thrown.
If ``allow_empty=True``, no such exception is thrown.
"""
self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo(
_ModuleProviderAction.MOCK, allow_empty
)
def extern(
self,
include: "GlobPattern",
*,
exclude: "GlobPattern" = (),
allow_empty: bool = True,
    ):
        """Include the matched modules in the list of external modules the package can import.
This will prevent dependency discovery from saving
it in the package. The importer will load an external module directly from the standard import system.
Code for extern modules must also exist in the process loading the package.
Args:
include (Union[List[str], str]): A string e.g. ``"my_package.my_subpackage"``, or list of strings
for the names of the modules to be externed. This can also be a glob-style pattern, as
described in :meth:`mock`.
exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the
include string.
allow_empty (bool): An optional flag that specifies whether the extern modules specified by this call
to the ``extern`` method must be matched to some module during packaging. If an extern module glob
pattern is added with ``allow_empty=False``, and :meth:`close` is called (either explicitly or via
``__exit__``) before any modules match that pattern, an exception is thrown. If ``allow_empty=True``,
no such exception is thrown.
"""
self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo(
_ModuleProviderAction.EXTERN, allow_empty
)
    def deny(self, include: "GlobPattern", *, exclude: "GlobPattern" = ()):
        """Blocklist modules whose names match the given glob patterns from the list of modules the package can import.
If a dependency on any matching packages is found, a :class:`PackagingError` is raised.
Args:
            include (Union[List[str], str]): A string e.g. ``"my_package.my_subpackage"``, or list of strings
                for the names of the modules to be denied. This can also be a glob-style pattern, as described in :meth:`mock`.
exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string.
"""
self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo(
_ModuleProviderAction.DENY, allow_empty=True
)
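    # Illustrative sketch (comments only) of how the pattern methods above are
    # typically combined; the module names are hypothetical.
    #
    #   exporter.intern("my_package.**")    # copy these sources into the package
    #   exporter.extern("numpy.**")         # import these from the loading environment
    #   exporter.mock("matplotlib.**")      # replace these with stub modules
    #   exporter.deny("pytest.**")          # raise if these are ever required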
def _persistent_id(self, obj):
if torch.is_storage(obj) or isinstance(obj, torch.storage.TypedStorage):
if isinstance(obj, torch.storage.TypedStorage):
# TODO: Once we decide to break serialization FC, we can
# remove this case
untyped_storage = obj._storage
storage_type_str = obj.pickle_storage_type()
storage_type = getattr(torch, storage_type_str)
storage_numel = obj.size()
elif isinstance(obj, torch.UntypedStorage):
untyped_storage = obj
                storage_type = normalize_storage_type(type(obj))
                storage_numel = obj.nbytes()
else:
raise RuntimeError(f"storage type not recognized: {type(obj)}")
storage: Storage = cast(Storage, untyped_storage)
location = location_tag(storage)
# serialize storage if not already written
storage_present = self.storage_context.has_storage(storage)
storage_id = self.storage_context.get_or_add_storage(storage)
if not storage_present:
if storage.device.type != "cpu":
storage = storage.cpu()
num_bytes = storage.nbytes()
self.zip_file.write_record(
f".data/{storage_id}.storage", storage.data_ptr(), num_bytes
)
return ("storage", storage_type, storage_id, location, storage_numel)
if hasattr(obj, "__reduce_package__"):
if _gate_torchscript_serialization and isinstance(
obj, torch.jit.RecursiveScriptModule
):
raise Exception(
"Serializing ScriptModules directly into a package is a beta feature. "
"To use, set global "
"`torch.package.package_exporter._gate_torchscript_serialization` to `False`."
)
if self.serialized_reduces.get(id(obj)) is None:
self.serialized_reduces[id(obj)] = (
"reduce_package",
id(obj),
*obj.__reduce_package__(self),
)
return self.serialized_reduces[id(obj)]
return None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
# If __exit__ was called because an exception was raised, we do not
# attempt to finalize the package. Instead, control is returned to the
# caller to continue raising the exception.
if exc_type is not None:
# Do the bare minimum to leave the open buffer in a valid state.
self._finalize_zip()
return
self.close()
def _write(self, filename, str_or_bytes):
if filename in self._written_files:
raise AssertionError(
f"Tried to write file '{filename}', but it already exists in this archive. "
"Please file a bug."
)
self._written_files.add(filename)
if is_mangled(filename):
raise AssertionError(
f"Tried to save a torch.package'd module as '{filename}'. "
"Directly saving torch.package'd modules is not allowed."
)
if isinstance(str_or_bytes, str):
str_or_bytes = str_or_bytes.encode("utf-8")
self.zip_file.write_record(filename, str_or_bytes, len(str_or_bytes))
def _validate_dependency_graph(self):
# 1. Check the graph for any errors inserted during dependency analysis.
for module_name, attrs in self.dependency_graph.nodes.items():
if "error" in attrs:
raise PackagingError(self.dependency_graph)
# 2. Check that all patterns for which allow_empty=False have been matched at least once.
for pattern, pattern_info in self.patterns.items():
if not pattern_info.allow_empty and not pattern_info.was_matched:
raise EmptyMatchError(
f"Exporter did not match any modules to {pattern}, which was marked as allow_empty=False"
)
def _write_mock_file(self):
if "_mock.py" not in self._written_files:
mock_file = str(Path(__file__).parent / "_mock.py")
self._write_source_string("_mock", _read_file(mock_file), is_package=False)
def _execute_dependency_graph(self):
"""Takes a finalized dependency graph describing how to package all
modules and executes it, writing to the ZIP archive.
"""
self._validate_dependency_graph()
extern_modules = []
for module_name, attrs in self.dependency_graph.nodes.items():
action = attrs["action"]
if action == _ModuleProviderAction.EXTERN:
for hook in self._extern_hooks.values():
hook(self, module_name)
extern_modules.append(module_name)
elif action == _ModuleProviderAction.MOCK:
for hook in self._mock_hooks.values():
hook(self, module_name)
self._write_mock_file()
is_package = hasattr(self._import_module(module_name), "__path__")
self._write_source_string(module_name, _MOCK_IMPL, is_package)
elif action == _ModuleProviderAction.INTERN:
for hook in self._intern_hooks.values():
hook(self, module_name)
# The node in the dependency graph contains metadata that tells us
# how to intern the module.
if "provided" not in attrs:
raise AssertionError(
f"Module was marked `intern` but not provided: {module_name}"
)
if attrs.get("is_pickle") is True:
# This node came from save_pickle, we don't need to write any source for it.
continue
is_package = attrs["is_package"]
source = attrs["source"]
self._write_source_string(module_name, source, is_package)
elif action == _ModuleProviderAction.REPACKAGED_MOCK_MODULE:
self._write_mock_file()
elif action == _ModuleProviderAction.SKIP:
continue
else:
raise AssertionError(
f"Invalid action: {module_name}, {action}. Please report a bug to PyTorch."
)
extern_file_contents = "\n".join(extern_modules) + "\n"
self._write(".data/extern_modules", extern_file_contents)
def _write_python_version(self):
"""Writes the python version that the package was created with to .data/python_version"""
self._write(".data/python_version", platform.python_version())
def close(self):
"""Write the package to the filesystem. Any calls after :meth:`close` are now invalid.
It is preferable to use resource guard syntax instead::
with PackageExporter("file.zip") as e:
...
"""
self._execute_dependency_graph()
self._write_python_version()
self.script_module_serializer.write_files()
self._finalize_zip()
def _finalize_zip(self):
"""Called at the very end of packaging to leave the zipfile in a closed but valid state."""
del self.zip_file
if self.buffer:
self.buffer.flush()
def _filename(self, package, resource):
package_path = package.replace(".", "/")
resource = _normalize_path(resource)
return f"{package_path}/{resource}"
def _can_implicitly_extern(self, module_name: str):
top_level_package_name = module_name.partition(".")[0]
return top_level_package_name == "torch" or (
top_level_package_name not in _DISALLOWED_MODULES
and is_stdlib_module(top_level_package_name)
)
def dependency_graph_string(self) -> str:
"""Returns digraph string representation of dependencies in package.
Returns:
A string representation of dependencies in package.
"""
return self.dependency_graph.to_dot()
def _nodes_with_action_type(
self, action: Optional[_ModuleProviderAction]
) -> List[str]:
result = []
for name, node_dict in self.dependency_graph.nodes.items():
node_action = node_dict.get("action", None)
if node_action == action and "is_pickle" not in node_dict:
result.append(name)
result.sort()
return result
def externed_modules(self) -> List[str]:
"""Return all modules that are currently externed.
Returns:
A list containing the names of modules which will be
externed in this package.
"""
return self._nodes_with_action_type(_ModuleProviderAction.EXTERN)
def interned_modules(self) -> List[str]:
"""Return all modules that are currently interned.
Returns:
A list containing the names of modules which will be
interned in this package.
"""
return self._nodes_with_action_type(_ModuleProviderAction.INTERN)
def mocked_modules(self) -> List[str]:
"""Return all modules that are currently mocked.
Returns:
A list containing the names of modules which will be
mocked in this package.
"""
return self._nodes_with_action_type(_ModuleProviderAction.MOCK)
def denied_modules(self) -> List[str]:
"""Return all modules that are currently denied.
Returns:
A list containing the names of modules which will be
denied in this package.
"""
return self._nodes_with_action_type(_ModuleProviderAction.DENY)
def get_rdeps(self, module_name: str) -> List[str]:
"""Return a list of all modules which depend on the module ``module_name``.
Returns:
A list containing the names of modules which depend on ``module_name``.
"""
if module_name in self.dependency_graph._pred.keys():
return list(self.dependency_graph._pred[module_name].keys())
else:
return []
def all_paths(self, src: str, dst: str) -> str:
"""Return a dot representation of the subgraph
that has all paths from src to dst.
Returns:
A dot representation containing all paths from src to dst.
(https://graphviz.org/doc/info/lang.html)
"""
return self.dependency_graph.all_paths(src, dst)
# even though these are in the standard library, we do not allow them to be
# automatically externed since they offer a lot of system level access
_DISALLOWED_MODULES = ["sys", "io"]
_MOCK_IMPL = """\
from _mock import MockedObject
def __getattr__(attr: str):
return MockedObject(__name__ + '.' + attr, _suppress_err=True)
"""
def _read_file(filename: str) -> str:
with open(filename, "rb") as f:
b = f.read()
return b.decode("utf-8")
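# A minimal end-to-end sketch (illustrative only; ``my_module`` and its ``model``
# attribute are hypothetical) of exporting an object and loading it back.
def _example_round_trip():
    import io

    import torch.package as package

    buffer = io.BytesIO()
    with package.PackageExporter(buffer) as exporter:
        exporter.extern("numpy.**")      # resolved from the loading environment
        exporter.intern("my_module.**")  # sources are copied into the archive
        import my_module  # hypothetical user module
        exporter.save_pickle("models", "model.pkl", my_module.model)
    buffer.seek(0)
    importer = package.PackageImporter(buffer)
    return importer.load_pickle("models", "model.pkl")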
| pytorch-master | torch/package/package_exporter.py |
# -*- coding: utf-8 -*-
from typing import Dict, List
from .glob_group import GlobGroup, GlobPattern
__all__ = ["Directory"]
class Directory:
"""A file structure representation. Organized as Directory nodes that have lists of
their Directory children. Directories for a package are created by calling
:meth:`PackageImporter.file_structure`."""
def __init__(self, name: str, is_dir: bool):
self.name = name
self.is_dir = is_dir
self.children: Dict[str, Directory] = {}
def _get_dir(self, dirs: List[str]) -> "Directory":
"""Builds path of Directories if not yet built and returns last directory
in list.
Args:
dirs (List[str]): List of directory names that are treated like a path.
Returns:
:class:`Directory`: The last Directory specified in the dirs list.
"""
if len(dirs) == 0:
return self
dir_name = dirs[0]
if dir_name not in self.children:
self.children[dir_name] = Directory(dir_name, True)
return self.children[dir_name]._get_dir(dirs[1:])
def _add_file(self, file_path: str):
"""Adds a file to a Directory.
Args:
            file_path (str): Path of file to add. The last element is added as a file while
                the other path items are added as directories.
"""
*dirs, file = file_path.split("/")
dir = self._get_dir(dirs)
dir.children[file] = Directory(file, False)
def has_file(self, filename: str) -> bool:
"""Checks if a file is present in a :class:`Directory`.
Args:
filename (str): Path of file to search for.
Returns:
bool: If a :class:`Directory` contains the specified file.
"""
lineage = filename.split("/", maxsplit=1)
child = lineage[0]
grandchildren = lineage[1] if len(lineage) > 1 else None
if child in self.children.keys():
if grandchildren is None:
return True
else:
return self.children[child].has_file(grandchildren)
return False
def __str__(self):
str_list: List[str] = []
self._stringify_tree(str_list)
return "".join(str_list)
def _stringify_tree(
self, str_list: List[str], preamble: str = "", dir_ptr: str = "─── "
):
"""Recursive method to generate print-friendly version of a Directory."""
space = " "
branch = "│ "
tee = "├── "
last = "└── "
# add this directory's representation
str_list.append(f"{preamble}{dir_ptr}{self.name}\n")
# add directory's children representations
if dir_ptr == tee:
preamble = preamble + branch
else:
preamble = preamble + space
file_keys: List[str] = []
dir_keys: List[str] = []
for key, val in self.children.items():
if val.is_dir:
dir_keys.append(key)
else:
file_keys.append(key)
for index, key in enumerate(sorted(dir_keys)):
if (index == len(dir_keys) - 1) and len(file_keys) == 0:
self.children[key]._stringify_tree(str_list, preamble, last)
else:
self.children[key]._stringify_tree(str_list, preamble, tee)
for index, file in enumerate(sorted(file_keys)):
pointer = last if (index == len(file_keys) - 1) else tee
str_list.append(f"{preamble}{pointer}{file}\n")
def _create_directory_from_file_list(
filename: str,
file_list: List[str],
include: "GlobPattern" = "**",
exclude: "GlobPattern" = (),
) -> Directory:
"""Return a :class:`Directory` file structure representation created from a list of files.
Args:
filename (str): The name given to the top-level directory that will be the
relative root for all file paths found in the file_list.
file_list (List[str]): List of files to add to the top-level directory.
include (Union[List[str], str]): An optional pattern that limits what is included from the file_list to
files whose name matches the pattern.
exclude (Union[List[str], str]): An optional pattern that excludes files whose name match the pattern.
Returns:
:class:`Directory`: a :class:`Directory` file structure representation created from a list of files.
"""
glob_pattern = GlobGroup(include, exclude=exclude, separator="/")
top_dir = Directory(filename, True)
for file in file_list:
if glob_pattern.matches(file):
top_dir._add_file(file)
return top_dir
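# A minimal sketch (illustrative only; the archive and file names are
# hypothetical) of how a Directory is assembled from a flat list of records,
# which is what PackageImporter.file_structure() does with a zipfile's contents.
def _example_directory():
    files = [
        "my_package/__init__.py",
        "my_package/model.py",
        "my_package/data/weights.pkl",
    ]
    top = _create_directory_from_file_list("archive.zip", files)
    assert top.has_file("my_package/model.py")
    assert not top.has_file("my_package/missing.py")
    print(top)  # renders the tree using the box-drawing characters above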
| pytorch-master | torch/package/file_structure_representation.py |
_magic_methods = [
"__subclasscheck__",
"__hex__",
"__rmul__",
"__float__",
"__idiv__",
"__setattr__",
"__div__",
"__invert__",
"__nonzero__",
"__rshift__",
"__eq__",
"__pos__",
"__round__",
"__rand__",
"__or__",
"__complex__",
"__divmod__",
"__len__",
"__reversed__",
"__copy__",
"__reduce__",
"__deepcopy__",
"__rdivmod__",
"__rrshift__",
"__ifloordiv__",
"__hash__",
"__iand__",
"__xor__",
"__isub__",
"__oct__",
"__ceil__",
"__imod__",
"__add__",
"__truediv__",
"__unicode__",
"__le__",
"__delitem__",
"__sizeof__",
"__sub__",
"__ne__",
"__pow__",
"__bytes__",
"__mul__",
"__itruediv__",
"__bool__",
"__iter__",
"__abs__",
"__gt__",
"__iadd__",
"__enter__",
"__floordiv__",
"__call__",
"__neg__",
"__and__",
"__ixor__",
"__getitem__",
"__exit__",
"__cmp__",
"__getstate__",
"__index__",
"__contains__",
"__floor__",
"__lt__",
"__getattr__",
"__mod__",
"__trunc__",
"__delattr__",
"__instancecheck__",
"__setitem__",
"__ipow__",
"__ilshift__",
"__long__",
"__irshift__",
"__imul__",
"__lshift__",
"__dir__",
"__ge__",
"__int__",
"__ior__",
]
class MockedObject:
_name: str
def __new__(cls, *args, **kwargs):
# _suppress_err is set by us in the mocked module impl, so that we can
# construct instances of MockedObject to hand out to people looking up
# module attributes.
        # Any other attempt to construct a MockedObject instance (say, in the
# unpickling process) should give an error.
if not kwargs.get("_suppress_err"):
raise NotImplementedError(
f"Object '{cls._name}' was mocked out during packaging "
f"but it is being used in '__new__'. If this error is "
"happening during 'load_pickle', please ensure that your "
"pickled object doesn't contain any mocked objects."
)
# Otherwise, this is just a regular object creation
# (e.g. `x = MockedObject("foo")`), so pass it through normally.
return super().__new__(cls)
def __init__(self, name: str, _suppress_err: bool):
self.__dict__["_name"] = name
def __repr__(self):
return f"MockedObject({self._name})"
def install_method(method_name):
def _not_implemented(self, *args, **kwargs):
raise NotImplementedError(
f"Object '{self._name}' was mocked out during packaging but it is being used in {method_name}"
)
setattr(MockedObject, method_name, _not_implemented)
for method_name in _magic_methods:
install_method(method_name)
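# A minimal sketch (illustrative only; the dotted name is hypothetical) of how a
# mocked attribute behaves: it can be created and printed, but any real use of
# it (calling, indexing, arithmetic, ...) raises NotImplementedError.
def _example_mock():
    obj = MockedObject("my_module.SomeClass", _suppress_err=True)
    print(repr(obj))  # MockedObject(my_module.SomeClass)
    try:
        obj()
    except NotImplementedError as e:
        print(e)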
| pytorch-master | torch/package/_mock.py |
import importlib
from abc import ABC, abstractmethod
from pickle import (  # type: ignore[attr-defined]
_getattribute,
_Pickler,
whichmodule as _pickle_whichmodule,
)
from types import ModuleType
from typing import Any, Dict, List, Optional, Tuple
from ._mangling import demangle, get_mangle_prefix, is_mangled
__all__ = ["ObjNotFoundError", "ObjMismatchError", "Importer", "OrderedImporter"]
class ObjNotFoundError(Exception):
"""Raised when an importer cannot find an object by searching for its name."""
pass
class ObjMismatchError(Exception):
"""Raised when an importer found a different object with the same name as the user-provided one."""
pass
class Importer(ABC):
"""Represents an environment to import modules from.
    By default, you can figure out what module an object belongs to by checking
    __module__ and importing the result using __import__ or importlib.import_module.
torch.package introduces module importers other than the default one.
Each PackageImporter introduces a new namespace. Potentially a single
name (e.g. 'foo.bar') is present in multiple namespaces.
It supports two main operations:
import_module: module_name -> module object
get_name: object -> (parent module name, name of obj within module)
    The guarantee is that the following round-trip will succeed or throw an ObjNotFoundError/ObjMismatchError.
module_name, obj_name = env.get_name(obj)
module = env.import_module(module_name)
obj2 = getattr(module, obj_name)
        assert obj is obj2
"""
modules: Dict[str, ModuleType]
@abstractmethod
def import_module(self, module_name: str) -> ModuleType:
"""Import `module_name` from this environment.
The contract is the same as for importlib.import_module.
"""
pass
def get_name(self, obj: Any, name: Optional[str] = None) -> Tuple[str, str]:
"""Given an object, return a name that can be used to retrieve the
object from this environment.
Args:
            obj: An object to get the module-environment-relative name for.
            name: If set, use this name instead of looking up __name__ or __qualname__ on `obj`.
                This is only here to match how Pickler handles __reduce__ functions that return a string;
                do not use it otherwise.
Returns:
A tuple (parent_module_name, attr_name) that can be used to retrieve `obj` from this environment.
Use it like:
mod = importer.import_module(parent_module_name)
obj = getattr(mod, attr_name)
Raises:
            ObjNotFoundError: we couldn't retrieve `obj` by name.
            ObjMismatchError: we found a different object with the same name as `obj`.
"""
if name is None and obj and _Pickler.dispatch.get(type(obj)) is None:
# Honor the string return variant of __reduce__, which will give us
# a global name to search for in this environment.
# TODO: I guess we should do copyreg too?
reduce = getattr(obj, "__reduce__", None)
if reduce is not None:
try:
rv = reduce()
if isinstance(rv, str):
name = rv
except Exception:
pass
if name is None:
name = getattr(obj, "__qualname__", None)
if name is None:
name = obj.__name__
orig_module_name = self.whichmodule(obj, name)
# Demangle the module name before importing. If this obj came out of a
# PackageImporter, `__module__` will be mangled. See mangling.md for
# details.
module_name = demangle(orig_module_name)
# Check that this name will indeed return the correct object
try:
module = self.import_module(module_name)
obj2, _ = _getattribute(module, name)
except (ImportError, KeyError, AttributeError):
raise ObjNotFoundError(
f"{obj} was not found as {module_name}.{name}"
) from None
if obj is obj2:
return module_name, name
def get_obj_info(obj):
assert name is not None
module_name = self.whichmodule(obj, name)
is_mangled_ = is_mangled(module_name)
location = (
get_mangle_prefix(module_name)
if is_mangled_
else "the current Python environment"
)
importer_name = (
f"the importer for {get_mangle_prefix(module_name)}"
if is_mangled_
else "'sys_importer'"
)
return module_name, location, importer_name
obj_module_name, obj_location, obj_importer_name = get_obj_info(obj)
obj2_module_name, obj2_location, obj2_importer_name = get_obj_info(obj2)
msg = (
f"\n\nThe object provided is from '{obj_module_name}', "
f"which is coming from {obj_location}."
f"\nHowever, when we import '{obj2_module_name}', it's coming from {obj2_location}."
"\nTo fix this, make sure this 'PackageExporter's importer lists "
f"{obj_importer_name} before {obj2_importer_name}."
)
raise ObjMismatchError(msg)
def whichmodule(self, obj: Any, name: str) -> str:
"""Find the module name an object belongs to.
This should be considered internal for end-users, but developers of
an importer can override it to customize the behavior.
Taken from pickle.py, but modified to exclude the search into sys.modules
"""
module_name = getattr(obj, "__module__", None)
if module_name is not None:
return module_name
# Protect the iteration by using a list copy of self.modules against dynamic
# modules that trigger imports of other modules upon calls to getattr.
for module_name, module in self.modules.copy().items():
if (
module_name == "__main__"
or module_name == "__mp_main__" # bpo-42406
or module is None
):
continue
try:
if _getattribute(module, name)[0] is obj:
return module_name
except AttributeError:
pass
return "__main__"
class _SysImporter(Importer):
"""An importer that implements the default behavior of Python."""
def import_module(self, module_name: str):
return importlib.import_module(module_name)
def whichmodule(self, obj: Any, name: str) -> str:
return _pickle_whichmodule(obj, name)
sys_importer = _SysImporter()
class OrderedImporter(Importer):
"""A compound importer that takes a list of importers and tries them one at a time.
The first importer in the list that returns a result "wins".
"""
def __init__(self, *args):
self._importers: List[Importer] = list(args)
def _is_torchpackage_dummy(self, module):
"""Returns true iff this module is an empty PackageNode in a torch.package.
If you intern `a.b` but never use `a` in your code, then `a` will be an
empty module with no source. This can break cases where we are trying to
re-package an object after adding a real dependency on `a`, since
        OrderedImporter will resolve `a` to the dummy package and stop there.
See: https://github.com/pytorch/pytorch/pull/71520#issuecomment-1029603769
"""
if not getattr(module, "__torch_package__", False):
return False
if not hasattr(module, "__path__"):
return False
if not hasattr(module, "__file__"):
return True
return module.__file__ is None
def import_module(self, module_name: str) -> ModuleType:
last_err = None
for importer in self._importers:
if not isinstance(importer, Importer):
raise TypeError(
                    f"{importer} is not an Importer. "
"All importers in OrderedImporter must inherit from Importer."
)
try:
module = importer.import_module(module_name)
if self._is_torchpackage_dummy(module):
continue
return module
except ModuleNotFoundError as err:
last_err = err
if last_err is not None:
raise last_err
else:
raise ModuleNotFoundError(module_name)
def whichmodule(self, obj: Any, name: str) -> str:
for importer in self._importers:
module_name = importer.whichmodule(obj, name)
if module_name != "__main__":
return module_name
return "__main__"
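# A minimal sketch (illustrative only) of composing importers: look a module up
# in an existing PackageImporter first, then fall back to the normal Python
# environment via ``sys_importer``.
def _example_ordered_importer(package_importer):
    importer = OrderedImporter(package_importer, sys_importer)
    # Not packaged, so this falls through to the standard import system.
    json_module = importer.import_module("json")
    # get_name() inverts the lookup: it returns a (module, attribute) pair that
    # round-trips back to the same object.
    module_name, attr = importer.get_name(json_module.dumps)
    return module_name, attr  # ("json", "dumps")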
| pytorch-master | torch/package/importer.py |
"""List of Python standard library modules.
Sadly, there is no reliable way to tell whether a module is part of the
standard library except by comparing to a canonical list.
This is taken from https://github.com/PyCQA/isort/tree/develop/isort/stdlibs,
which itself is sourced from the Python documentation.
"""
import sys
def is_stdlib_module(module: str) -> bool:
base_module = module.partition(".")[0]
return base_module in _get_stdlib_modules()
def _get_stdlib_modules():
if sys.version_info.major == 3:
if sys.version_info.minor == 6:
return stdlib3_6
if sys.version_info.minor == 7:
return stdlib3_7
if sys.version_info.minor == 8:
return stdlib3_8
if sys.version_info.minor == 9:
return stdlib3_9
if sys.version_info.minor >= 10:
return sys.stdlib_module_names # type: ignore[attr-defined]
elif sys.version_info.major > 3:
return sys.stdlib_module_names # type: ignore[attr-defined]
raise RuntimeError(f"Unsupported Python version: {sys.version_info}")
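# A minimal sketch (illustrative only) of the lookup: only the top-level package
# name is consulted, so submodules of a stdlib package count as stdlib too.
def _example_is_stdlib():
    assert is_stdlib_module("os")
    assert is_stdlib_module("os.path")
    assert not is_stdlib_module("torch")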
stdlib3_6 = {
"_dummy_thread",
"_thread",
"abc",
"aifc",
"argparse",
"array",
"ast",
"asynchat",
"asyncio",
"asyncore",
"atexit",
"audioop",
"base64",
"bdb",
"binascii",
"binhex",
"bisect",
"builtins",
"bz2",
"cProfile",
"calendar",
"cgi",
"cgitb",
"chunk",
"cmath",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"compileall",
"concurrent",
"configparser",
"contextlib",
"copy",
"copyreg",
"crypt",
"csv",
"ctypes",
"curses",
"datetime",
"dbm",
"decimal",
"difflib",
"dis",
"distutils",
"doctest",
"dummy_threading",
"email",
"encodings",
"ensurepip",
"enum",
"errno",
"faulthandler",
"fcntl",
"filecmp",
"fileinput",
"fnmatch",
"formatter",
"fpectl",
"fractions",
"ftplib",
"functools",
"gc",
"getopt",
"getpass",
"gettext",
"glob",
"grp",
"gzip",
"hashlib",
"heapq",
"hmac",
"html",
"http",
"imaplib",
"imghdr",
"imp",
"importlib",
"inspect",
"io",
"ipaddress",
"itertools",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
"lzma",
"macpath",
"mailbox",
"mailcap",
"marshal",
"math",
"mimetypes",
"mmap",
"modulefinder",
"msilib",
"msvcrt",
"multiprocessing",
"netrc",
"nis",
"nntplib",
"ntpath",
"numbers",
"operator",
"optparse",
"os",
"ossaudiodev",
"parser",
"pathlib",
"pdb",
"pickle",
"pickletools",
"pipes",
"pkgutil",
"platform",
"plistlib",
"poplib",
"posix",
"posixpath",
"pprint",
"profile",
"pstats",
"pty",
"pwd",
"py_compile",
"pyclbr",
"pydoc",
"queue",
"quopri",
"random",
"re",
"readline",
"reprlib",
"resource",
"rlcompleter",
"runpy",
"sched",
"secrets",
"select",
"selectors",
"shelve",
"shlex",
"shutil",
"signal",
"site",
"smtpd",
"smtplib",
"sndhdr",
"socket",
"socketserver",
"spwd",
"sqlite3",
"sre",
"sre_compile",
"sre_constants",
"sre_parse",
"ssl",
"stat",
"statistics",
"string",
"stringprep",
"struct",
"subprocess",
"sunau",
"symbol",
"symtable",
"sys",
"sysconfig",
"syslog",
"tabnanny",
"tarfile",
"telnetlib",
"tempfile",
"termios",
"test",
"textwrap",
"threading",
"time",
"timeit",
"tkinter",
"token",
"tokenize",
"trace",
"traceback",
"tracemalloc",
"tty",
"turtle",
"turtledemo",
"types",
"typing",
"unicodedata",
"unittest",
"urllib",
"uu",
"uuid",
"venv",
"warnings",
"wave",
"weakref",
"webbrowser",
"winreg",
"winsound",
"wsgiref",
"xdrlib",
"xml",
"xmlrpc",
"zipapp",
"zipfile",
"zipimport",
"zlib",
}
stdlib3_7 = {
"_dummy_thread",
"_thread",
"abc",
"aifc",
"argparse",
"array",
"ast",
"asynchat",
"asyncio",
"asyncore",
"atexit",
"audioop",
"base64",
"bdb",
"binascii",
"binhex",
"bisect",
"builtins",
"bz2",
"cProfile",
"calendar",
"cgi",
"cgitb",
"chunk",
"cmath",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"compileall",
"concurrent",
"configparser",
"contextlib",
"contextvars",
"copy",
"copyreg",
"crypt",
"csv",
"ctypes",
"curses",
"dataclasses",
"datetime",
"dbm",
"decimal",
"difflib",
"dis",
"distutils",
"doctest",
"dummy_threading",
"email",
"encodings",
"ensurepip",
"enum",
"errno",
"faulthandler",
"fcntl",
"filecmp",
"fileinput",
"fnmatch",
"formatter",
"fractions",
"ftplib",
"functools",
"gc",
"getopt",
"getpass",
"gettext",
"glob",
"grp",
"gzip",
"hashlib",
"heapq",
"hmac",
"html",
"http",
"imaplib",
"imghdr",
"imp",
"importlib",
"inspect",
"io",
"ipaddress",
"itertools",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
"lzma",
"macpath",
"mailbox",
"mailcap",
"marshal",
"math",
"mimetypes",
"mmap",
"modulefinder",
"msilib",
"msvcrt",
"multiprocessing",
"netrc",
"nis",
"nntplib",
"ntpath",
"numbers",
"operator",
"optparse",
"os",
"ossaudiodev",
"parser",
"pathlib",
"pdb",
"pickle",
"pickletools",
"pipes",
"pkgutil",
"platform",
"plistlib",
"poplib",
"posix",
"posixpath",
"pprint",
"profile",
"pstats",
"pty",
"pwd",
"py_compile",
"pyclbr",
"pydoc",
"queue",
"quopri",
"random",
"re",
"readline",
"reprlib",
"resource",
"rlcompleter",
"runpy",
"sched",
"secrets",
"select",
"selectors",
"shelve",
"shlex",
"shutil",
"signal",
"site",
"smtpd",
"smtplib",
"sndhdr",
"socket",
"socketserver",
"spwd",
"sqlite3",
"sre",
"sre_compile",
"sre_constants",
"sre_parse",
"ssl",
"stat",
"statistics",
"string",
"stringprep",
"struct",
"subprocess",
"sunau",
"symbol",
"symtable",
"sys",
"sysconfig",
"syslog",
"tabnanny",
"tarfile",
"telnetlib",
"tempfile",
"termios",
"test",
"textwrap",
"threading",
"time",
"timeit",
"tkinter",
"token",
"tokenize",
"trace",
"traceback",
"tracemalloc",
"tty",
"turtle",
"turtledemo",
"types",
"typing",
"unicodedata",
"unittest",
"urllib",
"uu",
"uuid",
"venv",
"warnings",
"wave",
"weakref",
"webbrowser",
"winreg",
"winsound",
"wsgiref",
"xdrlib",
"xml",
"xmlrpc",
"zipapp",
"zipfile",
"zipimport",
"zlib",
}
stdlib3_8 = {
"_dummy_thread",
"_thread",
"abc",
"aifc",
"argparse",
"array",
"ast",
"asynchat",
"asyncio",
"asyncore",
"atexit",
"audioop",
"base64",
"bdb",
"binascii",
"binhex",
"bisect",
"builtins",
"bz2",
"cProfile",
"calendar",
"cgi",
"cgitb",
"chunk",
"cmath",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"compileall",
"concurrent",
"configparser",
"contextlib",
"contextvars",
"copy",
"copyreg",
"crypt",
"csv",
"ctypes",
"curses",
"dataclasses",
"datetime",
"dbm",
"decimal",
"difflib",
"dis",
"distutils",
"doctest",
"dummy_threading",
"email",
"encodings",
"ensurepip",
"enum",
"errno",
"faulthandler",
"fcntl",
"filecmp",
"fileinput",
"fnmatch",
"formatter",
"fractions",
"ftplib",
"functools",
"gc",
"getopt",
"getpass",
"gettext",
"glob",
"grp",
"gzip",
"hashlib",
"heapq",
"hmac",
"html",
"http",
"imaplib",
"imghdr",
"imp",
"importlib",
"inspect",
"io",
"ipaddress",
"itertools",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
"lzma",
"mailbox",
"mailcap",
"marshal",
"math",
"mimetypes",
"mmap",
"modulefinder",
"msilib",
"msvcrt",
"multiprocessing",
"netrc",
"nis",
"nntplib",
"ntpath",
"numbers",
"operator",
"optparse",
"os",
"ossaudiodev",
"parser",
"pathlib",
"pdb",
"pickle",
"pickletools",
"pipes",
"pkgutil",
"platform",
"plistlib",
"poplib",
"posix",
"posixpath",
"pprint",
"profile",
"pstats",
"pty",
"pwd",
"py_compile",
"pyclbr",
"pydoc",
"queue",
"quopri",
"random",
"re",
"readline",
"reprlib",
"resource",
"rlcompleter",
"runpy",
"sched",
"secrets",
"select",
"selectors",
"shelve",
"shlex",
"shutil",
"signal",
"site",
"smtpd",
"smtplib",
"sndhdr",
"socket",
"socketserver",
"spwd",
"sqlite3",
"sre",
"sre_compile",
"sre_constants",
"sre_parse",
"ssl",
"stat",
"statistics",
"string",
"stringprep",
"struct",
"subprocess",
"sunau",
"symbol",
"symtable",
"sys",
"sysconfig",
"syslog",
"tabnanny",
"tarfile",
"telnetlib",
"tempfile",
"termios",
"test",
"textwrap",
"threading",
"time",
"timeit",
"tkinter",
"token",
"tokenize",
"trace",
"traceback",
"tracemalloc",
"tty",
"turtle",
"turtledemo",
"types",
"typing",
"unicodedata",
"unittest",
"urllib",
"uu",
"uuid",
"venv",
"warnings",
"wave",
"weakref",
"webbrowser",
"winreg",
"winsound",
"wsgiref",
"xdrlib",
"xml",
"xmlrpc",
"zipapp",
"zipfile",
"zipimport",
"zlib",
}
stdlib3_9 = {
"_thread",
"abc",
"aifc",
"argparse",
"array",
"ast",
"asynchat",
"asyncio",
"asyncore",
"atexit",
"audioop",
"base64",
"bdb",
"binascii",
"binhex",
"bisect",
"builtins",
"bz2",
"cProfile",
"calendar",
"cgi",
"cgitb",
"chunk",
"cmath",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"compileall",
"concurrent",
"configparser",
"contextlib",
"contextvars",
"copy",
"copyreg",
"crypt",
"csv",
"ctypes",
"curses",
"dataclasses",
"datetime",
"dbm",
"decimal",
"difflib",
"dis",
"distutils",
"doctest",
"email",
"encodings",
"ensurepip",
"enum",
"errno",
"faulthandler",
"fcntl",
"filecmp",
"fileinput",
"fnmatch",
"formatter",
"fractions",
"ftplib",
"functools",
"gc",
"getopt",
"getpass",
"gettext",
"glob",
"graphlib",
"grp",
"gzip",
"hashlib",
"heapq",
"hmac",
"html",
"http",
"imaplib",
"imghdr",
"imp",
"importlib",
"inspect",
"io",
"ipaddress",
"itertools",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
"lzma",
"mailbox",
"mailcap",
"marshal",
"math",
"mimetypes",
"mmap",
"modulefinder",
"msilib",
"msvcrt",
"multiprocessing",
"netrc",
"nis",
"nntplib",
"ntpath",
"numbers",
"operator",
"optparse",
"os",
"ossaudiodev",
"parser",
"pathlib",
"pdb",
"pickle",
"pickletools",
"pipes",
"pkgutil",
"platform",
"plistlib",
"poplib",
"posix",
"posixpath",
"pprint",
"profile",
"pstats",
"pty",
"pwd",
"py_compile",
"pyclbr",
"pydoc",
"queue",
"quopri",
"random",
"re",
"readline",
"reprlib",
"resource",
"rlcompleter",
"runpy",
"sched",
"secrets",
"select",
"selectors",
"shelve",
"shlex",
"shutil",
"signal",
"site",
"smtpd",
"smtplib",
"sndhdr",
"socket",
"socketserver",
"spwd",
"sqlite3",
"sre",
"sre_compile",
"sre_constants",
"sre_parse",
"ssl",
"stat",
"statistics",
"string",
"stringprep",
"struct",
"subprocess",
"sunau",
"symbol",
"symtable",
"sys",
"sysconfig",
"syslog",
"tabnanny",
"tarfile",
"telnetlib",
"tempfile",
"termios",
"test",
"textwrap",
"threading",
"time",
"timeit",
"tkinter",
"token",
"tokenize",
"trace",
"traceback",
"tracemalloc",
"tty",
"turtle",
"turtledemo",
"types",
"typing",
"unicodedata",
"unittest",
"urllib",
"uu",
"uuid",
"venv",
"warnings",
"wave",
"weakref",
"webbrowser",
"winreg",
"winsound",
"wsgiref",
"xdrlib",
"xml",
"xmlrpc",
"zipapp",
"zipfile",
"zipimport",
"zlib",
"zoneinfo",
}
| pytorch-master | torch/package/_stdlib.py |
"""Import mangling.
See mangling.md for details.
"""
import re
_mangle_index = 0
class PackageMangler:
"""
Used on import, to ensure that all modules imported have a shared mangle parent.
"""
def __init__(self):
global _mangle_index
self._mangle_index = _mangle_index
# Increment the global index
_mangle_index += 1
# Angle brackets are used so that there is almost no chance of
# confusing this module for a real module. Plus, it is Python's
# preferred way of denoting special modules.
self._mangle_parent = f"<torch_package_{self._mangle_index}>"
def mangle(self, name) -> str:
assert len(name) != 0
return self._mangle_parent + "." + name
def demangle(self, mangled: str) -> str:
"""
Note: This only demangles names that were mangled by this specific
PackageMangler. It will pass through names created by a different
PackageMangler instance.
"""
if mangled.startswith(self._mangle_parent + "."):
return mangled.partition(".")[2]
# wasn't a mangled name
return mangled
def parent_name(self):
return self._mangle_parent
def is_mangled(name: str) -> bool:
return bool(re.match(r"<torch_package_\d+>", name))
def demangle(name: str) -> str:
"""
Note: Unlike PackageMangler.demangle, this version works on any
mangled name, irrespective of which PackageMangler created it.
"""
if is_mangled(name):
first, sep, last = name.partition(".")
# If there is only a base mangle prefix, e.g. '<torch_package_0>',
# then return an empty string.
return last if len(sep) != 0 else ""
return name
def get_mangle_prefix(name: str) -> str:
return name.partition(".")[0] if is_mangled(name) else name
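# A minimal sketch (illustrative only) of how mangling round-trips; the module
# name "foo.bar" is hypothetical.
def _example_mangling():
    mangler = PackageMangler()
    mangled = mangler.mangle("foo.bar")  # e.g. "<torch_package_0>.foo.bar"
    assert is_mangled(mangled)
    assert mangler.demangle(mangled) == "foo.bar"
    assert demangle(mangled) == "foo.bar"  # works for any mangler's output
    assert get_mangle_prefix(mangled).startswith("<torch_package_")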
| pytorch-master | torch/package/_mangling.py |
from .analyze.is_from_package import is_from_package
from .file_structure_representation import Directory
from .glob_group import GlobGroup
from .importer import (
Importer,
ObjMismatchError,
ObjNotFoundError,
OrderedImporter,
sys_importer,
)
from .package_exporter import EmptyMatchError, PackageExporter, PackagingError
from .package_importer import PackageImporter
| pytorch-master | torch/package/__init__.py |
import builtins
import importlib
import importlib.machinery
import inspect
import io
import linecache
import os.path
import types
from contextlib import contextmanager
from pathlib import Path
from typing import Any, BinaryIO, Callable, cast, Dict, Iterable, List, Optional, Union
from weakref import WeakValueDictionary
import torch
from torch.serialization import _get_restore_location, _maybe_decode_ascii
from ._directory_reader import DirectoryReader
from ._importlib import (
_calc___package__,
_normalize_line_endings,
_normalize_path,
_resolve_name,
_sanity_check,
)
from ._mangling import demangle, PackageMangler
from ._package_unpickler import PackageUnpickler
from .file_structure_representation import _create_directory_from_file_list, Directory
from .glob_group import GlobPattern
from .importer import Importer
__all__ = ["PackageImporter"]
# This is a list of imports that are implicitly allowed even if they haven't
# been marked as extern. This is to work around the fact that Torch implicitly
# depends on numpy and package can't track it.
# https://github.com/pytorch/MultiPy/issues/46
IMPLICIT_IMPORT_ALLOWLIST: Iterable[str] = [
"numpy",
"numpy.core",
"numpy.core._multiarray_umath",
]
class PackageImporter(Importer):
"""Importers allow you to load code written to packages by :class:`PackageExporter`.
Code is loaded in a hermetic way, using files from the package
rather than the normal python import system. This allows
for the packaging of PyTorch model code and data so that it can be run
on a server or used in the future for transfer learning.
The importer for packages ensures that code in the module can only be loaded from
within the package, except for modules explicitly listed as external during export.
The file ``extern_modules`` in the zip archive lists all the modules that a package externally depends on.
This prevents "implicit" dependencies where the package runs locally because it is importing
a locally-installed package, but then fails when the package is copied to another machine.
"""
"""The dictionary of already loaded modules from this package, equivalent to ``sys.modules`` but
local to this importer.
"""
modules: Dict[str, types.ModuleType]
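    # Typical usage (illustrative sketch in comments only; the file, module and
    # resource names are hypothetical):
    #
    #   importer = PackageImporter("my_package.pt")
    #   mod = importer.import_module("my_package.model")
    #   obj = importer.load_pickle("models", "model.pkl")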
def __init__(
self,
file_or_buffer: Union[str, torch._C.PyTorchFileReader, Path, BinaryIO],
module_allowed: Callable[[str], bool] = lambda module_name: True,
):
"""Open ``file_or_buffer`` for importing. This checks that the imported package only requires modules
allowed by ``module_allowed``
Args:
file_or_buffer: a file-like object (has to implement :meth:`read`, :meth:`readline`, :meth:`tell`, and :meth:`seek`),
a string, or an ``os.PathLike`` object containing a filename.
            module_allowed (Callable[[str], bool], optional): A method to determine if an externally provided module
should be allowed. Can be used to ensure packages loaded do not depend on modules that the server
does not support. Defaults to allowing anything.
Raises:
ImportError: If the package will use a disallowed module.
"""
torch._C._log_api_usage_once("torch.package.PackageImporter")
self.zip_reader: Any
if isinstance(file_or_buffer, torch._C.PyTorchFileReader):
self.filename = "<pytorch_file_reader>"
self.zip_reader = file_or_buffer
elif isinstance(file_or_buffer, (Path, str)):
self.filename = str(file_or_buffer)
if not os.path.isdir(self.filename):
self.zip_reader = torch._C.PyTorchFileReader(self.filename)
else:
self.zip_reader = DirectoryReader(self.filename)
else:
self.filename = "<binary>"
self.zip_reader = torch._C.PyTorchFileReader(file_or_buffer)
self.root = _PackageNode(None)
self.modules = {}
self.extern_modules = self._read_extern()
for extern_module in self.extern_modules:
if not module_allowed(extern_module):
raise ImportError(
f"package '{file_or_buffer}' needs the external module '{extern_module}' "
f"but that module has been disallowed"
)
self._add_extern(extern_module)
for fname in self.zip_reader.get_all_records():
self._add_file(fname)
self.patched_builtins = builtins.__dict__.copy()
self.patched_builtins["__import__"] = self.__import__
# Allow packaged modules to reference their PackageImporter
self.modules["torch_package_importer"] = self # type: ignore[assignment]
self._mangler = PackageMangler()
        # used for reduce deserialization
self.storage_context: Any = None
self.last_map_location = None
# used for torch.serialization._load
self.Unpickler = lambda *args, **kwargs: PackageUnpickler(self, *args, **kwargs)
def import_module(self, name: str, package=None):
"""Load a module from the package if it hasn't already been loaded, and then return
the module. Modules are loaded locally
to the importer and will appear in ``self.modules`` rather than ``sys.modules``.
Args:
name (str): Fully qualified name of the module to load.
package ([type], optional): Unused, but present to match the signature of importlib.import_module. Defaults to ``None``.
Returns:
types.ModuleType: The (possibly already) loaded module.
"""
# We should always be able to support importing modules from this package.
# This is to support something like:
# obj = importer.load_pickle(...)
# importer.import_module(obj.__module__) <- this string will be mangled
#
# Note that _mangler.demangle will not demangle any module names
# produced by a different PackageImporter instance.
name = self._mangler.demangle(name)
return self._gcd_import(name)
def load_binary(self, package: str, resource: str) -> bytes:
"""Load raw bytes.
Args:
package (str): The name of module package (e.g. ``"my_package.my_subpackage"``).
resource (str): The unique name for the resource.
Returns:
bytes: The loaded data.
"""
path = self._zipfile_path(package, resource)
return self.zip_reader.get_record(path)
def load_text(
self,
package: str,
resource: str,
encoding: str = "utf-8",
errors: str = "strict",
) -> str:
"""Load a string.
Args:
package (str): The name of module package (e.g. ``"my_package.my_subpackage"``).
resource (str): The unique name for the resource.
encoding (str, optional): Passed to ``decode``. Defaults to ``'utf-8'``.
errors (str, optional): Passed to ``decode``. Defaults to ``'strict'``.
Returns:
str: The loaded text.
"""
data = self.load_binary(package, resource)
return data.decode(encoding, errors)
def load_pickle(self, package: str, resource: str, map_location=None) -> Any:
"""Unpickles the resource from the package, loading any modules that are needed to construct the objects
using :meth:`import_module`.
Args:
package (str): The name of module package (e.g. ``"my_package.my_subpackage"``).
resource (str): The unique name for the resource.
map_location: Passed to `torch.load` to determine how tensors are mapped to devices. Defaults to ``None``.
Returns:
Any: The unpickled object.
"""
pickle_file = self._zipfile_path(package, resource)
restore_location = _get_restore_location(map_location)
loaded_storages = {}
loaded_reduces = {}
storage_context = torch._C.DeserializationStorageContext()
def load_tensor(dtype, size, key, location, restore_location):
name = f"{key}.storage"
if storage_context.has_storage(name):
storage = storage_context.get_storage(name, dtype).storage()
else:
tensor = self.zip_reader.get_storage_from_record(
".data/" + name, size, dtype
)
if isinstance(self.zip_reader, torch._C.PyTorchFileReader):
storage_context.add_storage(name, tensor)
storage = tensor.storage()
loaded_storages[key] = restore_location(storage, location)
def persistent_load(saved_id):
assert isinstance(saved_id, tuple)
typename = _maybe_decode_ascii(saved_id[0])
data = saved_id[1:]
if typename == "storage":
storage_type, key, location, size = data
dtype = storage_type.dtype
if key not in loaded_storages:
load_tensor(
dtype,
size,
key,
_maybe_decode_ascii(location),
restore_location,
)
storage = loaded_storages[key]
# TODO: Once we decide to break serialization FC, we can
# stop wrapping with TypedStorage
return torch.storage.TypedStorage(
wrap_storage=storage.untyped(), dtype=dtype
)
elif typename == "reduce_package":
# to fix BC breaking change, objects on this load path
# will be loaded multiple times erroneously
if len(data) == 2:
func, args = data
return func(self, *args)
reduce_id, func, args = data
if reduce_id not in loaded_reduces:
loaded_reduces[reduce_id] = func(self, *args)
return loaded_reduces[reduce_id]
            else:
                raise RuntimeError(
                    f"Unknown typename for persistent_load, expected 'storage' or 'reduce_package' but got '{typename}'"
                )
# Load the data (which may in turn use `persistent_load` to load tensors)
data_file = io.BytesIO(self.zip_reader.get_record(pickle_file))
unpickler = self.Unpickler(data_file)
unpickler.persistent_load = persistent_load # type: ignore[assignment]
@contextmanager
def set_deserialization_context():
            # to let reduce_package access deserialization context
self.storage_context = storage_context
self.last_map_location = map_location
try:
yield
finally:
self.storage_context = None
self.last_map_location = None
with set_deserialization_context():
result = unpickler.load()
# TODO from zdevito:
# This stateful weird function will need to be removed in our efforts
# to unify the format. It has a race condition if multiple python
# threads try to read independent files
torch._utils._validate_loaded_sparse_tensors()
return result
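    # Illustrative sketch (comments only; names are hypothetical): tensors inside
    # the pickled object can be remapped at load time just like torch.load.
    #
    #   model = importer.load_pickle("models", "model.pkl", map_location="cpu")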
def id(self):
"""
Returns internal identifier that torch.package uses to distinguish :class:`PackageImporter` instances.
Looks like::
<torch_package_0>
"""
return self._mangler.parent_name()
def file_structure(
self, *, include: "GlobPattern" = "**", exclude: "GlobPattern" = ()
) -> Directory:
"""Returns a file structure representation of package's zipfile.
Args:
include (Union[List[str], str]): An optional string e.g. ``"my_package.my_subpackage"``, or optional list of strings
                for the names of the files to be included in the zipfile representation. This can also be
a glob-style pattern, as described in :meth:`PackageExporter.mock`
exclude (Union[List[str], str]): An optional pattern that excludes files whose name match the pattern.
Returns:
:class:`Directory`
"""
return _create_directory_from_file_list(
self.filename, self.zip_reader.get_all_records(), include, exclude
)
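    # Illustrative sketch (comments only; the package name is hypothetical):
    #
    #   importer.file_structure()                         # full tree of the archive
    #   importer.file_structure(include="my_package/**")  # only that package's files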
def python_version(self):
"""Returns the version of python that was used to create this package.
Note: this function is experimental and not Forward Compatible. The plan is to move this into a lock
file later on.
Returns:
            :class:`Optional[str]`: a Python version, e.g. ``3.8.9``, or ``None`` if no version was stored with this package
"""
python_version_path = ".data/python_version"
return (
self.zip_reader.get_record(python_version_path).decode("utf-8").strip()
if self.zip_reader.has_record(python_version_path)
else None
)
def _read_extern(self):
return (
self.zip_reader.get_record(".data/extern_modules")
.decode("utf-8")
.splitlines(keepends=False)
)
def _make_module(
self, name: str, filename: Optional[str], is_package: bool, parent: str
):
mangled_filename = self._mangler.mangle(filename) if filename else None
spec = importlib.machinery.ModuleSpec(
name,
self, # type: ignore[arg-type]
origin="<package_importer>",
is_package=is_package,
)
module = importlib.util.module_from_spec(spec)
self.modules[name] = module
module.__name__ = self._mangler.mangle(name)
ns = module.__dict__
ns["__spec__"] = spec
ns["__loader__"] = self
ns["__file__"] = mangled_filename
ns["__cached__"] = None
ns["__builtins__"] = self.patched_builtins
ns["__torch_package__"] = True
# Add this module to our private global registry. It should be unique due to mangling.
assert module.__name__ not in _package_imported_modules
_package_imported_modules[module.__name__] = module
# pre-emptively install on the parent to prevent IMPORT_FROM from trying to
# access sys.modules
self._install_on_parent(parent, name, module)
if filename is not None:
assert mangled_filename is not None
# pre-emptively install the source in `linecache` so that stack traces,
# `inspect`, etc. work.
assert filename not in linecache.cache # type: ignore[attr-defined]
linecache.lazycache(mangled_filename, ns)
code = self._compile_source(filename, mangled_filename)
exec(code, ns)
return module
def _load_module(self, name: str, parent: str):
cur: _PathNode = self.root
for atom in name.split("."):
if not isinstance(cur, _PackageNode) or atom not in cur.children:
if name in IMPLICIT_IMPORT_ALLOWLIST:
module = self.modules[name] = importlib.import_module(name)
return module
raise ModuleNotFoundError(
f'No module named "{name}" in self-contained archive "{self.filename}"'
f" and the module is also not in the list of allowed external modules: {self.extern_modules}",
name=name,
)
cur = cur.children[atom]
if isinstance(cur, _ExternNode):
module = self.modules[name] = importlib.import_module(name)
return module
return self._make_module(name, cur.source_file, isinstance(cur, _PackageNode), parent) # type: ignore[attr-defined]
def _compile_source(self, fullpath: str, mangled_filename: str):
source = self.zip_reader.get_record(fullpath)
source = _normalize_line_endings(source)
return compile(source, mangled_filename, "exec", dont_inherit=True)
# note: named `get_source` so that linecache can find the source
# when this is the __loader__ of a module.
def get_source(self, module_name) -> str:
# linecache calls `get_source` with the `module.__name__` as the argument, so we must demangle it here.
module = self.import_module(demangle(module_name))
return self.zip_reader.get_record(demangle(module.__file__)).decode("utf-8")
# note: named `get_resource_reader` so that importlib.resources can find it.
# This is otherwise considered an internal method.
def get_resource_reader(self, fullname):
try:
package = self._get_package(fullname)
except ImportError:
return None
if package.__loader__ is not self:
return None
return _PackageResourceReader(self, fullname)
def _install_on_parent(self, parent: str, name: str, module: types.ModuleType):
if not parent:
return
# Set the module as an attribute on its parent.
parent_module = self.modules[parent]
if parent_module.__loader__ is self:
setattr(parent_module, name.rpartition(".")[2], module)
# note: copied from cpython's import code, with call to create module replaced with _make_module
def _do_find_and_load(self, name):
path = None
parent = name.rpartition(".")[0]
module_name_no_parent = name.rpartition(".")[-1]
if parent:
if parent not in self.modules:
self._gcd_import(parent)
# Crazy side-effects!
if name in self.modules:
return self.modules[name]
parent_module = self.modules[parent]
try:
path = parent_module.__path__ # type: ignore[attr-defined]
except AttributeError:
                # when we attempt to import a package containing only pybind-ed files,
                # the parent directory isn't always a package as defined by python,
                # so we check whether the package is actually there before raising the error.
if isinstance(
parent_module.__loader__,
importlib.machinery.ExtensionFileLoader,
):
if name not in self.extern_modules:
msg = (
_ERR_MSG
+ "; {!r} is a c extension module which was not externed. C extension modules \
need to be externed by the PackageExporter in order to be used as we do not support interning them."
).format(name, name)
raise ModuleNotFoundError(msg, name=name) from None
if not isinstance(
parent_module.__dict__.get(module_name_no_parent),
types.ModuleType,
):
msg = (
_ERR_MSG
+ "; {!r} is a c extension package which does not contain {!r}."
).format(name, parent, name)
raise ModuleNotFoundError(msg, name=name) from None
else:
msg = (_ERR_MSG + "; {!r} is not a package").format(name, parent)
raise ModuleNotFoundError(msg, name=name) from None
module = self._load_module(name, parent)
self._install_on_parent(parent, name, module)
return module
# note: copied from cpython's import code
def _find_and_load(self, name):
module = self.modules.get(name, _NEEDS_LOADING)
if module is _NEEDS_LOADING:
return self._do_find_and_load(name)
if module is None:
message = "import of {} halted; " "None in sys.modules".format(name)
raise ModuleNotFoundError(message, name=name)
# To handle https://github.com/pytorch/pytorch/issues/57490, where std's
# creation of fake submodules via the hacking of sys.modules is not import
# friendly
if name == "os":
self.modules["os.path"] = cast(Any, module).path
elif name == "typing":
self.modules["typing.io"] = cast(Any, module).io
self.modules["typing.re"] = cast(Any, module).re
return module
def _gcd_import(self, name, package=None, level=0):
"""Import and return the module based on its name, the package the call is
being made from, and the level adjustment.
This function represents the greatest common denominator of functionality
between import_module and __import__. This includes setting __package__ if
the loader did not.
"""
_sanity_check(name, package, level)
if level > 0:
name = _resolve_name(name, package, level)
return self._find_and_load(name)
# note: copied from cpython's import code
def _handle_fromlist(self, module, fromlist, *, recursive=False):
"""Figure out what __import__ should return.
The import_ parameter is a callable which takes the name of module to
import. It is required to decouple the function from assuming importlib's
import implementation is desired.
"""
module_name = demangle(module.__name__)
# The hell that is fromlist ...
# If a package was imported, try to import stuff from fromlist.
if hasattr(module, "__path__"):
for x in fromlist:
if not isinstance(x, str):
if recursive:
where = module_name + ".__all__"
else:
where = "``from list''"
raise TypeError(
f"Item in {where} must be str, " f"not {type(x).__name__}"
)
elif x == "*":
if not recursive and hasattr(module, "__all__"):
self._handle_fromlist(module, module.__all__, recursive=True)
elif not hasattr(module, x):
from_name = "{}.{}".format(module_name, x)
try:
self._gcd_import(from_name)
except ModuleNotFoundError as exc:
# Backwards-compatibility dictates we ignore failed
# imports triggered by fromlist for modules that don't
# exist.
if (
exc.name == from_name
and self.modules.get(from_name, _NEEDS_LOADING) is not None
):
continue
raise
return module
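    # For example, handling `from pkg import submodule` where `submodule` is a
    # module rather than an existing attribute of `pkg`: hasattr(module, "submodule")
    # is False, so _handle_fromlist falls back to self._gcd_import("pkg.submodule"),
    # which imports the submodule and installs it on `pkg`.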
def __import__(self, name, globals=None, locals=None, fromlist=(), level=0):
if level == 0:
module = self._gcd_import(name)
else:
globals_ = globals if globals is not None else {}
package = _calc___package__(globals_)
module = self._gcd_import(name, package, level)
if not fromlist:
# Return up to the first dot in 'name'. This is complicated by the fact
# that 'name' may be relative.
if level == 0:
return self._gcd_import(name.partition(".")[0])
elif not name:
return module
else:
# Figure out where to slice the module's name up to the first dot
# in 'name'.
cut_off = len(name) - len(name.partition(".")[0])
# Slice end needs to be positive to alleviate need to special-case
# when ``'.' not in name``.
module_name = demangle(module.__name__)
return self.modules[module_name[: len(module_name) - cut_off]]
else:
return self._handle_fromlist(module, fromlist)
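    # Worked example of the slicing above: __import__("bar.baz", ..., fromlist=(),
    # level=1) called from package "foo" resolves the module "foo.bar.baz"; then
    # cut_off = len("bar.baz") - len("bar") = 4, so the method returns
    # self.modules["foo.bar.baz"[:11 - 4]] == self.modules["foo.bar"], the module
    # bound to the first dotted component of the relative name.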
def _get_package(self, package):
"""Take a package name or module object and return the module.
If a name, the module is imported. If the passed or imported module
object is not a package, raise an exception.
"""
if hasattr(package, "__spec__"):
if package.__spec__.submodule_search_locations is None:
raise TypeError("{!r} is not a package".format(package.__spec__.name))
else:
return package
else:
module = self.import_module(package)
if module.__spec__.submodule_search_locations is None:
raise TypeError("{!r} is not a package".format(package))
else:
return module
def _zipfile_path(self, package, resource=None):
package = self._get_package(package)
assert package.__loader__ is self
name = demangle(package.__name__)
if resource is not None:
resource = _normalize_path(resource)
return f"{name.replace('.', '/')}/{resource}"
else:
return f"{name.replace('.', '/')}"
def _get_or_create_package(
self, atoms: List[str]
) -> "Union[_PackageNode, _ExternNode]":
cur = self.root
for i, atom in enumerate(atoms):
node = cur.children.get(atom, None)
if node is None:
node = cur.children[atom] = _PackageNode(None)
if isinstance(node, _ExternNode):
return node
if isinstance(node, _ModuleNode):
name = ".".join(atoms[:i])
raise ImportError(
f"inconsistent module structure. module {name} is not a package, but has submodules"
)
assert isinstance(node, _PackageNode)
cur = node
return cur
def _add_file(self, filename: str):
"""Assembles a Python module out of the given file. Will ignore files in the .data directory.
Args:
filename (str): the name of the file inside of the package archive to be added
"""
*prefix, last = filename.split("/")
if len(prefix) > 1 and prefix[0] == ".data":
return
package = self._get_or_create_package(prefix)
if isinstance(package, _ExternNode):
raise ImportError(
f"inconsistent module structure. package contains a module file {filename}"
f" that is a subpackage of a module marked external."
)
if last == "__init__.py":
package.source_file = filename
elif last.endswith(".py"):
package_name = last[: -len(".py")]
package.children[package_name] = _ModuleNode(filename)
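    # For example, adding "foo/bar/__init__.py" records it as the source_file of the
    # package node "foo.bar", while adding "foo/bar/baz.py" creates a _ModuleNode
    # child named "baz" under that package node.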
def _add_extern(self, extern_name: str):
*prefix, last = extern_name.split(".")
package = self._get_or_create_package(prefix)
if isinstance(package, _ExternNode):
return # the shorter extern covers this extern case
package.children[last] = _ExternNode()
_NEEDS_LOADING = object()
_ERR_MSG_PREFIX = "No module named "
_ERR_MSG = _ERR_MSG_PREFIX + "{!r}"
class _PathNode:
pass
class _PackageNode(_PathNode):
def __init__(self, source_file: Optional[str]):
self.source_file = source_file
self.children: Dict[str, _PathNode] = {}
class _ModuleNode(_PathNode):
__slots__ = ["source_file"]
def __init__(self, source_file: str):
self.source_file = source_file
class _ExternNode(_PathNode):
pass
# A private global registry of all modules that have been package-imported.
_package_imported_modules: WeakValueDictionary = WeakValueDictionary()
# `inspect` by default only looks in `sys.modules` to find source files for classes.
# Patch it to check our private registry of package-imported modules as well.
_orig_getfile = inspect.getfile
def _patched_getfile(object):
if inspect.isclass(object):
if object.__module__ in _package_imported_modules:
return _package_imported_modules[object.__module__].__file__
return _orig_getfile(object)
inspect.getfile = _patched_getfile
class _PackageResourceReader:
"""Private class used to support PackageImporter.get_resource_reader().
    Conforms to the importlib.abc.ResourceReader interface. Allowed to access
the innards of PackageImporter.
"""
def __init__(self, importer, fullname):
self.importer = importer
self.fullname = fullname
def open_resource(self, resource):
from io import BytesIO
return BytesIO(self.importer.load_binary(self.fullname, resource))
def resource_path(self, resource):
# The contract for resource_path is that it either returns a concrete
# file system path or raises FileNotFoundError.
if isinstance(
self.importer.zip_reader, DirectoryReader
) and self.importer.zip_reader.has_record(
os.path.join(self.fullname, resource)
):
return os.path.join(
self.importer.zip_reader.directory, self.fullname, resource
)
raise FileNotFoundError
def is_resource(self, name):
path = self.importer._zipfile_path(self.fullname, name)
return self.importer.zip_reader.has_record(path)
def contents(self):
from pathlib import Path
fullname_path = Path(self.importer._zipfile_path(self.fullname))
files = self.importer.zip_reader.get_all_records()
subdirs_seen = set()
for filename in files:
try:
relative = Path(filename).relative_to(fullname_path)
except ValueError:
continue
# If the path of the file (which is relative to the top of the zip
# namespace), relative to the package given when the resource
# reader was created, has a parent, then it's a name in a
# subdirectory and thus we skip it.
parent_name = relative.parent.name
if len(parent_name) == 0:
yield relative.name
elif parent_name not in subdirs_seen:
subdirs_seen.add(parent_name)
yield parent_name
| pytorch-master | torch/package/package_importer.py |
import _compat_pickle
import pickle
from .importer import Importer
class PackageUnpickler(pickle._Unpickler): # type: ignore[name-defined]
"""Package-aware unpickler.
This behaves the same as a normal unpickler, except it uses `importer` to
find any global names that it encounters while unpickling.
"""
def __init__(self, importer: Importer, *args, **kwargs):
super().__init__(*args, **kwargs)
self._importer = importer
def find_class(self, module, name):
# Subclasses may override this.
if self.proto < 3 and self.fix_imports: # type: ignore[attr-defined]
if (module, name) in _compat_pickle.NAME_MAPPING:
module, name = _compat_pickle.NAME_MAPPING[(module, name)]
elif module in _compat_pickle.IMPORT_MAPPING:
module = _compat_pickle.IMPORT_MAPPING[module]
mod = self._importer.import_module(module)
return getattr(mod, name)
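# A minimal usage sketch (assuming `importer` is a torch.package Importer, such as
# a PackageImporter instance, and `buf` is a readable binary file-like object
# containing a pickle stream): globals encountered while unpickling are resolved
# through `importer.import_module` instead of the regular import machinery.
#
#     unpickler = PackageUnpickler(importer, buf)
#     obj = unpickler.load()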
| pytorch-master | torch/package/_package_unpickler.py |
import re
from typing import Iterable, Union
GlobPattern = Union[str, Iterable[str]]
class GlobGroup:
"""A set of patterns that candidate strings will be matched against.
A candidate is composed of a list of segments separated by ``separator``, e.g. "foo.bar.baz".
A pattern contains one or more segments. Segments can be:
- A literal string (e.g. "foo"), which matches exactly.
- A string containing a wildcard (e.g. "torch*", or "foo*baz*"). The wildcard matches
any string, including the empty string.
- A double wildcard ("**"). This matches against zero or more complete segments.
Examples:
``torch.**``: matches ``torch`` and all its submodules, e.g. ``torch.nn`` and ``torch.nn.functional``.
``torch.*``: matches ``torch.nn`` or ``torch.functional``, but not ``torch.nn.functional``.
``torch*.**``: matches ``torch``, ``torchvision``, and all their submodules.
    A candidate will match the ``GlobGroup`` if it matches any of the ``include`` patterns and
none of the ``exclude`` patterns.
Args:
include (Union[str, Iterable[str]]): A string or list of strings,
each representing a pattern to be matched against. A candidate
            will match if it matches *any* include pattern.
exclude (Union[str, Iterable[str]]): A string or list of strings,
each representing a pattern to be matched against. A candidate
will be excluded from matching if it matches *any* exclude pattern.
separator (str): A string that delimits segments in candidates and
patterns. By default this is "." which corresponds to how modules are
named in Python. Another common value for this is "/", which is
the Unix path separator.
"""
def __init__(
self, include: GlobPattern, *, exclude: GlobPattern = (), separator: str = "."
):
self._dbg = f"GlobGroup(include={include}, exclude={exclude})"
self.include = GlobGroup._glob_list(include, separator)
self.exclude = GlobGroup._glob_list(exclude, separator)
self.separator = separator
def __str__(self):
return self._dbg
def __repr__(self):
return self._dbg
def matches(self, candidate: str) -> bool:
candidate = self.separator + candidate
return any(p.fullmatch(candidate) for p in self.include) and all(
not p.fullmatch(candidate) for p in self.exclude
)
@staticmethod
def _glob_list(elems: GlobPattern, separator: str = "."):
if isinstance(elems, str):
return [GlobGroup._glob_to_re(elems, separator)]
else:
return [GlobGroup._glob_to_re(e, separator) for e in elems]
@staticmethod
def _glob_to_re(pattern: str, separator: str = "."):
# to avoid corner cases for the first component, we prefix the candidate string
# with '.' so `import torch` will regex against `.torch`, assuming '.' is the separator
def component_to_re(component):
if "**" in component:
if component == "**":
return "(" + re.escape(separator) + "[^" + separator + "]+)*"
else:
raise ValueError("** can only appear as an entire path segment")
else:
return re.escape(separator) + ("[^" + separator + "]*").join(
re.escape(x) for x in component.split("*")
)
result = "".join(component_to_re(c) for c in pattern.split(separator))
return re.compile(result)
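# A minimal usage sketch illustrating the include/exclude semantics documented above:
#
#     group = GlobGroup(include="torch.**", exclude="torch.nn.functional")
#     group.matches("torch")                  # True: "torch.**" matches torch itself
#     group.matches("torch.nn")               # True: matched by "torch.**"
#     group.matches("torch.nn.functional")    # False: explicitly excluded
#     group.matches("torchvision")            # False: "torch.**" does not match "torchvision"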
| pytorch-master | torch/package/glob_group.py |