# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# See notes in typer_base.py for how the type computation logic works.
#
# This typer identifies the tensor size associated with a graph node.
# For example, if we have a random variable:
#
# @rv def flips():
#   return Bernoulli(coin())
#
# Then we need to know if coin() is a single probability or, if multiple,
# what shape the tensor is. BMG only supports two-dimensional arrays
# and there are restrictions on how we can produce multi-valued samples,
# multiply matrices, and so on.
#
# Every node must have a *consistent* size; consider for example this
# unlikely but legal model:
#
# @rv def weird(n):
#   if n == 0:
#     return Bernoulli(tensor([0.5, 0.5]))  # two values
#   else:
#     return Normal(0.0, 1.0)  # one value
#
# @rv def flip():
#   return Bernoulli(0.5)
#
# @functional def problem():
#   return weird(flip())
#
# What is the size of the node associated with "problem"? It does not have
# a consistent size, so we will mark it as unsized.
#
# The purpose of this is to avoid doing work to guess at what the sizes of
# nodes are in graphs where there is no possibility of the graph being
# legal. We also wish to avoid reporting confusing cascading errors based
# on incorrect guesses as to what the size of the node "should" be.
# Descendants of unsized nodes are also unsized; this is a clear and easily
# implemented rule.
#
# We use this logic in two main places in the compiler. First, when
# computing supports for stochastic control flow. If we have something like:
#
# @rv def flip_two():
#   return Bernoulli([0.5, 0.5])
#
# @rv def normal(n):
#   return Normal(0, 1)
#
# ...
# x = normal(flip_two())
#
# Then we do a stochastic control flow here; we call
# x00 = normal(tensor([0, 0]))
# x01 = normal(tensor([0, 1]))
# x10 = normal(tensor([1, 0]))
# x11 = normal(tensor([1, 1]))
# ...
#
# Right now this kind of stochastic control flow does not work because we
# have no ability to generate a BMG graph of the form
# CHOOSE(flip_two(), {[0, 0]: x00, ... }) but still we need to know that
# this is the kind of stochastic control flow we're going to attempt
# *because we need to produce a sensible error message saying that this is
# not supported*!
#
# Second, when doing various rewrites of the graph we need to know what
# node sizes are so that we can either rewrite operations that cannot be
# represented in BMG into unvectorized operations, or produce sensible
# error messages if we cannot.

from functools import reduce
from typing import Callable, Dict, Set

import beanmachine.ppl.compiler.bmg_nodes as bn
import torch
from beanmachine.ppl.compiler.typer_base import TyperBase
from torch import Size

# We use an impossible value as a marker for unsizable:
Unsized = Size([-1])
Scalar = Size([])

# These nodes are always scalars no matter what their input:
_always_scalar: Set[type] = {
    bn.CategoricalNode, bn.CategoricalLogitNode, bn.ExpProductFactorNode,
    bn.FlatNode, bn.InNode, bn.IsNode, bn.IsNotNode,
    bn.ItemNode, bn.LogSumExpVectorNode, bn.MatrixSumNode, bn.NotInNode,
    bn.NotNode, bn.SumNode, bn.ToIntNode, bn.ToNegativeRealNode,
    bn.ToRealNode, bn.ToPositiveRealNode, bn.ToProbabilityNode,
    bn.VectorIndexNode,
}

# The size of these nodes is just the size of broadcasting all their inputs.
_broadcast_the_inputs: Set[type] = {
    bn.AdditionNode, bn.BernoulliLogitNode, bn.BernoulliNode, bn.BetaNode,
    bn.BinomialLogitNode, bn.BinomialNode, bn.BitAndNode, bn.BitOrNode,
    bn.BitXorNode, bn.Chi2Node, bn.CholeskyNode, bn.ComplementNode,
    bn.DivisionNode, bn.DirichletNode, bn.EqualNode,
    bn.ElementwiseMultiplyNode, bn.ExpM1Node, bn.ExpNode, bn.Exp2Node,
    bn.FloorDivNode, bn.GammaNode, bn.GreaterThanEqualNode,
    bn.GreaterThanNode, bn.HalfCauchyNode, bn.HalfNormalNode, bn.InvertNode,
    bn.LessThanEqualNode, bn.LessThanNode, bn.LogisticNode, bn.LogNode,
    bn.Log10Node, bn.Log1pNode, bn.Log2Node, bn.LogProbNode,
    bn.LogSumExpNode, bn.LogAddExpNode, bn.Log1mexpNode, bn.LShiftNode,
    bn.MatrixAddNode, bn.MatrixComplementNode, bn.MatrixLog1mexpNode,
    bn.MatrixExpNode, bn.MatrixLogNode, bn.MatrixNegateNode,
    bn.MatrixPhiNode, bn.MatrixScaleNode, bn.ModNode, bn.MultiplicationNode,
    bn.NegateNode, bn.NormalNode, bn.NotEqualNode, bn.Observation,
    bn.PhiNode, bn.PoissonNode, bn.PowerNode, bn.Query,
    bn.RShiftNode, bn.SampleNode, bn.SquareRootNode, bn.StudentTNode,
    bn.ToNegativeRealMatrixNode, bn.ToPositiveRealMatrixNode,
    bn.ToRealMatrixNode, bn.UniformNode, bn.TransposeNode,
}


def _broadcast_two(x: Size, y: Size) -> Size:
    # Given two sizes, what is their broadcast size, if any? Rather than
    # replicate the logic in torch which does this computation, we simply
    # construct two all-zero tensors of the given sizes and try to add them.
    # If the addition succeeds then the size of the sum is the size we want.
    #
    # TODO: Is there a better way to do this other than try it and see what
    # happens?
    # TODO: Try torch.distributions.utils.broadcast_all
    if x == Unsized or y == Unsized:
        return Unsized
    try:
        return (torch.zeros(x) + torch.zeros(y)).size()
    except Exception:
        return Unsized


def _broadcast(*sizes: Size) -> Size:
    # Many tensor operations in torch have "broadcast" semantics. A brief
    # explanation:
    #
    # If we compute tensor([1, 2]) + tensor([20, 30]) we do pairwise
    # addition to get tensor([21, 32]) as the sum. But what happens if the
    # dimensions do not match? In this case we "broadcast" the tensors; we
    # find a tensor size greater than or equal to the sizes of both operands
    # and duplicate the data as necessary.
    #
    # For example, if we are adding tensor([1, 2]) + tensor([3]) then the
    # right summand is broadcast to tensor([3, 3]), and we get
    # tensor([4, 5]) as the sum.
    #
    # Note that not all sizes can be broadcast. Summing
    # tensor([1, 2]) + tensor([10, 20, 30]) is not legal because there is
    # no obvious way to expand [1, 2] to be the same size as [10, 20, 30].
    #
    # We often need to answer the question "given n sizes, what is the size
    # of the broadcast of all n sizes?" This method does that computation.
    return reduce(_broadcast_two, sizes)


def size_to_str(size: Size) -> str:
    if size == Unsized:
        return "unsized"
    return "[" + ",".join(str(i) for i in size) + "]"


def is_scalar(s: Size) -> bool:
    return all(d == 1 for d in s)


class Sizer(TyperBase[Size]):

    _dispatch: Dict[type, Callable]

    def __init__(self) -> None:
        TyperBase.__init__(self)
        self._dispatch = {
            bn.BroadcastNode: self._size_broadcast,
            bn.ChoiceNode: self._size_choice,
            bn.ColumnIndexNode: self._size_column,
            bn.FillMatrixNode: self._size_broadcast,  # same as broadcast
            bn.IfThenElseNode: self._size_if,
            bn.IndexNode: self._size_index,
            bn.MatrixMultiplicationNode: self._size_mm,
            bn.SwitchNode: self._size_switch,
            bn.TensorNode: lambda n: n._size,
            bn.ToMatrixNode: self._size_to_matrix,
            bn.LogSumExpNode: self._size_log_sum_exp_node,
            bn.LogSumExpVectorNode: self._size_log_sum_exp_vector_node,
            bn.LogSumExpTorchNode: self._size_log_sum_exp_torch_node,
            bn.LKJCholeskyNode: self._size_lkj_cholesky,
        }
        # TODO:
        # ColumnIndexNode
        # LogSumExpTorchNode -- note that final parameter affects size
        # VectorIndexNode
        # LogSumExpVectorNode

    def _size_choice(self, node: bn.ChoiceNode) -> Size:
        s = self[node.inputs[1]]
        for i in range(1, len(node.inputs)):
            if self[node.inputs[i]] != s:
                return Unsized
        return s

    def _size_if(self, node: bn.IfThenElseNode) -> Size:
        consequence = self[node.inputs[1]]
        alternative = self[node.inputs[2]]
        if consequence != alternative:
            return Unsized
        return consequence

    def _size_index(self, node: bn.IndexNode) -> Size:
        collection_size = self[node.left]
        if len(collection_size) == 0:
            # This operation is illegal in torch, so let's just say unsized.
            return Unsized
        result_size = collection_size[1:]
        assert isinstance(result_size, Size)
        return result_size

    def _size_mm(self, node: bn.MatrixMultiplicationNode) -> Size:
        # Just do the multiplication and see what size we get.
        # TODO: Torch supports both broadcasting and non-broadcasting
        # versions of matrix multiplication. We might need to track both
        # separately and ensure that we compute size, support, and so on,
        # accordingly.
        #
        # The behavior of mm is: both operands must be a matrix.
        # The behavior of matmul is:
        # * do the dot product if both operands are 1-d
        # * do the matrix product if both operands are 2-d
        # * 1-d matmul 2-d converts the 1-d to 2-d and does matrix product
        # * 2-d matmul 1-d converts the 1-d to 2-d, transposes it and does
        #   matrix product
        # * n-dimensional cases are complicated.
        left = torch.zeros(self[node.left])
        right = torch.zeros(self[node.right])
        return left.matmul(right).size()

    def _size_switch(self, node: bn.SwitchNode) -> Size:
        s = self[node.inputs[2]]
        for i in range(1, (len(node.inputs) - 1) // 2):
            if self[node.inputs[i * 2 + 2]] != s:
                return Unsized
        return s

    def _size_broadcast(self, node: bn.BMGNode) -> Size:
        # We have the same logic for broadcast and fill.
        assert isinstance(node, bn.FillMatrixNode) or isinstance(
            node, bn.BroadcastNode
        )
        rows = node.inputs[1]
        assert isinstance(rows, bn.NaturalNode)
        rows = rows.value
        columns = node.inputs[2]
        assert isinstance(columns, bn.NaturalNode)
        columns = columns.value
        # As with to-matrix below, the sizer gives the size that the tensor
        # would be *in Python*, not *in Eigen*, so the rows and columns are
        # swapped.
        if columns == 1:
            return Size([rows])
        return Size([columns, rows])

    def _size_to_matrix(self, node: bn.ToMatrixNode) -> Size:
        # The size of a 2-d torch tensor is [rows, columns], but BMG
        # matrices are column-major. Therefore the values are swapped into
        # the opposite order. That is, if we have a tensor of the form
        # [[A, B, C], [D, E, F]] with two rows and three columns, that will
        # be transformed into a ToMatrix node with three rows and two
        # columns. When we ask "what's the size of the equivalent of this
        # ToMatrix in torch?" we need to swap them back.
        #
        # Moreover, just because BMG matrices are all exactly-2 dimensional
        # does not imply that the original tensor was. If we have a vector,
        # size it as a vector.
        rows = node.inputs[0]
        assert isinstance(rows, bn.NaturalNode)
        rows = rows.value
        columns = node.inputs[1]
        assert isinstance(columns, bn.NaturalNode)
        columns = columns.value
        if columns == 1:
            return Size([rows])
        return Size([columns, rows])

    def _size_column(self, node: bn.ColumnIndexNode) -> Size:
        size_tensor = self[node.inputs.inputs[0]]
        # The column size is always the last value of the shape, since it's
        # the innermost group.
        return Size([size_tensor[len(size_tensor) - 1]])

    def _size_log_sum_exp_vector_node(self, node: bn.LogSumExpVectorNode) -> Size:
        # This expects a single-column matrix (and sums together all entries
        # in the column?)
        operand_size = self[node.operand]
        dim = len(operand_size)
        if dim <= 1:
            return Scalar
        else:
            # TODO: is this possible given the expectation?
            dims = []
            for d in range(0, dim - 1):
                dims.append(operand_size[d])
            return Size(dims)

    def _size_log_sum_exp_node(self, node: bn.LogSumExpNode) -> Size:
        # Expects a list of values and computes
        # log(exp(v_1) + ... + exp(v_n)), so the size should be equal to the
        # value size, and all input sizes must be the same.
        if len(node.inputs.inputs) == 0:
            return Unsized
        operand_size = self[node.inputs.inputs[0]]
        for operand in node.inputs.inputs:
            if self[operand] != operand_size:
                return Unsized
        return operand_size

    def _size_log_sum_exp_torch_node(self, node: bn.LogSumExpTorchNode) -> Size:
        # It has three operands: the tensor being summed, the dimension
        # along which it is summed, and a flag giving the shape.
        if len(node.inputs.inputs) != 3:
            return Unsized
        tensor_being_summed = node.inputs.inputs[0]
        dim_to_sum_node = node.inputs.inputs[1]
        dim_to_sum = -1
        if isinstance(dim_to_sum_node, bn.ConstantNode):
            dim_to_sum = dim_to_sum_node.value
        keep_dim_node = node.inputs.inputs[2]
        keep_dim = None
        if isinstance(keep_dim_node, bn.ConstantNode):
            keep_dim = keep_dim_node.value
        operand_size = self[tensor_being_summed]
        if keep_dim is False:
            if dim_to_sum != -1:
                new_size = []
                for j, dim in enumerate(operand_size):
                    if j != dim_to_sum:
                        new_size.append(dim)
                return Size(new_size)
            else:
                return Unsized
        elif keep_dim is True:
            return operand_size
        else:
            # TODO: We can't compute the size at compile time, but we don't
            # have a way to represent dynamic sizes in Size right now.
            return Unsized

    def _size_lkj_cholesky(self, node: bn.LKJCholeskyNode) -> Size:
        dim = node.dim
        assert isinstance(dim, bn.ConstantNode)
        dim_value = dim.value
        assert isinstance(dim_value, int) and dim_value >= 2
        return Size([dim_value, dim_value])

    # This implements the abstract base type method.
    def _compute_type_inputs_known(self, node: bn.BMGNode) -> Size:
        # If there is any input node whose size cannot be determined, then
        # *none* of its descendants can be determined, even if a descendant
        # node always has the same type regardless of its inputs. This
        # ensures that (1) we only attempt to assign sizes to graphs that
        # are supported by the BMG type system, and (2) will help us avoid
        # presenting cascading errors to the user in the event that a graph
        # violates a typing rule.
        for i in node.inputs:
            if self[i] == Unsized:
                return Unsized
        if isinstance(node, bn.ConstantNode):
            if isinstance(node.value, torch.Tensor):
                return node.value.size()
            return Scalar
        if hasattr(node, "_size"):
            return node._size  # pyre-ignore
        t = type(node)
        if t in _always_scalar:
            result = Scalar
        elif t in _broadcast_the_inputs:
            result = _broadcast(*(self[i] for i in node.inputs))
        elif t in self._dispatch:
            result = self._dispatch[t](node)
        else:
            result = Unsized
        return result
beanmachine-main
src/beanmachine/ppl/compiler/sizer.py
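The zero-tensor trick that _broadcast_two and _size_mm rely on is easy to check directly in torch. A minimal sketch (illustrative only, not part of the repo; broadcast_size is a hypothetical stand-in for _broadcast_two):

import torch
from torch import Size

def broadcast_size(x: Size, y: Size) -> Size:
    # Mirror _broadcast_two: add two all-zero tensors and read the size of
    # the sum; a RuntimeError means the sizes cannot be broadcast together.
    try:
        return (torch.zeros(x) + torch.zeros(y)).size()
    except RuntimeError:
        return Size([-1])  # the Unsized marker used by sizer.py

assert broadcast_size(Size([2, 1]), Size([3])) == Size([2, 3])
assert broadcast_size(Size([2]), Size([3])) == Size([-1])
# The same trick sizes a matrix multiply, as in _size_mm:
assert torch.zeros(Size([2, 3])).matmul(torch.zeros(Size([3, 4]))).size() == Size([2, 4])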
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import beanmachine.ppl.compiler.bmg_nodes as bn
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.bmg_types import Boolean
from beanmachine.ppl.compiler.fix_problem import (
    Inapplicable,
    node_fixer_first_match,
    NodeFixer,
    NodeFixerResult,
    type_guard,
)
from beanmachine.ppl.compiler.lattice_typer import LatticeTyper


class BoolComparisonFixer:
    """This class takes a Bean Machine Graph builder and replaces all
    comparison operators whose operands are bool with semantically
    equivalent IF nodes."""

    _bmg: BMGraphBuilder
    _typer: LatticeTyper

    def __init__(self, bmg: BMGraphBuilder, typer: LatticeTyper) -> None:
        self._bmg = bmg
        self._typer = typer

    def _both_bool(self, n: bn.BMGNode) -> bool:
        return self._typer.is_bool(n.left) and self._typer.is_bool(  # pyre-ignore
            n.right  # pyre-ignore
        )

    def _replace_bool_equals(self, node: bn.EqualNode) -> NodeFixerResult:
        # 1 == y --> y
        # x == 1 --> x
        # 0 == y --> not y
        # x == 0 --> not x
        # x == y --> if x then y else not y
        if not self._both_bool(node):
            return Inapplicable
        if bn.is_one(node.left):
            return node.right
        if bn.is_one(node.right):
            return node.left
        if bn.is_zero(node.left):
            return self._bmg.add_complement(node.right)
        if bn.is_zero(node.right):
            return self._bmg.add_complement(node.left)
        alt = self._bmg.add_complement(node.right)
        return self._bmg.add_if_then_else(node.left, node.right, alt)

    def _replace_bool_not_equals(self, node: bn.NotEqualNode) -> NodeFixerResult:
        # 1 != y --> not y
        # x != 1 --> not x
        # 0 != y --> y
        # x != 0 --> x
        # x != y --> if x then not y else y
        if not self._both_bool(node):
            return Inapplicable
        if bn.is_one(node.left):
            return self._bmg.add_complement(node.right)
        if bn.is_one(node.right):
            return self._bmg.add_complement(node.left)
        if bn.is_zero(node.left):
            return node.right
        if bn.is_zero(node.right):
            return node.left
        cons = self._bmg.add_complement(node.right)
        return self._bmg.add_if_then_else(node.left, cons, node.right)

    def _replace_bool_gte(self, node: bn.GreaterThanEqualNode) -> NodeFixerResult:
        # 1 >= y --> true
        # x >= 1 --> x
        # 0 >= y --> not y
        # x >= 0 --> true
        # x >= y --> if x then true else not y
        if not self._both_bool(node):
            return Inapplicable
        if bn.is_one(node.left):
            return self._bmg.add_constant_of_type(True, Boolean)
        if bn.is_one(node.right):
            return node.left
        if bn.is_zero(node.left):
            return self._bmg.add_complement(node.right)
        if bn.is_zero(node.right):
            return self._bmg.add_constant_of_type(True, Boolean)
        cons = self._bmg.add_constant_of_type(True, Boolean)
        alt = self._bmg.add_complement(node.right)
        return self._bmg.add_if_then_else(node.left, cons, alt)

    def _replace_bool_gt(self, node: bn.GreaterThanNode) -> NodeFixerResult:
        # 1 > y --> not y
        # x > 1 --> false
        # 0 > y --> false
        # x > 0 --> x
        # x > y --> if x then not y else false
        if not self._both_bool(node):
            return Inapplicable
        if bn.is_one(node.left):
            return self._bmg.add_complement(node.right)
        if bn.is_one(node.right):
            return self._bmg.add_constant_of_type(False, Boolean)
        if bn.is_zero(node.left):
            return self._bmg.add_constant_of_type(False, Boolean)
        if bn.is_zero(node.right):
            return node.left
        cons = self._bmg.add_complement(node.right)
        alt = self._bmg.add_constant_of_type(False, Boolean)
        return self._bmg.add_if_then_else(node.left, cons, alt)

    def _replace_bool_lte(self, node: bn.LessThanEqualNode) -> NodeFixerResult:
        # 1 <= y --> y
        # x <= 1 --> true
        # 0 <= y --> true
        # x <= 0 --> not x
        # x <= y --> if x then y else true
        if not self._both_bool(node):
            return Inapplicable
        if bn.is_one(node.left):
            return node.right
        if bn.is_one(node.right):
            return self._bmg.add_constant_of_type(True, Boolean)
        if bn.is_zero(node.left):
            return self._bmg.add_constant_of_type(True, Boolean)
        if bn.is_zero(node.right):
            return self._bmg.add_complement(node.left)
        alt = self._bmg.add_constant_of_type(True, Boolean)
        return self._bmg.add_if_then_else(node.left, node.right, alt)

    def _replace_bool_lt(self, node: bn.LessThanNode) -> NodeFixerResult:
        # 1 < y --> false
        # x < 1 --> not x
        # 0 < y --> y
        # x < 0 --> false
        # x < y --> if x then false else y
        if not self._both_bool(node):
            return Inapplicable
        if bn.is_one(node.left):
            return self._bmg.add_constant_of_type(False, Boolean)
        if bn.is_one(node.right):
            return self._bmg.add_complement(node.left)
        if bn.is_zero(node.left):
            return node.right
        if bn.is_zero(node.right):
            return self._bmg.add_constant_of_type(False, Boolean)
        cons = self._bmg.add_constant_of_type(False, Boolean)
        return self._bmg.add_if_then_else(node.left, cons, node.right)


def bool_comparison_fixer(bmg: BMGraphBuilder, typer: LatticeTyper) -> NodeFixer:
    bcf = BoolComparisonFixer(bmg, typer)
    # TODO: Should we treat "x is y" the same as "x == y" when they are
    # bools, or should that be an error?
    return node_fixer_first_match(
        [
            type_guard(bn.EqualNode, bcf._replace_bool_equals),
            type_guard(bn.GreaterThanEqualNode, bcf._replace_bool_gte),
            type_guard(bn.GreaterThanNode, bcf._replace_bool_gt),
            type_guard(bn.LessThanEqualNode, bcf._replace_bool_lte),
            type_guard(bn.LessThanNode, bcf._replace_bool_lt),
            type_guard(bn.NotEqualNode, bcf._replace_bool_not_equals),
        ]
    )
beanmachine-main
src/beanmachine/ppl/compiler/fix_bool_comparisons.py
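Each rewrite above can be sanity-checked with a quick truth table in plain Python (illustrative only, not part of the repo):

# Verify the boolean identities used by BoolComparisonFixer over all
# operand pairs.
for x in (False, True):
    for y in (False, True):
        assert (x == y) == (y if x else (not y))      # _replace_bool_equals
        assert (x != y) == ((not y) if x else y)      # _replace_bool_not_equals
        assert (x >= y) == (True if x else (not y))   # _replace_bool_gte
        assert (x > y) == ((not y) if x else False)   # _replace_bool_gt
        assert (x <= y) == (y if x else True)         # _replace_bool_lte
        assert (x < y) == (False if x else y)         # _replace_bool_lt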
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from beanmachine.ppl.model.statistical_model import functional, random_variable

allowed_functions = {dict, list, set, super, random_variable, functional}

# TODO: Allowing these constructions raises additional problems that we
# have not yet solved. For example, what happens if someone searches a list
# for a value, but the list contains a graph node? And so on.
beanmachine-main
src/beanmachine/ppl/compiler/beanstalk_common.py
beanmachine-main
src/beanmachine/ppl/compiler/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

#
# Tensorizing and detensorizing
#
# TODO: The fact that we use vectorizing and tensorizing as synonyms
# throughout all this code is unnecessarily confusing. Pick one and stick
# to it.
#
# There are a number of important mismatches between the BMG and PyTorch
# type systems that we need to address when converting the accumulated
# graph of a PyTorch model into a BMG graph data structure.
#
# * PyTorch values are tensors: rectangular arrays of arbitrary
#   dimensionality. BMG values are either "scalars" -- single values -- or
#   Eigen arrays == two-dimensional rectangles.
#
# * PyTorch uses the same functions regardless of dimensionality;
#   tensor(10).log() and tensor([[[10, 20, 30], [40, 50, 60]]]).log() are
#   both represented by .log(). BMG has distinct operators for LOG and
#   MATRIX_LOG, MULTIPLY and ELEMENTWISE_MULTIPLY, and so on.
#
# * PyTorch's type system describes *storage format*: a tensor can be a
#   tensor of integers or a tensor of doubles. BMG's type system describes
#   *data semantics*: floating point types are real, positive real,
#   negative real and probability; discrete types are natural and bool.
#
# Moreover: we are gradually adding matrix-flavor operators to BMG, but
# they're not all there yet. It would be nice to be able to compile models
# that use multidimensional tensors even if doing so results in a larger
# graph size due to use of single-value operator nodes.
#
# To address these problems we perform two rewrites first, before all other
# graph rewrites: *tensorizing* (in tensorizer_transformer.py) and
# *detensorizing* (this module).
#
# * Tensorizing is the more straightforward operation. We identify graph
#   nodes in the accumulated graph which correspond to array operators
#   already implemented in BMG. In particular, we look for nodes
#   representing elementwise multiplication, addition, division, log, exp,
#   and so on, where the operands are multidimensional arrays. Those nodes
#   are replaced with the appropriate matrix-aware operator node.
#
# * Detensorizing is the more complex rewrite because it attempts to
#   implement "batch" operations by doing them on individual elements of a
#   matrix, and then combining the results back into a matrix. The
#   long-term goal is to render detensorizing unnecessary by having all the
#   necessary matrix operators in BMG.
#
# Detensorizing uses a few basic techniques in combination. We'll go
# through an example here. Suppose we have a MatrixAdd -> HalfCauchy -> X,
# where X is a node that expects a matrix input but there is no matrix
# version of HalfCauchy. What do we do?
#
# * "Splitting" takes an indexable node and breaks it up into its
#   individual scalar quantities.
#
#   MatrixAdd                0 MatrixAdd 1
#       |       --split-->    \    /  \    /
#   HalfCauchy               [ index , index ]
#       |                          |
#       ~                      HalfCauchy
#       |                          |
#       X                          ~
#                                  |
#                                  X
#
#   In this example the input would be the matrix add and the replacement
#   would be a list of index nodes.
#
# * "Scattering" takes the now-ill-formed graph produced by splitting and
#   moves the list "down the graph".
#
#   0 MatrixAdd 1                    0 MatrixAdd 1
#    \    /  \    /                   \    /  \    /
#   [ index , index ]  --scatter-->   index    index
#         |                             |        |
#     HalfCauchy                   HalfCauchy  HalfCauchy
#         |                             |        |
#         ~                            [~    ,    ~]
#         |                                  |
#         X                                  X
#
# * Finally, "merging" turns a list of nodes into a tensor node:
#
#   0 MatrixAdd 1                  0 MatrixAdd 1
#    \    /  \    /                 \    /  \    /
#    index    index   --merge-->    index    index
#      |        |                     |        |
#  HalfCauchy  HalfCauchy         HalfCauchy  HalfCauchy
#      |        |                     |        |
#     [~    ,    ~]                   ~        ~
#           |                          \      /
#           X                           Tensor
#                                          |
#                                          X
#
# Now the graph is well-formed again, and we've solved the type system
# problem that there is not (yet) a "matrix half Cauchy", at the cost of
# having to run some tricky code and generating O(n) index and HalfCauchy
# nodes.
#
# The task of the devectorizer transformer is for each node to identify
# whether it currently needs to be split, scattered or merged in order to
# fix a problem.

import typing
from enum import Enum
from typing import Callable, Dict, List

import beanmachine.ppl.compiler.bmg_nodes as bn
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.broadcaster import broadcast_fnc
from beanmachine.ppl.compiler.copy_and_replace import (
    Cloner,
    copy_and_replace,
    NodeTransformer,
    TransformAssessment,
)
from beanmachine.ppl.compiler.error_report import ErrorReport, UnsizableNode
from beanmachine.ppl.compiler.fix_problem import (
    GraphFixer,
    GraphFixerResult,
    sequential_graph_fixer,
)
from beanmachine.ppl.compiler.sizer import is_scalar, Size, Sizer, Unsized
from beanmachine.ppl.compiler.tensorizer_transformer import Tensorizer

# These operator nodes take a single matrix input; they do not necessarily
# produce a matrix output.
_unary_tensor_ops = [
    bn.LogSumExpVectorNode, bn.MatrixComplementNode, bn.MatrixExpNode,
    bn.MatrixLogNode, bn.MatrixLog1mexpNode, bn.MatrixNegateNode,
    bn.MatrixPhiNode, bn.MatrixSumNode, bn.TransposeNode,
    bn.ToNegativeRealMatrixNode, bn.ToPositiveRealMatrixNode,
    bn.ToRealMatrixNode, bn.CholeskyNode,
]

# These operator nodes take two matrix inputs.
_binary_tensor_ops = [bn.ElementwiseMultiplyNode, bn.MatrixAddNode]

# These nodes represent constant matrix values.
_tensor_constants = [
    bn.ConstantProbabilityMatrixNode, bn.ConstantBooleanMatrixNode,
    bn.ConstantNegativeRealMatrixNode, bn.ConstantPositiveRealMatrixNode,
    bn.ConstantRealMatrixNode, bn.ConstantSimplexMatrixNode,
    bn.ConstantTensorNode, bn.UntypedConstantNode,
]

# These distributions produce matrix-valued samples.
# TODO: Why is categorical on this list? Categorical has a matrix-valued
# *input* but produces a natural-valued *output*. This is likely an error;
# investigate further.
_tensor_valued_distributions = [
    bn.CategoricalNode,
    bn.DirichletNode,
    bn.LKJCholeskyNode,
]

# These are nodes which are *possibly* allowed to be the left-hand input of
# an indexing operation.
_indexable_node_types = [
    bn.ColumnIndexNode, bn.ConstantTensorNode, bn.ElementwiseMultiplyNode,
    bn.IndexNode, bn.MatrixAddNode, bn.MatrixComplementNode,
    bn.MatrixLogNode, bn.MatrixLog1mexpNode, bn.MatrixExpNode,
    bn.MatrixPhiNode, bn.MatrixScaleNode, bn.MatrixMultiplicationNode,
    bn.MatrixNegateNode, bn.SampleNode, bn.TensorNode, bn.ToMatrixNode,
    bn.UntypedConstantNode,
]


# This is used to describe the requirements on the *input* of a node; for
# example, matrix scale requires that its first input be a matrix
# ("TENSOR") and its second a scalar.
class ElementType(Enum):
    TENSOR = 1
    SCALAR = 2
    ANY = 3


# This describes what needs to happen to a node.
class DevectorizeTransformation(Enum):
    # The node needs to be rewritten.
    YES = 1
    # The node needs to be rewritten with a merge operation.
    YES_WITH_MERGE = 2
    # The node is fine as it is.
    NO = 3


def _size_is_devectorizable(s: Size) -> bool:
    # TODO: support arbitrary devectorizing
    is_vector_or_matrix = not is_scalar(s) and len(s) <= 2
    return s != Unsized and is_vector_or_matrix


class CopyContext:
    def __init__(self):
        self.devectorized_nodes: Dict[bn.BMGNode, List[bn.BMGNode]] = {}
        self.clones: Dict[bn.BMGNode, bn.BMGNode] = {}


def _parameter_to_type_mm(node: bn.MatrixMultiplicationNode, index: int) -> ElementType:
    assert index == 0 or index == 1
    return ElementType.TENSOR


def _parameter_to_type_single_index(index: int) -> ElementType:
    if index == 0:
        return ElementType.TENSOR
    else:
        return ElementType.SCALAR


def _parameter_to_type_multi_index(node: bn.IndexNode, index: int) -> ElementType:
    if index == 0:
        return ElementType.TENSOR
    if len(node.inputs.inputs) > 2:
        return ElementType.TENSOR
    else:
        return ElementType.SCALAR


def _parameter_to_type_sample(node: bn.SampleNode, i: int) -> ElementType:
    if _tensor_valued_distributions.__contains__(type(node.operand)):
        return ElementType.TENSOR
    else:
        return ElementType.SCALAR


def _parameter_to_type_query(sizer: Sizer, node: bn.Query, index: int) -> ElementType:
    assert index == 0
    original_size = sizer[node]
    if original_size == Unsized or not is_scalar(original_size):
        return ElementType.TENSOR
    else:
        return ElementType.SCALAR


def _parameter_to_type_obs(node: bn.Observation, index: int) -> ElementType:
    # TODO: What is the expectation for Observations? From the Dirichlet
    # tests it appears they can be tensors; for everything else, it looks
    # like they must be scalars. Until I find out more, I'm implementing the
    # solution that enables all existing tests to pass.
    sample = node.inputs.inputs[0]
    dist = sample.inputs.inputs[0]
    if _tensor_valued_distributions.__contains__(type(dist)):
        return ElementType.TENSOR
    else:
        return ElementType.SCALAR


def _parameter_to_type_matrix_scale(
    node: bn.MatrixScaleNode, index: int
) -> ElementType:
    if index == 0:
        return ElementType.SCALAR
    if index == 1:
        return ElementType.TENSOR
    else:
        raise ValueError(
            f"MatrixScale only has 2 inputs but index of {index} was provided"
        )


def _parameter_to_type_log_sum_exp(node: bn.LogSumExpNode, index: int) -> ElementType:
    return ElementType.SCALAR


def _parameter_to_type_torch_log_sum_exp(
    node: bn.LogSumExpTorchNode, index: int
) -> ElementType:
    assert index <= 2
    if index == 0:
        return ElementType.TENSOR
    else:
        return ElementType.SCALAR


# The devectorizer has two public APIs.
#
# assess_node determines if the devectorizer can operate on this node at
# all, which comes down to determining if we have information about the
# shape of the value in the original Python model or not. If we cannot
# determine the operation's shape then we cannot know how to devectorize it.
#
# transform_node says how to replace each node; it returns:
#
# * None, indicating that the node should be deleted
# * a node, giving the drop-in replacement for the given node
# * a list of nodes, which will later be merged or scattered.
class Devectorizer(NodeTransformer):
    def __init__(self, cloner: Cloner, sizer: Sizer):
        self.copy_context = CopyContext()
        self.cloner = cloner
        self.sizer = sizer
        self._parameter_to_type = {
            bn.MatrixMultiplicationNode: _parameter_to_type_mm,
            bn.ColumnIndexNode: lambda n, i: _parameter_to_type_single_index(i),
            bn.VectorIndexNode: lambda n, i: _parameter_to_type_single_index(i),
            bn.SampleNode: _parameter_to_type_sample,
            bn.LogSumExpNode: _parameter_to_type_log_sum_exp,
            bn.LogSumExpTorchNode: _parameter_to_type_torch_log_sum_exp,
            bn.Query: lambda n, i: _parameter_to_type_query(self.sizer, n, i),
            bn.Observation: _parameter_to_type_obs,
            bn.MatrixScaleNode: _parameter_to_type_matrix_scale,
            bn.IndexNode: _parameter_to_type_multi_index,
            bn.SwitchNode: self._parameter_to_type_switch,
        }

    def _parameter_to_type_switch(self, node: bn.SwitchNode, index: int) -> ElementType:
        if index == 0 or index % 2 == 1:
            return ElementType.SCALAR
        else:
            operand_of_concern = node.inputs.inputs[index]
            size = self.sizer[operand_of_concern]
            if size == Unsized:
                raise ValueError("every node should have been sized")
            if is_scalar(size):
                return ElementType.SCALAR
            else:
                return ElementType.TENSOR

    def __requires_element_type_at(self, node: bn.BMGNode, index: int) -> ElementType:
        node_type = type(node)
        if _tensor_valued_distributions.__contains__(node_type):
            return ElementType.TENSOR
        if _binary_tensor_ops.__contains__(node_type):
            return ElementType.TENSOR
        if self._parameter_to_type.__contains__(node_type):
            return self._parameter_to_type[node_type](node, index)
        if _unary_tensor_ops.__contains__(node_type):
            assert index == 0
            return ElementType.TENSOR
        else:
            return ElementType.SCALAR

    def __devectorize_transformation_type(
        self, node: bn.BMGNode
    ) -> DevectorizeTransformation:
        size = self.sizer[node]
        is_eligible_for_devectorize = _size_is_devectorizable(size) and not isinstance(
            node, bn.Query
        )
        if is_eligible_for_devectorize:
            # Determine if it needs to be split because the parent was split
            # but could not be merged. This is the case for example with a
            # tensor version of normal. Suppose we draw a sample and that
            # sample is the operand to a matrix multiply. The sample doesn't
            # need to be split because it doesn't consume tensors... it
            # needs to be split because its parent is no longer a tensor and
            # cannot be a tensor.
            def operand_is_no_longer_tensor(n: bn.BMGNode) -> bool:
                return self.copy_context.devectorized_nodes.__contains__(n) and not (
                    self.copy_context.clones.__contains__(n)
                )

            has_upstream_scatter_requirement = any(
                operand_is_no_longer_tensor(o) for o in node.inputs.inputs
            )

            # Almost all distributions are scalar valued and cannot be merged.
            if isinstance(node, bn.DistributionNode):
                if not _tensor_valued_distributions.__contains__(type(node)):
                    return DevectorizeTransformation.YES

            # It's possible that we need to split X because an operand has
            # become an unmergeable tensor; however, what if X is a tensor
            # operand for a downstream tensor consumer? Then we need both.
            has_merge_requirement = False
            has_downstream_scatter_requirement = False
            for consumer in node.outputs.items:
                index_of_me = next(
                    i
                    for i, producer in enumerate(consumer.inputs.inputs)
                    if producer == node
                )
                required_type = self.__requires_element_type_at(consumer, index_of_me)
                if required_type == ElementType.TENSOR:
                    has_merge_requirement = True
                elif required_type == ElementType.SCALAR:
                    has_downstream_scatter_requirement = True

            # It's possible that both tensor and scatter versions of the
            # operands exist, but we can't use them because a tensorized
            # version of the op this node represents is unsupported.
            node_does_not_support_tensors = all(
                self.__requires_element_type_at(node, i) == ElementType.SCALAR
                for i, _ in enumerate(node.inputs.inputs)
            ) and not _tensor_constants.__contains__(type(node))

            needs_devectorize = (
                has_upstream_scatter_requirement
                or node_does_not_support_tensors
                or has_downstream_scatter_requirement
            )
            if needs_devectorize and has_merge_requirement:
                return DevectorizeTransformation.YES_WITH_MERGE
            if needs_devectorize:
                return DevectorizeTransformation.YES
            if has_merge_requirement:
                return DevectorizeTransformation.NO

        return DevectorizeTransformation.NO

    def __get_clone_parents(
        self, node: bn.BMGNode
    ) -> List[typing.Union[bn.BMGNode, List[bn.BMGNode]]]:
        parents = []
        for j, p in enumerate(node.inputs.inputs):
            sz = self.sizer[p]
            if sz == Unsized:
                parent_was_tensor = False
            else:
                parent_was_tensor = not is_scalar(sz)

            required_element_type = self.__requires_element_type_at(node, j)
            needs_clone = required_element_type != ElementType.SCALAR or (
                not parent_was_tensor and required_element_type == ElementType.SCALAR
            )
            needs_devectorized = (
                required_element_type == ElementType.SCALAR and parent_was_tensor
            )
            if needs_clone:
                if self.copy_context.clones.__contains__(p):
                    parents.append(self.copy_context.clones[p])
                else:
                    raise ValueError("encountered a value not in the clone context")
            elif needs_devectorized:
                if self.copy_context.devectorized_nodes.__contains__(p):
                    parents.append(self.copy_context.devectorized_nodes[p])
                else:
                    raise ValueError("a vectorized parent was not found")
            else:
                raise NotImplementedError("This should be unreachable")
        return parents

    def __get_clone_parents_flat(self, node: bn.BMGNode) -> List[bn.BMGNode]:
        parents = []
        for p in node.inputs.inputs:
            if self.copy_context.clones.__contains__(p):
                parent = self.copy_context.clones[p]
                parents.append(parent)
            else:
                raise ValueError("a unit parent was not found")
        return parents

    def __flatten_parents(
        self, nd: bn.BMGNode, parents: List, creator: Callable
    ) -> List[bn.BMGNode]:
        return self.__flatten_parents_with_index(nd, parents, lambda i, s: creator(*s))

    def __flatten_parents_with_index(
        self, node: bn.BMGNode, parents: List, creator: Callable
    ) -> List[bn.BMGNode]:
        size = self.sizer[node]
        item_count = 1
        for i in range(0, len(size)):
            item_count *= size[i]
        elements: List[bn.BMGNode] = []
        broadcast: Dict[int, Callable] = {}
        for i, parent in enumerate(parents):
            if isinstance(parent, List):
                input_size = self.sizer[node.inputs.inputs[i]]
                broadcast_fnc_maybe = broadcast_fnc(input_size, size)
                if isinstance(broadcast_fnc_maybe, Callable):
                    broadcast[i] = broadcast_fnc_maybe
                else:
                    raise ValueError(
                        f"The size {input_size} cannot be broadcast to {size}"
                    )

        for i in range(0, item_count):
            reduced_parents = []
            for k, parent in enumerate(parents):
                if isinstance(parent, List):
                    new_index = broadcast[k](i)
                    reduced_parents.append(parent[new_index])
                else:
                    reduced_parents.append(parent)
            new_node = creator(i, reduced_parents)
            elements.append(new_node)
        return elements

    def _clone(self, node: bn.BMGNode) -> bn.BMGNode:
        n = self.cloner.clone(node, self.__get_clone_parents_flat(node))
        self.copy_context.clones[node] = n
        return n

    def __split(self, node: bn.BMGNode) -> List[bn.BMGNode]:
        # See comments at the top of this module describing the semantics
        # of split.
        size = self.sizer[node]
        dim = len(size)
        index_list = []
        # This code is a little confusing because BMG uses column-major
        # matrices and torch uses row-major tensors. The Sizer always gives
        # the size that a graph node would be in *torch*, so if we have a
        # Size([2, 3]) matrix node, that has two rows and three columns in
        # torch, and would be indexed first by row and then by column. But
        # in BMG, that would be two columns, three rows, and indexed by
        # column first, then row.
        #
        # The practical upshot is: if we have, say, Size([3]) OR
        # Size([1, 3]) then either way, we will have a one-column,
        # three-row BMG node, and therefore we only need a single level of
        # indexing.
        n = self._clone(node)
        if dim == 0:
            # If we have just a single value then there's no indexing
            # required.
            index_list.append(n)
        elif dim == 1:
            for i in range(0, size[0]):
                ci = self.cloner.bmg.add_constant(i)
                ni = self.cloner.bmg.add_index(n, ci)
                index_list.append(ni)
        elif size[0] == 1:
            assert dim == 2
            for i in range(0, size[1]):
                ci = self.cloner.bmg.add_constant(i)
                ni = self.cloner.bmg.add_index(n, ci)
                index_list.append(ni)
        else:
            # We need two levels of indexing.
            assert dim == 2
            for i in range(0, size[0]):
                ci = self.cloner.bmg.add_constant(i)
                ni = self.cloner.bmg.add_index(n, ci)
                for j in range(0, size[1]):
                    cj = self.cloner.bmg.add_constant(j)
                    nij = self.cloner.bmg.add_index(ni, cj)
                    index_list.append(nij)
        return index_list

    def __scatter(self, node: bn.BMGNode) -> List[bn.BMGNode]:
        # See comments at the top of the module describing the semantics
        # of scatter.
        parents = self.__get_clone_parents(node)
        if isinstance(node, bn.SampleNode):
            new_nodes = self.__flatten_parents(
                node, parents, self.cloner.bmg.add_sample
            )
            return new_nodes
        if isinstance(node, bn.OperatorNode) or isinstance(node, bn.DistributionNode):
            return self.__flatten_parents(
                node, parents, self.cloner.node_factories[type(node)]
            )
        if isinstance(node, bn.Observation):
            dim = len(node.value.size())
            values = []
            if dim == 0:
                values.append(node.value.item())
            elif dim == 1:
                for i in range(0, node.value.size()[0]):
                    values.append(node.value[i])
            else:
                assert dim == 2
                for i in range(0, node.value.size()[0]):
                    for j in range(0, node.value.size()[1]):
                        values.append(node.value[i][j])
            return self.__flatten_parents_with_index(
                node,
                parents,
                lambda i, s: self.__add_observation(s, i, values),
            )
        else:
            raise NotImplementedError()

    def __add_observation(
        self, inputs: List[bn.BMGNode], i: int, value: List
    ) -> bn.Observation:
        assert len(inputs) == 1
        sample = inputs[0]
        if isinstance(sample, bn.SampleNode):
            return self.cloner.bmg.add_observation(sample, value[i])
        else:
            raise ValueError("expected a sample as a parent to an observation")

    def _devectorize(self, node: bn.BMGNode) -> List[bn.BMGNode]:
        # There are two ways to devectorize a node: (1) we can scatter it,
        # or (2) we can split it (clone and index).
        is_sample_of_scalar_dist = isinstance(
            node, bn.SampleNode
        ) and not _tensor_valued_distributions.__contains__(type(node.operand))
        not_indexable = not _indexable_node_types.__contains__(type(node))
        if not_indexable or is_sample_of_scalar_dist:
            return self.__scatter(node)
        else:
            return self.__split(node)

    def assess_node(
        self, node: bn.BMGNode, original: BMGraphBuilder
    ) -> TransformAssessment:
        if self.sizer[node] == Unsized:
            report = ErrorReport()
            report.add_error(
                UnsizableNode(
                    node,
                    [self.sizer[p] for p in node.inputs.inputs],
                    original.execution_context.node_locations(node),
                )
            )
            return TransformAssessment(False, report)
        return TransformAssessment(True, ErrorReport())

    # A node is either replaced 1-1, 1-many, or deleted.
    def transform_node(
        self, node: bn.BMGNode, new_inputs: List[bn.BMGNode]
    ) -> typing.Optional[typing.Union[bn.BMGNode, List[bn.BMGNode]]]:
        transform_type = self.__devectorize_transformation_type(node)
        if transform_type == DevectorizeTransformation.YES:
            image = self._devectorize(node)
            self.copy_context.devectorized_nodes[node] = image
        elif transform_type == DevectorizeTransformation.NO:
            image = self._clone(node)
            self.copy_context.clones[node] = image
        elif transform_type == DevectorizeTransformation.YES_WITH_MERGE:
            image = self._devectorize(node)
            self.copy_context.devectorized_nodes[node] = image
            if not self.copy_context.clones.__contains__(node):
                tensor = self.cloner.bmg.add_tensor(self.sizer[node], *image)
                self.copy_context.clones[node] = tensor
            image = self.copy_context.clones[node]
        else:
            raise NotImplementedError(
                "a new type of transformation type was introduced but never implemented"
            )
        return image


def vectorized_graph_fixer() -> GraphFixer:
    def _tensorize(bmg_old: BMGraphBuilder) -> GraphFixerResult:
        bmg, errors = copy_and_replace(bmg_old, lambda c, s: Tensorizer(c, s))
        return bmg, True, errors

    def _detensorize(bmg_old: BMGraphBuilder) -> GraphFixerResult:
        bmg, errors = copy_and_replace(bmg_old, lambda c, s: Devectorizer(c, s))
        return bmg, True, errors

    return sequential_graph_fixer([_tensorize, _detensorize])
beanmachine-main
src/beanmachine/ppl/compiler/devectorizer_transformer.py
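The split/scatter/merge pipeline described in the module comment can be illustrated outside BMG with ordinary torch tensors. A sketch (illustrative only; exp stands in for an op with no matrix variant, and m/elems are hypothetical names):

import torch

m = torch.tensor([[1.0, 2.0], [3.0, 4.0]])

# Split: index the matrix node into its individual scalar elements.
elems = [m[i][j] for i in range(m.size(0)) for j in range(m.size(1))]

# Scatter: push the scalar-only operation down onto each element.
scattered = [e.exp() for e in elems]

# Merge: reassemble the results into a tensor for downstream matrix
# consumers.
merged = torch.stack(scattered).reshape(m.size())

assert torch.allclose(merged, m.exp())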
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""Debugging tools for working with ASTs"""

# This module just has some helpful tools that can be used for visualizing
# Python ASTs when debugging the compilation process.

import ast
from ast import AST
from typing import Any, List, Tuple

import beanmachine.ppl.utils.dotbuilder as db
import beanmachine.ppl.utils.treeprinter as tp
import black


def _get_name(node: Any) -> str:
    if isinstance(node, list):
        return "list"
    if isinstance(node, AST):
        return type(node).__name__
    return str(node)


def print_tree(node: AST, unicode: bool = True) -> str:
    """Takes an AST and produces a string containing a hierarchical view of
    the tree structure."""

    def get_children(node: Any) -> List[Any]:
        if isinstance(node, list):
            return node
        if isinstance(node, AST):
            return [child for (name, child) in ast.iter_fields(node)]
        return []

    return tp.print_tree(node, get_children, _get_name, unicode)


def print_graph(node: AST) -> str:
    """Takes an AST and produces a string containing a DOT representation
    of the tree as a graph."""

    def get_children(node: Any) -> List[Tuple[str, Any]]:
        if isinstance(node, list):
            return [(str(i), a) for i, a in enumerate(node)]
        if isinstance(node, AST):
            return list(ast.iter_fields(node))
        return []

    return db.print_graph([node], get_children, None, _get_name)


def print_python(node: AST) -> str:
    """Takes an AST and produces a string containing a human-readable
    Python expression that builds the AST node."""
    return black.format_str(ast.dump(node), mode=black.FileMode())
beanmachine-main
src/beanmachine/ppl/compiler/ast_tools.py
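A sketch of how these helpers might be used when debugging (assumes the three functions above are importable; the exact output depends on the treeprinter and dotbuilder utils):

import ast

tree = ast.parse("x = 1 + 2")
print(print_tree(tree))    # hierarchical text view of the AST
print(print_graph(tree))   # DOT source for rendering the tree as a graph
print(print_python(tree))  # black-formatted ast.dump(tree)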
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """A rules engine for tree transformation""" from abc import ABC, abstractmethod from typing import Any, Callable, Dict, Iterable, List, Tuple from beanmachine.ppl.compiler.patterns import ( anyPattern, failPattern, is_any, match, Pattern, to_pattern, ) # Logically, a rule is just a projection; it's a partial function from # any value to any other value. # # Since rules are partial functions -- they are allowed to reject their # argument and fail -- we will implement rules as classes with an apply # method that returns a success or failure code. # # There are a number of ways to construct rules; a basic way is to # provide a pattern -- a predicate on values -- and an action to take # if the pattern is matched successfully -- that is, a function from # values to values. # # Rules may be combined together with *rule combinators*; a combinator is # a function which takes one or more rules and produces a rule. _empty = {} class RuleResult(ABC): test: Any def __init__(self, test: Any) -> None: self.test = test @abstractmethod def is_success(self) -> bool: pass @abstractmethod def is_fail(self) -> bool: pass def __str__(self) -> str: return f"{type(self).__name__}:{self.test}" @abstractmethod def expect_success(self) -> Any: pass def __bool__(self) -> bool: return self.is_success() class Fail(RuleResult): def __init__(self, test: Any = None) -> None: RuleResult.__init__(self, test) def is_success(self) -> bool: return False def is_fail(self) -> bool: return True def expect_success(self) -> Any: raise ValueError("Expected success but rewrite rule patten match failed") class Success(RuleResult): result: Any def __init__(self, test: Any, result: Any) -> None: RuleResult.__init__(self, test) self.result = result def is_success(self) -> bool: return True def is_fail(self) -> bool: return False def expect_success(self) -> Any: return self.result class Rule(ABC): """A rule represents a partial function that transforms a value.""" name: str def __init__(self, name: str = "") -> None: self.name = name @abstractmethod def apply(self, test: Any) -> RuleResult: pass def __call__(self, test: Any) -> RuleResult: return self.apply(test) @abstractmethod def always_succeeds(self) -> bool: pass def _identity(x: Any) -> Any: return x class Trace(Rule): """This combinator introduces a side effect to be executed every time the child rule is executed, and when it succeeds or fails. It is useful for debugging.""" rule: Rule logger: Callable[[Rule, Any], None] def __init__(self, rule: Rule, logger: Callable[[Rule, Any], None]) -> None: Rule.__init__(self, rule.name) self.rule = rule self.logger = logger def apply(self, test: Any) -> RuleResult: self.logger(self.rule, None) result = self.rule(test) self.logger(self.rule, result) return result def __str__(self) -> str: return str(self.rule) def always_succeeds(self) -> bool: return self.rule.always_succeeds() def make_logger(log: List[str]) -> Callable[[Rule], Rule]: def logger(rule: Rule, value: Any) -> None: if value is None: log.append(f"Started {rule.name}") else: log.append(f"Finished {rule.name}") def trace(rule: Rule) -> Rule: return Trace(rule, logger) return trace class PatternRule(Rule): """If the test value matches the pattern, then the test value is passed to the projection and the rule succeeds. 
Otherwise, the rule fails.""" pattern: Pattern projection: Callable[[Any], Any] def __init__( self, pattern: Pattern, projection: Callable[[Any], Any] = _identity, name: str = "pattern", ) -> None: Rule.__init__(self, name) self.pattern = pattern self.projection = projection def apply(self, test: Any) -> RuleResult: match_result = match(self.pattern, test) if match_result.is_fail(): return Fail(test) result = self.projection(test) return Success(test, result) def __str__(self) -> str: return f"{self.name}( {str(to_pattern(self.pattern)) }" def always_succeeds(self) -> bool: return is_any(self.pattern) def projection_rule(projection: Callable[[Any], Any], name: str = "projection") -> Rule: return PatternRule(anyPattern, projection, name) # The identity rule is the rule that always succeeds, and the projection # is an identity function. identity: Rule = projection_rule(_identity, "identity") # The fail rule is the rule that never succeeds. fail: Rule = PatternRule(failPattern, _identity, "fail") # This rule succeeds if the test is a list. is_list: Rule = PatternRule(list, _identity, "is_list") def always_replace(value: Any, name: str = "always_replace") -> Rule: """always_replace(value) produces a rule that replaces anything with the given value. It always succeeds.""" return projection_rule(lambda x: value, name) def pattern_rules( pairs: List[Tuple[Pattern, Callable[[Any], Any]]], name: str = "pattern_rules" ) -> Rule: """Constructs a rule from a sequence of pairs of patterns and projections. Patterns are checked in order, and the first one that matches is used for the projection; if none match then the rule fails.""" rules = (PatternRule(pattern, action, name) for pattern, action in pairs) return FirstMatch(rules) _exception = [Exception] class IgnoreException(Rule): """Apply the given rule; if it throws an exception, the rule fails.""" rule: Rule expected: List[type] def __init__( self, rule: Rule, expected: List[type] = _exception, name: str = "handle" ) -> None: Rule.__init__(self, name) self.rule = rule self.expected = expected def apply(self, test: Any) -> RuleResult: try: return self.rule.apply(test) except Exception as x: if any(isinstance(x, t) for t in self.expected): return Fail(test) # We did not expect this exception; do not eat the bug. raise def __str__(self) -> str: r = str(self.rule) return f"ignore_exception( {r} )" def always_succeeds(self) -> bool: # Presumably you would not be wrapping a rule that never throws, # so let's assume that this can fail. return False def ignore_div_zero(rule: Rule) -> Rule: return IgnoreException(rule, [ZeroDivisionError], "ignore_div_zero") def ignore_runtime_error(rule: Rule) -> Rule: return IgnoreException(rule, [RuntimeError], "ignore_runtime_error") def ignore_value_error(rule: Rule) -> Rule: return IgnoreException(rule, [ValueError], "ignore_value_error") class Check(Rule): """Apply the given rule; if it fails, fail. If it succeeds, the result is the original test value, not the transformed value. 
This is useful for scenarios where we wish to know if a particular thing is true of a node before we apply an expensive rule to it.""" rule: Rule def __init__(self, rule: Rule, name: str = "check") -> None: Rule.__init__(self, name) self.rule = rule def apply(self, test: Any) -> RuleResult: rule_result = self.rule.apply(test) if rule_result.is_success(): return Success(test, test) return rule_result def __str__(self) -> str: r = str(self.rule) return f"check( {r} )" def always_succeeds(self) -> bool: # Note that it is strange to have a Check which always succeeds # because that is the same as the identity rule. # TODO: Consider implementing some sort of warning for this case? return self.rule.always_succeeds() class Choose(Rule): """Apply the condition rule to the test. If it succeeds, apply the rule in the consequence to its output. If it fails, apply the rule in the alternative to the test. That is, Choose(a, b, c)(test) has the semantics of if a(test) then b(a(test)) else c(test)""" condition: Rule consequence: Rule alternative: Rule def __init__( self, condition: Rule, consequence: Rule, alternative: Rule, name: str = "choose", ) -> None: Rule.__init__(self, name) self.condition = condition self.consequence = consequence self.alternative = alternative def apply(self, test: Any) -> RuleResult: rule_result = self.condition.apply(test) if isinstance(rule_result, Success): return self.consequence.apply(rule_result.result) return self.alternative.apply(test) def __str__(self) -> str: a = str(self.condition) b = str(self.consequence) c = str(self.alternative) return f"choose( {a}, {b}, {c} )" def always_succeeds(self) -> bool: if self.condition.always_succeeds(): return self.consequence.always_succeeds() return self.consequence.always_succeeds() and self.alternative.always_succeeds() def if_then(condition: Rule, consequence: Rule, alternative: Rule = identity) -> Rule: """Apply the condition rule, then apply the original test to either the consequence or the alternative, depending on whether the condition succeeded or failed. Note that this is different than Choose. Choose applies the condition to the result of the condition, not to the original test.""" return Choose(Check(condition), consequence, alternative) class Compose(Rule): """Apply the first rule to the test. If it succeeds, apply the second rule to its output. That is, Compose(a, b)(test) has the semantics of if a(test) then b(a(test)) else fail""" # Compose could be implemented as Choose(a, b, fail), but for debugging # purposes it is better to explicitly implement it. 
first: Rule second: Rule def __init__(self, first: Rule, second: Rule, name: str = "compose") -> None: Rule.__init__(self, name) self.first = first self.second = second def apply(self, test: Any) -> RuleResult: rule_result = self.first.apply(test) if isinstance(rule_result, Success): return self.second.apply(rule_result.result) return rule_result def __str__(self) -> str: a = str(self.first) b = str(self.second) return f"compose( {a}, {b} )" def always_succeeds(self) -> bool: return self.first.always_succeeds() and self.second.always_succeeds() class Recursive(Rule): """Delay construction of a rule until we need it, so as to avoid recursion.""" rule_maker: Callable[[], Rule] def __init__(self, rule_maker: Callable[[], Rule], name: str = "recursive") -> None: Rule.__init__(self, name) self.rule_maker = rule_maker def apply(self, test: Any) -> RuleResult: return self.rule_maker().apply(test) def __str__(self) -> str: return self.name def always_succeeds(self) -> bool: return False class OrElse(Rule): """Apply the first rule to the test. If it succeeds, use that result. If it fails, apply the second rule to the test and return that.""" # OrElse could be implemented as Choose(first, identity, second), but for debugging # purposes it is better to explicitly implement it. first: Rule second: Rule def __init__(self, first: Rule, second: Rule, name: str = "or_else") -> None: Rule.__init__(self, name) self.first = first self.second = second def apply(self, test: Any) -> RuleResult: rule_result = self.first.apply(test) if isinstance(rule_result, Success): return rule_result return self.second.apply(test) def __str__(self) -> str: a = str(self.first) b = str(self.second) return f"or_else( {a}, {b} )" def always_succeeds(self) -> bool: return self.first.always_succeeds() or self.second.always_succeeds() class FirstMatch(Rule): """Apply each rule to the test until one succeeds; if none succeed, then fail.""" # FirstMatch([a,b,c]) could be implemented as OrElse(a, OrElse(b, c)) but for # debugging purposes it is better to explicitly implement it. rules: List[Rule] def __init__(self, rules: Iterable[Rule], name: str = "first_match") -> None: Rule.__init__(self, name) self.rules = list(rules) def apply(self, test: Any) -> RuleResult: for rule in self.rules: rule_result = rule.apply(test) if isinstance(rule_result, Success): return rule_result return Fail(test) def __str__(self) -> str: rs = ", ".join(str(rule) for rule in self.rules) return f"first_match( {rs} )" def always_succeeds(self) -> bool: return any(r.always_succeeds() for r in self.rules) class TryOnce(Rule): """Apply the rule to the test. If it succeeds, use that result. If it fails, use the test as the result and succeed. This rule always succeeds.""" # TryOnce could be implemented as OrElse(rule, identity), but for debugging # purposes it is better to explicitly implement it. rule: Rule def __init__(self, rule: Rule, name: str = "try_once") -> None: Rule.__init__(self, name) self.rule = rule def apply(self, test: Any) -> RuleResult: rule_result = self.rule.apply(test) if isinstance(rule_result, Success): return rule_result return Success(test, test) def __str__(self) -> str: return f"try_once( {str(self.rule)} )" def always_succeeds(self) -> bool: return True def either_or_both(first: Rule, second: Rule, name: str = "either_or_both") -> Rule: """Do the first rule; if it succeeds, try doing the second rule, but do not worry if it fails. If the first rule fails, do the second rule. 
The net effect is, either first, or second, or first-then-second happens, or both fail.""" return Choose(first, TryOnce(second), second, name) class SomeOf(Rule): # This is logically the extension of either-or-both to arbitrarily many rules. """Takes a list of rules and composes together as many of them as succeed. At least one must succeed, otherwise the rule fails.""" rules: List[Rule] def __init__(self, rules: List[Rule], name: str = "some_of") -> None: Rule.__init__(self, name) self.rules = rules def apply(self, test: Any) -> RuleResult: result = Fail() current_test = test for current_rule in self.rules: current_result = current_rule.apply(current_test) # If we succeeded, this becomes the input to the next rule. # If we failed, just ignore it and try the next rule. if current_result.is_success(): current_test = current_result.expect_success() result = current_result if result.is_success(): return Success(test, result.expect_success()) return Fail(test) def __str__(self) -> str: rs = ",".join(str(r) for r in self.rules) return f"some_of( {rs} )" def always_succeeds(self) -> bool: return any(r.always_succeeds() for r in self.rules) class AllOf(Rule): # This is logically the extension of composition to arbitrarily many rules. """Takes a list of rules and composes together all of them. All must succeed, otherwise the rule fails.""" rules: List[Rule] def __init__(self, rules: List[Rule], name: str = "all_of") -> None: Rule.__init__(self, name) self.rules = rules def apply(self, test: Any) -> RuleResult: current_test = test result = Success(test, test) for current_rule in self.rules: result = current_rule.apply(current_test) if result.is_fail(): return Fail(test) current_test = result.expect_success() return Success(test, result.expect_success()) def __str__(self) -> str: rs = ",".join(str(r) for r in self.rules) return f"all_of( {rs} )" def always_succeeds(self) -> bool: return all(r.always_succeeds() for r in self.rules) class TryMany(Rule): """Repeatedly apply a rule; the result is that of the last application that succeeded, or the original test if none succeeded. This rule always succeeds.""" # TryMany could be implemented as TryOnce(Compose(rule, Recursive(TryMany(rule)))) # but for debugging purposes it is better to explicitly implement it. rule: Rule def __init__(self, rule: Rule, name: str = "try_many") -> None: Rule.__init__(self, name) self.rule = rule if rule.always_succeeds(): raise ValueError( "TryMany has been given a rule that always succeeds," + " which will cause an infinite loop." ) def apply(self, test: Any) -> RuleResult: current: Success = Success(test, test) while True: rule_result = self.rule.apply(current.result) if isinstance(rule_result, Success): current = rule_result else: return current def __str__(self) -> str: return f"try_many( {str(self.rule)} )" def always_succeeds(self) -> bool: return True def at_least_once(rule: Rule) -> Rule: """Try a rule once; if it fails, fail. If it succeeds, try it again as many times as it keeps succeeding.""" return Compose(rule, TryMany(rule)) class ListEdit: """Consider a rule which descends through an AST looking for a particular statement to replace. If the rule replaces a particular statement with another statement, we can express that with a straightforward rule that succeeds and produces the new statement. But how can we represent rules that either delete a statement (that is, replace it with nothing) or replace it with more than one statement? 


class ListEdit:
    """Consider a rule which descends through an AST looking for a particular
    statement to replace. If the rule replaces a particular statement with
    another statement, we can express that with a straightforward rule that
    succeeds and produces the new statement. But how can we represent rules
    that either delete a statement (that is, replace it with nothing) or
    replace it with more than one statement?

    To express this concept, a rule should succeed and return a
    ListEdit([s1, s2...]) where the list contains the replacements; if the
    list is empty then the element is deleted."""

    edits: List[Any]

    def __init__(self, edits: List[Any]) -> None:
        self.edits = edits


remove_from_list = ListEdit([])


def _expand_edits(items: Iterable[Any]) -> Iterable[Any]:
    """Suppose we have a list [X, Y, Z] and we wish to replace Y with A, B.
    We will produce the sequence [X, ListEdit([A, B]), Z]; this function
    expands the ListEdit structure and splices the elements into the list,
    producing [X, A, B, Z]."""
    for item in items:
        if isinstance(item, ListEdit):
            for expanded in _expand_edits(item.edits):
                yield expanded
        else:
            yield item


def _list_unchanged(xs: List[Any], ys: List[Any]) -> bool:
    if xs is ys:
        return True
    # When we do a rewrite step that produces no change, we try to
    # guarantee that the "rewritten" value is reference identical to
    # the original value. We can therefore take advantage of this
    # fact when comparing two reference-unequal lists. Using normal
    # structural equality on lists verifies that each member has the
    # same structure as the corresponding member, but we can be faster
    # than that by verifying that each member is reference equal; if
    # any member is reference-unequal then something was rewritten
    # to a different value.
    if len(xs) != len(ys):
        return False
    return all(x is y for x, y in zip(xs, ys))


class AllListMembers(Rule):
    """Apply a rule to all members. Succeeds if the rule succeeds for all
    members, and returns a list with the members replaced with the new
    values. Otherwise, fails."""

    rule: Rule

    def __init__(self, rule: Rule, name: str = "all_list_members") -> None:
        Rule.__init__(self, name)
        self.rule = rule

    def apply(self, test: Any) -> RuleResult:
        # Easy outs:
        if not isinstance(test, list):
            return Fail(test)
        if len(test) == 0:
            return Success(test, test)
        results = [self.rule.apply(child) for child in test]
        # Were there any failures?
        if any(result.is_fail() for result in results):
            return Fail(test)
        # Splice in any list edits.
        new_values = list(_expand_edits(result.expect_success() for result in results))
        # Is the resulting list different? If not, make sure the result is
        # reference equal.
        if _list_unchanged(new_values, test):
            return Success(test, test)
        # Everything succeeded but there was at least one different value.
        return Success(test, new_values)

    def __str__(self) -> str:
        return f"all_list_members( {str(self.rule)} )"

    def always_succeeds(self) -> bool:
        return False


# We have n-ary operators that have both "term" children and "list member"
# children; for example Compare(left, ops, comps) has a left child and then
# a list of arbitrarily many other operands in comps. When applying a rule to
# all children normally Compare is considered to have 3 children and we
# apply the rule to each, but it is convenient to apply the rule to all the
# logical children -- the ops, the comps, and left -- rather than to the lists
# themselves. This combinator enables that scenario.
# TODO: always_succeeds will be wrong for the returned object.
def list_member_children(rule: Rule) -> Rule:
    return if_then(is_list, AllListMembers(rule), rule)
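

# A small sketch of how ListEdit splicing behaves (illustrative only):
# replacing Y in [X, Y, Z] with A and B is expressed as the intermediate
# sequence [X, ListEdit([A, B]), Z], which _expand_edits flattens; an empty
# edit deletes the element.
def _example_expand_edits() -> None:
    assert list(_expand_edits(["X", ListEdit(["A", "B"]), "Z"])) == [
        "X",
        "A",
        "B",
        "Z",
    ]
    assert list(_expand_edits(["X", remove_from_list, "Z"])) == ["X", "Z"]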


class AllListEditMembers(Rule):
    """Rules which are intended to modify a parent list by adding or removing
    items return a ListEdit([...]) object, but in cases where a rule then
    recurses upon children -- like top_down -- we'll potentially need to
    rewrite the elements in the edit list. This combinator implements that."""

    # The implementation strategy here is to just defer to AllListMembers for
    # the heavy lifting.

    rule: AllListMembers

    def __init__(self, rule: Rule, name: str = "all_list_edit_members") -> None:
        Rule.__init__(self, name)
        self.rule = AllListMembers(rule)

    def apply(self, test: Any) -> RuleResult:
        if not isinstance(test, ListEdit):
            return Fail(test)
        result = self.rule(test.edits)
        if result.is_fail():
            return Fail(test)
        new_values = result.expect_success()
        if new_values is test.edits:
            return Success(test, test)
        return Success(test, ListEdit(new_values))

    def __str__(self) -> str:
        return f"all_list_edit_members( {str(self.rule)} )"

    def always_succeeds(self) -> bool:
        return False


class AllTermChildren(Rule):
    """Apply a rule to all children. Succeeds if the rule succeeds for all
    children, and returns a constructed object with the children replaced
    with the new values. Otherwise, fails."""

    get_children: Callable[[Any], Dict[str, Any]]
    construct: Callable[[type, Dict[str, Any]], Any]
    rule: Rule

    def __init__(
        self,
        rule: Rule,
        get_children: Callable[[Any], Dict[str, Any]],
        construct: Callable[[type, Dict[str, Any]], Any],
        name: str = "all_children",
    ) -> None:
        Rule.__init__(self, name)
        self.rule = rule
        self.get_children = get_children
        self.construct = construct

    def apply(self, test: Any) -> RuleResult:
        children = self.get_children(test)
        # Easy out for leaves.
        if len(children) == 0:
            return Success(test, test)
        results = {
            child_name: self.rule.apply(child_value)
            for child_name, child_value in children.items()
        }
        # Were there any failures?
        if any(result.is_fail() for result in results.values()):
            return Fail(test)
        # Were there any successes that returned a different value?
        new_values = {n: results[n].expect_success() for n in results}
        if all(new_values[n] is children[n] for n in new_values):
            # Everything succeeded and there were no changes.
            return Success(test, test)
        # Everything succeeded but there was at least one different value.
        # Construct a new object.
        return Success(test, self.construct(type(test), new_values))

    def __str__(self) -> str:
        return f"all_term_children( {str(self.rule)} )"

    def always_succeeds(self) -> bool:
        return self.rule.always_succeeds()


# The wrapper is just to ensure that always_succeeds has the right semantics.
class AllChildren(Rule):
    """Apply a rule to all children or list members."""

    rule: Rule
    combined_rule: Rule

    def __init__(
        self,
        rule: Rule,
        get_children: Callable[[Any], Dict[str, Any]],
        construct: Callable[[type, Dict[str, Any]], Any],
        name: str = "all_children",
    ) -> None:
        Rule.__init__(self, name)
        self.rule = rule
        self.combined_rule = FirstMatch(
            [
                AllListMembers(rule),
                AllListEditMembers(rule),
                AllTermChildren(rule, get_children, construct),
            ]
        )

    def apply(self, test: Any) -> RuleResult:
        return self.combined_rule(test)

    def __str__(self) -> str:
        return f"all_children( {str(self.rule)} )"

    def always_succeeds(self) -> bool:
        return self.rule.always_succeeds()
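

# A sketch of the get_children/construct contract assumed by the
# child-visiting combinators, using a hypothetical "pair" domain invented
# for illustration: get_children exposes a node's children as a
# name-to-value dictionary, and construct rebuilds a node of the given type
# from such a dictionary.
#
#   def _pair_children(node):
#       return {"left": node[0], "right": node[1]}
#
#   def _pair_construct(typ, children):
#       return (children["left"], children["right"])
#
#   halve_all = AllTermChildren(_HalveEven(), _pair_children, _pair_construct)
#   # halve_all.apply((4, 8)) succeeds with (2, 4);
#   # halve_all.apply((4, 7)) fails because one child cannot be halved.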


class SomeListMembers(Rule):
    """Apply a rule to all members. Succeeds if the rule succeeds for one or
    more members, and returns a list with those members replaced with the
    new values. Otherwise, fails."""

    rule: Rule

    def __init__(self, rule: Rule, name: str = "some_list_members") -> None:
        Rule.__init__(self, name)
        self.rule = rule

    def apply(self, test: Any) -> RuleResult:
        # Easy outs:
        if not isinstance(test, list):
            return Fail(test)
        if len(test) == 0:
            return Fail(test)
        results = [self.rule.apply(child) for child in test]
        # Were there any successes?
        if not any(result.is_success() for result in results):
            return Fail(test)
        # Splice in any list edits.
        new_values = list(
            _expand_edits(
                result.expect_success() if result.is_success() else result.test
                for result in results
            )
        )
        # Were there any successes that returned a different value?
        if _list_unchanged(new_values, test):
            return Success(test, test)
        # At least one rule succeeded and produced a different value.
        return Success(test, new_values)

    def __str__(self) -> str:
        return f"some_list_members( {str(self.rule)} )"

    def always_succeeds(self) -> bool:
        return False


class SomeChildren(Rule):
    """Apply a rule to all children. Succeeds if the rule succeeds for one or
    more children, and returns a constructed object with those children
    replaced with the new values. Otherwise, fails."""

    get_children: Callable[[Any], Dict[str, Any]]
    construct: Callable[[type, Dict[str, Any]], Any]
    rule: Rule

    def __init__(
        self,
        rule: Rule,
        get_children: Callable[[Any], Dict[str, Any]],
        construct: Callable[[type, Dict[str, Any]], Any],
        name: str = "some_children",
    ) -> None:
        Rule.__init__(self, name)
        self.rule = rule
        self.get_children = get_children
        self.construct = construct

    def apply(self, test: Any) -> RuleResult:
        children = self.get_children(test)
        # Easy out for leaves.
        if len(children) == 0:
            return Fail(test)
        results = {
            child_name: self.rule.apply(child_value)
            for child_name, child_value in children.items()
        }
        # Were there any successes?
        if not any(result.is_success() for result in results.values()):
            return Fail(test)
        # Were there any successes that returned a different value?
        new_values = {
            n: results[n].expect_success()
            if results[n].is_success()
            else results[n].test
            for n in results
        }
        if all(new_values[n] is children[n] for n in new_values):
            # Every success returned the original value, so there were no changes.
            return Success(test, test)
        # At least one rule succeeded and produced a different value.
        # Construct a new object.
        return Success(test, self.construct(type(test), new_values))

    def __str__(self) -> str:
        return f"some_children( {str(self.rule)} )"

    def always_succeeds(self) -> bool:
        # Fails if there are no children
        return False


class OneListMember(Rule):
    """Apply a rule to all members until the first success. Succeeds if it
    finds one success and returns a list with the child replaced with the
    new value. Otherwise, fails."""

    rule: Rule

    def __init__(self, rule: Rule, name: str = "one_child") -> None:
        Rule.__init__(self, name)
        self.rule = rule

    def apply(self, test: Any) -> RuleResult:
        if not isinstance(test, list):
            return Fail(test)
        for i, child in enumerate(test):
            result = self.rule.apply(child)
            if result.is_success():
                new_value = result.expect_success()
                if new_value is child:
                    return Success(test, test)
                new_values = test.copy()
                new_values[i] = new_value
                new_values = list(_expand_edits(new_values))
                if _list_unchanged(new_values, test):
                    return Success(test, test)
                return Success(test, new_values)
        return Fail(test)

    def __str__(self) -> str:
        return f"one_list_member( {str(self.rule)} )"

    def always_succeeds(self) -> bool:
        return False
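

# Contrasting the list combinators on one input (illustrative, reusing the
# _HalveEven sketch above): the "all" form demands success everywhere, the
# "some" form rewrites whatever it can, and the "one" form stops at the
# first success.
def _example_list_combinators() -> None:
    halve = _HalveEven()
    assert AllListMembers(halve).apply([2, 3, 8]).is_fail()
    assert SomeListMembers(halve).apply([2, 3, 8]).expect_success() == [1, 3, 4]
    assert OneListMember(halve).apply([2, 3, 8]).expect_success() == [1, 3, 8]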


class OneChild(Rule):
    """Apply a rule to all children until the first success. Succeeds if it
    finds one success and returns a constructed object with the child
    replaced with the new value. Otherwise, fails."""

    get_children: Callable[[Any], Dict[str, Any]]
    construct: Callable[[type, Dict[str, Any]], Any]
    rule: Rule

    def __init__(
        self,
        rule: Rule,
        get_children: Callable[[Any], Dict[str, Any]],
        construct: Callable[[type, Dict[str, Any]], Any],
        name: str = "one_child",
    ) -> None:
        Rule.__init__(self, name)
        self.rule = rule
        self.get_children = get_children
        self.construct = construct

    def apply(self, test: Any) -> RuleResult:
        children = self.get_children(test)
        for child_name, child_value in children.items():
            result = self.rule.apply(child_value)
            if result.is_success():
                new_value = result.expect_success()
                if new_value is child_value:
                    return Success(test, test)
                new_values = children.copy()
                new_values[child_name] = new_value
                return Success(test, self.construct(type(test), new_values))
        return Fail(test)

    def __str__(self) -> str:
        return f"one_child( {str(self.rule)} )"

    def always_succeeds(self) -> bool:
        return False


class SpecificChild(Rule):
    """Apply a rule to a specific child. If it succeeds, replace the child
    with the new value; otherwise, fail. The child is required to exist."""

    get_children: Callable[[Any], Dict[str, Any]]
    construct: Callable[[type, Dict[str, Any]], Any]
    child: str
    rule: Rule

    def __init__(
        self,
        child: str,
        rule: Rule,
        get_children: Callable[[Any], Dict[str, Any]],
        construct: Callable[[type, Dict[str, Any]], Any],
        name: str = "specific_child",
    ) -> None:
        Rule.__init__(self, name)
        self.rule = rule
        self.get_children = get_children
        self.construct = construct
        self.child = child

    def apply(self, test: Any) -> RuleResult:
        children = self.get_children(test)
        assert self.child in children
        value = children[self.child]
        result = self.rule.apply(value)
        if result.is_fail():
            return Fail(test)
        new_value = result.expect_success()
        if new_value is value:
            return Success(test, test)
        new_values = children.copy()
        new_values[self.child] = new_value
        return Success(test, self.construct(type(test), new_values))

    def __str__(self) -> str:
        return f"specific_child( {self.child}, {str(self.rule)} )"

    def always_succeeds(self) -> bool:
        return self.rule.always_succeeds()


class RuleDomain:
    get_children: Callable[[Any], Dict[str, Any]]
    construct: Callable[[type, Dict[str, Any]], Any]

    def __init__(
        self,
        get_children: Callable[[Any], Dict[str, Any]],
        construct: Callable[[type, Dict[str, Any]], Any],
    ) -> None:
        self.get_children = get_children
        self.construct = construct

    def all_children(self, rule: Rule, name: str = "all_children") -> Rule:
        return AllChildren(rule, self.get_children, self.construct, name)

    def some_children(self, rule: Rule, name: str = "some_children") -> Rule:
        return if_then(
            is_list,
            SomeListMembers(rule),
            SomeChildren(rule, self.get_children, self.construct, name),
        )

    def one_child(self, rule: Rule, name: str = "one_child") -> Rule:
        return if_then(
            is_list,
            OneListMember(rule),
            OneChild(rule, self.get_children, self.construct, name),
        )

    def specific_child(
        self, child: str, rule: Rule, name: str = "specific_child"
    ) -> Rule:
        """Apply a rule to a specific child. If it succeeds, replace the child
        with the new value; otherwise, fail. The child is required to exist."""
        return SpecificChild(child, rule, self.get_children, self.construct, name)

    # CONSIDER: Should we implement a class for bottom-up traversal, so that
    # CONSIDER: there is a place to put a breakpoint, and so on?
    def bottom_up(self, rule: Rule, name: str = "bottom_up") -> Rule:
        """The bottom-up combinator applies a rule to all leaves, then to the
        rewritten parent, and so on up to the root."""
        return Compose(
            self.all_children(Recursive(lambda: self.bottom_up(rule, name))), rule
        )

    # CONSIDER: Similarly.
    def top_down(self, rule: Rule, name: str = "top_down") -> Rule:
        """The top-down combinator applies a rule to the root, then to the new
        root's children, and so on down to the leaves. It succeeds iff the rule
        succeeds on every node."""
        return Compose(
            rule, self.all_children(Recursive(lambda: self.top_down(rule, name))), name
        )

    def some_top_down(self, rule: Rule, name: str = "some_top_down") -> Rule:
        """The some-top-down combinator is like top_down, in that it applies a
        rule to every node in the tree starting from the top. However, top_down
        requires that the rule succeed for all nodes in the tree; some_top_down
        applies the rule to as many nodes in the tree as possible, leaves alone
        nodes for which it fails (aside from possibly rewriting the children),
        and fails only if the rule applied to no node."""
        # This combinator is particularly useful because it ensures that
        # either progress is made, or the rule fails, and it makes as much
        # progress as possible on each attempt.
        #
        # Note that many(some_top_down(rule)) is a fixpoint combinator.
        return either_or_both(
            rule,
            self.some_children(Recursive(lambda: self.some_top_down(rule, name)), name),
        )

    def some_bottom_up(self, rule: Rule, name: str = "some_bottom_up") -> Rule:
        """The some-bottom-up combinator is like bottom_up, in that it applies
        a rule to every node in the tree starting from the leaves. However,
        bottom_up requires that the rule succeed for all nodes in the tree;
        some_bottom_up applies the rule to as many nodes in the tree as
        possible, leaves alone nodes for which it fails, and fails only if the
        rule applied to no node."""
        # This combinator is particularly useful because it ensures that
        # either progress is made, or the rule fails, and it makes as much
        # progress as possible on each attempt.
        #
        # Note that many(some_bottom_up(rule)) is a fixpoint combinator.
        return either_or_both(
            self.some_children(
                Recursive(lambda: self.some_bottom_up(rule, name)), name
            ),
            rule,
        )

    # CONSIDER: Similarly.
    def down_then_up(
        self, pre_rule: Rule, post_rule: Rule, name: str = "down_then_up"
    ) -> Rule:
        """The down-then-up combinator is a combination of the bottom-up and
        top-down combinators; it applies the 'pre' rule in a top-down traversal
        and then the 'post' rule on the way back up."""
        return Compose(
            Compose(
                pre_rule,
                self.all_children(
                    Recursive(lambda: self.down_then_up(pre_rule, post_rule, name))
                ),
            ),
            post_rule,
        )

    def descend_until(
        self, test: Rule, rule: Rule, name: str = "descend_until"
    ) -> Rule:
        """descend_until starts at the top of the tree and descends down it
        checking every node to see if "test" succeeds. If it does, it stops
        descending and runs "rule" on that node. It does this on every node
        that meets the test starting from the root."""
        return if_then(
            test,
            rule,
            self.all_children(Recursive(lambda: self.descend_until(test, rule))),
        )
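

# A usage sketch of RuleDomain over a toy tree of nested tuples (a
# hypothetical domain, for illustration; the compiler's real domain is
# ast_domain over Python ASTs, defined in ast_patterns.py):
def _example_domain() -> None:
    def children(node: Any) -> Dict[str, Any]:
        if isinstance(node, tuple):
            return {str(i): c for i, c in enumerate(node)}
        return {}

    def construct(typ: type, new_children: Dict[str, Any]) -> Any:
        return tuple(new_children[str(i)] for i in range(len(new_children)))

    domain = RuleDomain(children, construct)
    # Halve every even leaf, bottom-up; odd leaves are left alone.
    rule = domain.some_bottom_up(_HalveEven())
    assert rule.apply((4, (6, 3))).expect_success() == (2, (3, 3))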
beanmachine-main
src/beanmachine/ppl/compiler/rules.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import beanmachine.ppl.compiler.bmg_nodes as bn
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.fix_problem import (
    Inapplicable,
    node_fixer_first_match,
    NodeFixer,
    NodeFixerResult,
    type_guard,
)
from beanmachine.ppl.compiler.lattice_typer import LatticeTyper


class BoolArithmeticFixer:
    _bmg: BMGraphBuilder
    _typer: LatticeTyper

    def __init__(self, bmg: BMGraphBuilder, typer: LatticeTyper) -> None:
        self._bmg = bmg
        self._typer = typer

    def _fix_multiplication(self, n: bn.MultiplicationNode) -> NodeFixerResult:
        # We can simplify 1*anything, 0*anything or bool*anything
        # to anything, 0, or an if-then-else respectively.
        # TODO: We could extend this to multiary multiplication.
        if len(n.inputs) != 2:
            return Inapplicable
        if bn.is_zero(n.inputs[0]):
            return n.inputs[0]
        if bn.is_one(n.inputs[0]):
            return n.inputs[1]
        if bn.is_zero(n.inputs[1]):
            return n.inputs[1]
        if bn.is_one(n.inputs[1]):
            return n.inputs[0]
        if self._typer.is_bool(n.inputs[0]):
            zero = self._bmg.add_constant(0.0)
            return self._bmg.add_if_then_else(n.inputs[0], n.inputs[1], zero)
        if self._typer.is_bool(n.inputs[1]):
            zero = self._bmg.add_constant(0.0)
            return self._bmg.add_if_then_else(n.inputs[1], n.inputs[0], zero)
        return Inapplicable

    def _fix_addition(self, n: bn.AdditionNode) -> NodeFixerResult:
        # We can simplify 0+anything.
        # TODO: We could extend this to multiary addition.
        if len(n.inputs) != 2:
            return Inapplicable
        if bn.is_zero(n.inputs[0]):
            return n.inputs[1]
        if bn.is_zero(n.inputs[1]):
            return n.inputs[0]
        return Inapplicable

    def _fix_power(self, n: bn.PowerNode) -> NodeFixerResult:
        # x ** b --> if b then x else 1
        if self._typer.is_bool(n.right):
            one = self._bmg.add_constant(1.0)
            return self._bmg.add_if_then_else(n.right, n.left, one)
        return Inapplicable


def bool_arithmetic_fixer(bmg: BMGraphBuilder, typer: LatticeTyper) -> NodeFixer:
    baf = BoolArithmeticFixer(bmg, typer)
    return node_fixer_first_match(
        [
            type_guard(bn.AdditionNode, baf._fix_addition),
            type_guard(bn.MultiplicationNode, baf._fix_multiplication),
            type_guard(bn.PowerNode, baf._fix_power),
        ]
    )
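

# A usage sketch (illustrative): given a graph in which a Boolean-typed
# node b feeds ordinary arithmetic, this fixer rewrites
#
#     b * x   -->  if b then x else 0.0
#     x ** b  -->  if b then x else 1.0
#     0 + x   -->  x
#
# because BMG has no arithmetic on bools but does support if-then-else.
# A driver obtains the fixer and applies it to candidate nodes; the result
# is either a replacement node or Inapplicable:
#
#   fixer = bool_arithmetic_fixer(bmg, LatticeTyper())
#   result = fixer(node)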
beanmachine-main
src/beanmachine/ppl/compiler/fix_bool_arithmetic.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from collections import defaultdict
from typing import Dict, List

from beanmachine.ppl.compiler import bmg_nodes as bn
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.fix_problems import fix_problems
from beanmachine.ppl.compiler.internal_error import InternalError

_node_type_to_distribution = {
    bn.BernoulliNode: "torch.distributions.Bernoulli",
    bn.BetaNode: "torch.distributions.Beta",
    bn.NormalNode: "torch.distributions.Normal",
}

_node_type_to_operator = {
    bn.MultiplicationNode: "torch.multiply",
    bn.AdditionNode: "torch.add",
    bn.DivisionNode: "torch.div",
    bn.ToRealNode: "",
}


class ToBMPython:
    code: str
    _code: List[str]
    bmg: BMGraphBuilder
    node_to_var_id: Dict[bn.BMGNode, int]
    node_to_func_id: Dict[bn.BMGNode, int]
    dist_to_rv_id: Dict[bn.BMGNode, int]
    no_dist_samples: Dict[bn.BMGNode, int]
    queries: List[str]
    observations: List[str]

    def __init__(self, bmg: BMGraphBuilder) -> None:
        self.code = ""
        self._code = [
            "import beanmachine.ppl as bm",
            "import torch",
        ]
        self.bmg = bmg
        self.node_to_var_id = {}
        self.node_to_func_id = {}
        self.dist_to_rv_id = {}
        self.no_dist_samples = defaultdict(lambda: 0)
        self.queries = []
        self.observations = []

    def _get_node_id_mapping(self, node: bn.BMGNode) -> str:
        if node in self.node_to_var_id:
            return f"v{self.node_to_var_id[node]}"
        elif node in self.node_to_func_id:
            return f"f{self.node_to_func_id[node]}()"
        else:
            raise InternalError(f"Unsupported node type {node}")

    def _get_id(self) -> int:
        return len(self.node_to_var_id) + len(self.node_to_func_id)

    def _no_dist_samples(self, node: bn.DistributionNode) -> int:
        return sum(isinstance(o, bn.SampleNode) for o in node.outputs.items)

    def _inputs(self, node: bn.BMGNode) -> str:
        input_seq = []
        for x in node.inputs:
            if isinstance(x, bn.SampleNode):
                input_seq.append(
                    f"{self._get_node_id_mapping(x)}.wrapper(*{self._get_node_id_mapping(x)}.arguments)"
                )
            else:
                input_seq.append(self._get_node_id_mapping(x))
        inputs = ", ".join(input_seq)
        return inputs

    def _add_constant(self, node: bn.ConstantNode) -> None:
        var_id = self._get_id()
        self.node_to_var_id[node] = var_id
        t = type(node)
        v = node.value
        if (
            t is bn.PositiveRealNode
            or t is bn.NegativeRealNode
            or t is bn.ProbabilityNode
            or t is bn.RealNode
        ):
            f = str(float(v))
        elif t is bn.NaturalNode:
            f = str(int(v))
        else:
            f = str(float(v))
        self._code.append(f"v{var_id} = {f}")

    def _add_distribution(self, node: bn.DistributionNode) -> None:
        distr_type = _node_type_to_distribution[type(node)]  # pyre-ignore
        i = self._inputs(node)
        no_dist_samples = self._no_dist_samples(node)
        rv_id = len(self.dist_to_rv_id)
        self.dist_to_rv_id[node] = rv_id
        if no_dist_samples > 1:
            param = "i"
        else:
            param = ""
        self._code.append(
            f"@bm.random_variable\ndef rv{rv_id}({param}):\n\treturn {distr_type}({i})"
        )

    def _add_operator(self, node: bn.OperatorNode) -> None:
        var_id = self._get_id()
        operator_type = _node_type_to_operator[type(node)]  # pyre-ignore
        i = self._inputs(node)
        has_samples = any(isinstance(x, bn.SampleNode) for x in node.inputs)
        if has_samples:
            self.node_to_func_id[node] = var_id
            self._code.append(
                f"@bm.functional\ndef f{var_id}():\n\treturn {operator_type}({i})"
            )
        else:
            self.node_to_var_id[node] = var_id
            self._code.append(f"v{var_id} = {operator_type}({i})")

    def _add_sample(self, node: bn.SampleNode) -> None:
        var_id = self._get_id()
        self.node_to_var_id[node] = var_id
        rv_id = self.dist_to_rv_id[node.operand]
        self.no_dist_samples[node.operand] += 1
        total_samples = self._no_dist_samples(node.operand)
        if total_samples > 1:
            param = f"{self.no_dist_samples[node.operand]}"
        else:
            param = ""
        self._code.append(f"v{var_id} = rv{rv_id}({param})")

    def _add_query(self, node: bn.Query) -> None:
        self.queries.append(f"{self._get_node_id_mapping(node.operator)}")

    def _add_observation(self, node: bn.Observation) -> None:
        val = node.value
        # We need this cast since BMG requires boolean observations to be True/False
        # TODO: Implement selective graph fixing depending on the backend
        if isinstance(val, bool):
            val = float(val)
        self.observations.append(
            f"{self._get_node_id_mapping(node.observed)} : torch.tensor({val})"
        )

    def _generate_python(self, node: bn.BMGNode) -> None:
        if isinstance(node, bn.ConstantNode):
            self._add_constant(node)
        elif isinstance(node, bn.DistributionNode):
            self._add_distribution(node)
        elif isinstance(node, bn.SampleNode):
            self._add_sample(node)
        elif isinstance(node, bn.OperatorNode):
            self._add_operator(node)
        elif isinstance(node, bn.Query):
            self._add_query(node)
        elif isinstance(node, bn.Observation):
            self._add_observation(node)

    def _generate_bm_python(self) -> str:
        bmg, error_report = fix_problems(self.bmg)
        self.bmg = bmg
        error_report.raise_errors()
        for node in self.bmg.all_ancestor_nodes():
            self._generate_python(node)
        self._code.append(f"queries = [{(','.join(self.queries))}]")
        self._code.append(f"observations = {{{','.join(self.observations)}}}")
        self.code = "\n".join(self._code)
        return self.code


def to_bm_python(bmg: BMGraphBuilder) -> str:
    bmp = ToBMPython(bmg)
    return bmp._generate_bm_python()
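

# An illustrative sketch of the output for a tiny model -- a single
# Bernoulli(0.5) sample that is queried once. Exact identifiers depend on
# the node visit order, so this is only representative:
#
#   import beanmachine.ppl as bm
#   import torch
#   v0 = 0.5
#   @bm.random_variable
#   def rv0():
#       return torch.distributions.Bernoulli(v0)
#   v1 = rv0()
#   queries = [v1]
#   observations = {}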
beanmachine-main
src/beanmachine/ppl/compiler/gen_bm_python.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import collections
import typing
from typing import Callable, Dict, List, Optional, Type

import beanmachine.ppl.compiler.bmg_nodes as bn
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.error_report import ErrorReport
from beanmachine.ppl.compiler.execution_context import ExecutionContext, FunctionCall
from beanmachine.ppl.compiler.sizer import Sizer

TransformAssessment = collections.namedtuple(
    "TransformAssessment", ["node_needs_transform", "error_report"]
)


def flatten(
    inputs: List[Optional[typing.Union[bn.BMGNode, List[bn.BMGNode]]]]
) -> List[bn.BMGNode]:
    parents = []
    for input in inputs:
        if input is None:
            continue
        if isinstance(input, List):
            for i in input:
                parents.append(i)
        else:
            parents.append(input)
    return parents


class Cloner:
    def __init__(self, original: BMGraphBuilder):
        self.bmg_original = original
        self.bmg = BMGraphBuilder(ExecutionContext())
        self.bmg._fix_observe_true = self.bmg_original._fix_observe_true
        self.sizer = Sizer()
        self.node_factories = _node_factories(self.bmg)
        self.value_factories = _constant_factories(self.bmg)
        self.copy_context = {}

    def clone(self, original: bn.BMGNode, parents: List[bn.BMGNode]) -> bn.BMGNode:
        if type(original) in self.value_factories:
            if isinstance(original, bn.ConstantNode):
                image = self.value_factories[type(original)](original.value)
            else:
                raise ValueError(
                    f"Internal compiler error. The type {type(original)} should not "
                    "be in the value factory because it does not have a value attribute"
                )
        elif isinstance(original, bn.Observation):
            assert len(parents) == 1
            sample = parents[0]
            if isinstance(sample, bn.SampleNode):
                image = self.bmg.add_observation(sample, original.value)
            else:
                raise ValueError("observations must have a sample operand")
        elif isinstance(original, bn.Query):
            assert len(parents) == 1
            return self.bmg.add_query(parents[0], original.rv_identifier)
        elif isinstance(original, bn.TensorNode):
            image = self.bmg.add_tensor(self.sizer[original], *parents)
        else:
            image = self.node_factories[type(original)](*parents)
        locations = self.bmg_original.execution_context.node_locations(original)
        for site in locations:
            new_args = []
            for arg in site.args:
                if arg in self.copy_context:
                    new_args.append(self.copy_context[arg])
                else:
                    # TODO: error out instead? it's possible that multiple
                    # nodes replace a single node
                    new_args.append(arg)
            new_site = FunctionCall(site.func, new_args, {})
            self.bmg.execution_context.record_node_call(image, new_site)
        self.copy_context[original] = image
        return image


def _node_factories(bmg: BMGraphBuilder) -> Dict[Type, Callable]:
    return {
        bn.BernoulliLogitNode: bmg.add_bernoulli_logit,
        bn.BernoulliNode: bmg.add_bernoulli,
        bn.BetaNode: bmg.add_beta,
        bn.BinomialNode: bmg.add_binomial,
        bn.BinomialLogitNode: bmg.add_binomial_logit,
        bn.CategoricalNode: bmg.add_categorical,
        bn.CategoricalLogitNode: bmg.add_categorical_logit,
        bn.Chi2Node: bmg.add_chi2,
        bn.DirichletNode: bmg.add_dirichlet,
        bn.GammaNode: bmg.add_gamma,
        bn.HalfCauchyNode: bmg.add_halfcauchy,
        bn.HalfNormalNode: bmg.add_halfnormal,
        bn.NormalNode: bmg.add_normal,
        bn.PoissonNode: bmg.add_poisson,
        bn.StudentTNode: bmg.add_studentt,
        bn.UniformNode: bmg.add_uniform,
        bn.AdditionNode: bmg.add_addition,
        bn.BitAndNode: bmg.add_bitand,
        bn.BitOrNode: bmg.add_bitor,
        bn.BitXorNode: bmg.add_bitxor,
        bn.BroadcastNode: bmg.add_broadcast,
        bn.CholeskyNode: bmg.add_cholesky,
        bn.ColumnIndexNode: bmg.add_column_index,
        bn.ComplementNode: bmg.add_complement,
        bn.DivisionNode: bmg.add_division,
        bn.ElementwiseMultiplyNode: bmg.add_elementwise_multiplication,
        bn.EqualNode: bmg.add_equal,
        bn.Exp2Node: bmg.add_exp2,
        bn.ExpNode: bmg.add_exp,
        bn.ExpM1Node: bmg.add_expm1,
        bn.ExpProductFactorNode: bmg.add_exp_product,
        bn.FillMatrixNode: bmg.add_fill_matrix,
        bn.GreaterThanNode: bmg.add_greater_than,
        bn.GreaterThanEqualNode: bmg.add_greater_than_equal,
        bn.IfThenElseNode: bmg.add_if_then_else,
        bn.IsNode: bmg.add_is,
        bn.IsNotNode: bmg.add_is_not,
        bn.ItemNode: bmg.add_item,
        bn.IndexNode: bmg.add_index,
        bn.InNode: bmg.add_in,
        bn.InvertNode: bmg.add_invert,
        bn.LessThanNode: bmg.add_less_than,
        bn.LessThanEqualNode: bmg.add_less_than_equal,
        bn.LKJCholeskyNode: bmg.add_lkj_cholesky,
        bn.LogAddExpNode: bmg.add_logaddexp,
        bn.LogisticNode: bmg.add_logistic,
        bn.Log10Node: bmg.add_log10,
        bn.Log1pNode: bmg.add_log1p,
        bn.Log2Node: bmg.add_log2,
        bn.Log1mexpNode: bmg.add_log1mexp,
        bn.LogSumExpVectorNode: bmg.add_logsumexp_vector,
        bn.LogProbNode: bmg.add_log_prob,
        bn.LogNode: bmg.add_log,
        bn.LogSumExpTorchNode: bmg.add_logsumexp_torch,
        bn.LShiftNode: bmg.add_lshift,
        bn.MatrixAddNode: bmg.add_matrix_addition,
        bn.MatrixComplementNode: bmg.add_matrix_complement,
        bn.MatrixExpNode: bmg.add_matrix_exp,
        bn.MatrixLogNode: bmg.add_matrix_log,
        bn.MatrixLog1mexpNode: bmg.add_matrix_log1mexp,
        bn.MatrixMultiplicationNode: bmg.add_matrix_multiplication,
        bn.MatrixNegateNode: bmg.add_matrix_negate,
        bn.MatrixPhiNode: bmg.add_matrix_phi,
        bn.MatrixScaleNode: bmg.add_matrix_scale,
        bn.MatrixSumNode: bmg.add_matrix_sum,
        bn.ModNode: bmg.add_mod,
        bn.MultiplicationNode: bmg.add_multiplication,
        bn.NegateNode: bmg.add_negate,
        bn.NotEqualNode: bmg.add_not_equal,
        bn.NotNode: bmg.add_not,
        bn.NotInNode: bmg.add_not_in,
        bn.PhiNode: bmg.add_phi,
        bn.PowerNode: bmg.add_power,
        bn.RShiftNode: bmg.add_rshift,
        bn.SampleNode: bmg.add_sample,
        bn.SquareRootNode: bmg.add_squareroot,
        bn.SwitchNode: bmg.add_switch,
        bn.SumNode: bmg.add_sum,
        bn.ToMatrixNode: bmg.add_to_matrix,
        bn.ToNegativeRealMatrixNode: bmg.add_to_negative_real_matrix,
        bn.ToPositiveRealMatrixNode: bmg.add_to_positive_real_matrix,
        bn.ToRealMatrixNode: bmg.add_to_real_matrix,
        bn.TransposeNode: bmg.add_transpose,
        bn.ToPositiveRealNode: bmg.add_to_positive_real,
        bn.ToRealNode: bmg.add_to_real,
        bn.VectorIndexNode: bmg.add_vector_index,
    }


def _constant_factories(bmg: BMGraphBuilder) -> Dict[Type, Callable]:
    return {
        bn.NegativeRealNode: bmg.add_neg_real,
        bn.NaturalNode: bmg.add_natural,
        bn.ConstantNode: bmg.add_constant,
        bn.RealNode: bmg.add_real,
        bn.PositiveRealNode: bmg.add_pos_real,
        bn.ProbabilityNode: bmg.add_probability,
        bn.ConstantBooleanMatrixNode: bmg.add_boolean_matrix,
        bn.ConstantNaturalMatrixNode: bmg.add_natural_matrix,
        bn.ConstantNegativeRealMatrixNode: bmg.add_neg_real_matrix,
        bn.ConstantProbabilityMatrixNode: bmg.add_probability_matrix,
        bn.ConstantSimplexMatrixNode: bmg.add_simplex,
        bn.ConstantPositiveRealMatrixNode: bmg.add_pos_real_matrix,
        bn.ConstantRealMatrixNode: bmg.add_real_matrix,
        bn.ConstantTensorNode: bmg.add_constant_tensor,
        bn.UntypedConstantNode: bmg.add_constant,
    }


class NodeTransformer:
    def assess_node(
        self, node: bn.BMGNode, original: BMGraphBuilder
    ) -> TransformAssessment:
        raise NotImplementedError("this is an abstract base class")

    # a node is either replaced 1-1, 1-many, or deleted
    def transform_node(
        self, node: bn.BMGNode, new_inputs: List[bn.BMGNode]
    ) -> typing.Optional[typing.Union[bn.BMGNode, List[bn.BMGNode]]]:
        raise NotImplementedError("this is an abstract base class")


def copy_and_replace(
    bmg_original: BMGraphBuilder,
    transformer_creator: Callable[[Cloner, Sizer], NodeTransformer],
) -> typing.Tuple[BMGraphBuilder, ErrorReport]:
    cloner = Cloner(bmg_original)
    transformer = transformer_creator(cloner, cloner.sizer)
    for original in bmg_original.all_nodes():
        inputs = []
        for c in original.inputs.inputs:
            inputs.append(cloner.copy_context[c])
        assessment = transformer.assess_node(original, cloner.bmg_original)
        if len(assessment.error_report.errors) > 0:
            return cloner.bmg, assessment.error_report
        elif assessment.node_needs_transform:
            image = transformer.transform_node(original, inputs)
        else:
            parents = flatten(inputs)
            image = cloner.clone(original, parents)
        if original not in cloner.copy_context:
            cloner.copy_context[original] = image
    return cloner.bmg, ErrorReport()
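

# A minimal sketch of a NodeTransformer (illustrative): a transformer that
# never transforms anything, so copy_and_replace degenerates into a pure
# clone of the graph.
class _IdentityTransformer(NodeTransformer):
    def assess_node(
        self, node: bn.BMGNode, original: BMGraphBuilder
    ) -> TransformAssessment:
        return TransformAssessment(False, ErrorReport())

    def transform_node(
        self, node: bn.BMGNode, new_inputs: List[bn.BMGNode]
    ) -> typing.Optional[typing.Union[bn.BMGNode, List[bn.BMGNode]]]:
        return node


# Usage: cloned, errors = copy_and_replace(bmg, lambda c, s: _IdentityTransformer())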
beanmachine-main
src/beanmachine/ppl/compiler/copy_and_replace.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""This module takes a Bean Machine Graph builder and makes a best effort
attempt to transform the accumulated graph to meet the requirements of the
BMG type system. All possible transformations are made; if there are nodes
that cannot be represented in BMG or cannot be made to meet type
requirements, an error report is returned."""

from typing import Optional

import beanmachine.ppl.compiler.bmg_nodes as bn
import beanmachine.ppl.compiler.bmg_types as bt
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.bmg_node_types import is_supported_by_bmg
from beanmachine.ppl.compiler.bmg_requirements import EdgeRequirements
from beanmachine.ppl.compiler.error_report import ErrorReport, Violation
from beanmachine.ppl.compiler.graph_labels import get_edge_labels
from beanmachine.ppl.compiler.internal_error import InternalError
from beanmachine.ppl.compiler.lattice_typer import LatticeTyper


def _is_real_matrix(t: bt.BMGLatticeType) -> bool:
    return any(
        isinstance(t, m)
        for m in {
            bt.RealMatrix,
            bt.PositiveRealMatrix,
            bt.NegativeRealMatrix,
            bt.ProbabilityMatrix,
        }
    )


def _is_pos_real_matrix(t: bt.BMGLatticeType) -> bool:
    return any(
        isinstance(t, m)
        for m in {
            bt.PositiveRealMatrix,
            bt.ProbabilityMatrix,
        }
    )


class RequirementsFixer:
    """This class takes a Bean Machine Graph builder and attempts to fix
    violations of BMG type system requirements.

    The basic idea is that every *edge* in the graph has a *requirement*,
    such as "the type of the input must be Probability". We do a traversal
    of the input edges of every node in the graph; if the input node meets
    the requirement, it is unchanged. If it does not, then a new node that
    has the same semantics and meets the requirement is returned. If there
    is no such node then an error is added to the error report."""

    errors: ErrorReport
    bmg: BMGraphBuilder
    _typer: LatticeTyper
    _reqs: EdgeRequirements

    def __init__(self, bmg: BMGraphBuilder, typer: LatticeTyper) -> None:
        self.errors = ErrorReport()
        self.bmg = bmg
        self._typer = typer
        self._reqs = EdgeRequirements(typer)

    def _type_meets_requirement(self, t: bt.BMGLatticeType, r: bt.Requirement) -> bool:
        assert t != bt.Untypable
        if r is bt.any_requirement:
            return True
        if r is bt.any_real_matrix:
            return _is_real_matrix(t)
        if r is bt.any_pos_real_matrix:
            return _is_pos_real_matrix(t)
        if isinstance(r, bt.UpperBound):
            return bt.supremum(t, r.bound) == r.bound
        if isinstance(r, bt.AlwaysMatrix):
            return t == r.bound
        if r == bt.BooleanMatrix:
            return isinstance(t, bt.BooleanMatrix)
        if r == bt.ProbabilityMatrix:
            return isinstance(t, bt.ProbabilityMatrix)
        if r == bt.SimplexMatrix:
            return isinstance(t, bt.SimplexMatrix)
        return t == r

    def _node_meets_requirement(self, node: bn.BMGNode, r: bt.Requirement) -> bool:
        lattice_type = self._typer[node]
        assert lattice_type is not bt.Untypable
        if isinstance(r, bt.AlwaysMatrix):
            return self._typer.is_matrix(node) and self._type_meets_requirement(
                lattice_type, r.bound
            )
        if r is bt.any_real_matrix or r is bt.any_pos_real_matrix:
            return self._typer.is_matrix(node) and self._type_meets_requirement(
                lattice_type, r
            )
        return self._type_meets_requirement(lattice_type, r)

    def _try_to_meet_constant_requirement(
        self,
        node: bn.ConstantNode,
        requirement: bt.Requirement,
    ) -> Optional[bn.BMGNode]:
        # We have a constant node that either (1) is untyped, and therefore
        # needs to be replaced by an equivalent typed node, or (2) is typed
        # but is of the wrong type, and needs to be replaced by an equivalent
        # constant of a larger type.
        #
        # Obtain a type for the node. If the node meets an upper bound requirement
        # then it has a value that can be converted to the desired type. If it
        # does not meet an UB requirement then there is no equivalent constant
        # node of the correct type and we give an error.
        it = self._typer[node]
        # NOTE: By this point we should have already rejected any graph that contains
        # a reachable but untypable constant node. See comment in fix_unsupported
        # regarding UntypedConstantNode support.

        # Are we trying to use a constant of the wrong size? Try to broadcast the
        # constant to the correct size first.
        result = self._try_to_broadcast(node, requirement)
        if result is not None:
            return result

        if requirement is bt.any_real_matrix:
            if _is_real_matrix(it):
                # It's already an R, R+, R- or P matrix, but it might be a single
                # value. Ensure that it is marked as a matrix, not a single value.
                assert isinstance(it, bt.BMGMatrixType)
                return self.bmg.add_constant_of_matrix_type(node.value, it)
            else:
                # It's some other type, such as Boolean or Natural matrix.
                # Emit the value as the equivalent real matrix:
                return self.bmg.add_real_matrix(node.value)

        if requirement is bt.any_pos_real_matrix:
            if _is_pos_real_matrix(it):
                assert isinstance(it, bt.BMGMatrixType)
                return self.bmg.add_constant_of_matrix_type(node.value, it)
            else:
                return self.bmg.add_pos_real_matrix(node.value)

        if self._type_meets_requirement(it, bt.upper_bound(requirement)):
            if requirement is bt.any_requirement:
                # The lattice type of the constant might be Zero or One; in that
                # case, generate a bool constant node.
                required_type = bt.lattice_to_bmg(it)
            else:
                required_type = bt.requirement_to_type(requirement)
            if bt.must_be_matrix(requirement):
                assert isinstance(required_type, bt.BMGMatrixType)
                result = self.bmg.add_constant_of_matrix_type(node.value, required_type)
            else:
                result = self.bmg.add_constant_of_type(node.value, required_type)
            assert self._node_meets_requirement(result, requirement)
            return result

        return None

    def _meet_constant_requirement(
        self,
        node: bn.ConstantNode,
        requirement: bt.Requirement,
        consumer: bn.BMGNode,
        edge: str,
    ) -> bn.BMGNode:
        result = self._try_to_meet_constant_requirement(node, requirement)
        if result is not None:
            return result
        # We cannot convert this node to any type that meets the requirement.
        # Add an error.
        self.errors.add_error(
            Violation(
                node,
                self._typer[node],
                requirement,
                consumer,
                edge,
                self.bmg.execution_context.node_locations(consumer),
            )
        )
        return node

    def _convert_operator_to_atomic_type(
        self,
        node: bn.OperatorNode,
        requirement: bt.BMGLatticeType,
    ) -> bn.BMGNode:
        # We have been given a node which does not meet a requirement,
        # but it can be converted to a node which does meet the requirement
        # that has the same semantics. Start by confirming those preconditions.
        node_type = self._typer[node]
        assert node_type != requirement
        assert bt.is_convertible_to(node_type, requirement)

        # Converting anything to real or positive real is easy;
        # there's already a node for that so just insert it on the edge
        # whose requirement is not met, and the requirement will be met.
        if requirement == bt.Real:
            return self.bmg.add_to_real(node)
        if requirement == bt.PositiveReal:
            return self.bmg.add_to_positive_real(node)

        # We are not converting to real or positive real.
        # Our precondition is that the requirement is larger than
        # *something*, which means that it cannot be bool.
        # That means the requirement must be either natural or
        # probability. Verify this.
        assert requirement == bt.Natural or requirement == bt.Probability

        # Our precondition is that the requirement is larger than the
        # node type.
        assert node_type == bt.Boolean

        # There is no "to natural" or "to probability" but since we have
        # a bool in hand, we can use an if-then-else as a conversion.
        zero = self.bmg.add_constant_of_type(0.0, requirement)
        one = self.bmg.add_constant_of_type(1.0, requirement)
        return self.bmg.add_if_then_else(node, one, zero)

    def _convert_operator_to_matrix_type(
        self,
        node: bn.OperatorNode,
        requirement: bt.Requirement,
    ) -> bn.BMGNode:
        if isinstance(requirement, bt.AlwaysMatrix):
            requirement = requirement.bound
        assert isinstance(requirement, bt.BMGMatrixType)

        # We have been given a node which does not meet a requirement,
        # but it can be converted to a node which does meet the requirement
        # that has the same semantics. Start by confirming those preconditions.
        node_type = self._typer[node]
        assert node_type != requirement
        assert bt.is_convertible_to(node_type, requirement)

        # Converting anything to real matrix or positive/negative real matrix is
        # easy; there's already a node for that so just insert it on the edge
        # whose requirement is not met, and the requirement will be met.
        # TODO: We do not yet handle the case where we are converting from, say,
        # an atomic probability to a 1x1 real matrix because in practice this
        # hasn't come up yet. If it does, detect it here and insert a TO_REAL
        # or TO_POS_REAL followed by a TO_MATRIX and create a test case that
        # illustrates the scenario.
        assert self._typer.is_matrix(node)
        if isinstance(requirement, bt.RealMatrix):
            return self.bmg.add_to_real_matrix(node)
        if isinstance(requirement, bt.NegativeRealMatrix):
            return self.bmg.add_to_negative_real_matrix(node)
        # TODO: We do not yet handle the case where we are converting from
        # a matrix of bools to a matrix of naturals or probabilities because
        # in practice this has not come up yet. If it does, we will need
        # to create TO_NATURAL_MATRIX and TO_PROB_MATRIX operators in BMG, or
        # come up with some other way to turn many bools into many naturals
        # or probabilities.
        assert isinstance(requirement, bt.PositiveRealMatrix)
        return self.bmg.add_to_positive_real_matrix(node)

    def _can_force_to_prob(
        self, inf_type: bt.BMGLatticeType, requirement: bt.Requirement
    ) -> bool:
        # Consider the graph created by a call like:
        #
        # Bernoulli(0.5 + some_beta() / 2)
        #
        # The inf types of the addends are both probability, but there is
        # no addition operator on probabilities; we will add these as
        # positive reals, and then get an error when we use it as the parameter
        # to a Bernoulli. But you and I both know that this is a legal
        # probability.
        #
        # To work around this problem, if we have a *real* or *positive real* used
        # in a situation where a *probability* is required, we insert an explicit
        # "clamp this real to a probability" operation.
        #
        # TODO: We might want to restrict this. For example, if we have
        #
        # Bernoulli(some_normal())
        #
        # then it seems plausible that we ought to produce an error here rather
        # than clamping the result to a probability. We could allow this feature
        # only in situations where there was some operator other than a sample,
        # for instance.
        #
        # TODO: We might want to build a warning mechanism that informs the
        # developer of the possibility that they've gotten something wrong here.
        return (
            requirement == bt.Probability
            or requirement == bt.upper_bound(bt.Probability)
        ) and (inf_type == bt.Real or inf_type == bt.PositiveReal)

    def _can_force_to_neg_real(
        self, node_type: bt.BMGLatticeType, requirement: bt.Requirement
    ) -> bool:
        # See notes in method above; we have a similar situation but for
        # negative reals. Consider for example
        #
        # p = beta() * 0.5 + 0.4  # Sum of two probs is a positive real
        # lp = log(p)             # log of positive real is real
        # x = log1mexp(lp)        # error, log1mexp requires negative real
        #
        # Failure to deduce that p is probability leads to a seemingly
        # unrelated error later on.
        #
        # If we require a negative real but we have a real, insert a TO_NEG_REAL
        # node to do a runtime check.
        return (
            requirement == bt.NegativeReal
            or requirement == bt.upper_bound(bt.NegativeReal)
        ) and node_type == bt.Real

    def _try_to_meet_any_real_matrix_requirement(
        self,
        node: bn.OperatorNode,
        requirement: bt.Requirement,
    ) -> Optional[bn.BMGNode]:
        assert not self._node_meets_requirement(node, requirement)
        # Is the requirement that we have a real-valued matrix, but we haven't got
        # a real-valued matrix? Every value can be converted to a real-valued
        # matrix, so just insert the conversion node.
        if requirement is not bt.any_real_matrix:
            return None
        result = self.bmg.add_to_real_matrix(node)
        assert self._node_meets_requirement(result, requirement)
        return result

    def _try_to_meet_any_pos_real_matrix_requirement(
        self,
        node: bn.OperatorNode,
        requirement: bt.Requirement,
    ) -> Optional[bn.BMGNode]:
        assert not self._node_meets_requirement(node, requirement)
        # Is the requirement that we have a pos-real-valued matrix? Anything that
        # is not known to be negative can be a positive real matrix.
        if requirement is not bt.any_pos_real_matrix:
            return None
        node_type = self._typer[node]
        if isinstance(node_type, bt.NegativeRealMatrix):
            return None
        result = self.bmg.add_to_positive_real_matrix(node)
        assert self._node_meets_requirement(result, requirement)
        return result

    def _try_to_meet_upper_bound_requirement(
        self,
        node: bn.OperatorNode,
        requirement: bt.Requirement,
    ) -> Optional[bn.BMGNode]:
        assert not self._node_meets_requirement(node, requirement)
        node_type = self._typer[node]
        if not self._type_meets_requirement(node_type, bt.upper_bound(requirement)):
            return None
        # If we got here then the node did NOT meet the requirement,
        # but its type DID meet an upper bound requirement, which
        # implies that the requirement was not an upper bound requirement.
        assert not isinstance(requirement, bt.UpperBound)
        # We definitely can meet the requirement by inserting some sort
        # of conversion logic. We have different helper methods for
        # the atomic type and matrix type cases.
        if bt.must_be_matrix(requirement):
            result = self._convert_operator_to_matrix_type(node, requirement)
        else:
            assert isinstance(requirement, bt.BMGLatticeType)
            result = self._convert_operator_to_atomic_type(node, requirement)
        assert self._node_meets_requirement(result, requirement)
        return result

    def _try_to_force_to_prob(self, node, requirement) -> Optional[bn.BMGNode]:
        # We cannot make the node meet the requirement "implicitly". We can
        # "explicitly" meet a requirement of probability if we have a
        # real or pos real.
        node_type = self._typer[node]
        if not self._can_force_to_prob(node_type, requirement):
            return None
        assert node_type == bt.Real or node_type == bt.PositiveReal
        assert self._node_meets_requirement(node, node_type)
        return self.bmg.add_to_probability(node)

    def _try_to_force_to_neg_real(self, node, requirement) -> Optional[bn.BMGNode]:
        # We cannot make the node meet the requirement "implicitly". We can
        # "explicitly" meet a requirement of neg real if we have a value we do
        # not know is positive.
        node_type = self._typer[node]
        if not self._can_force_to_neg_real(node_type, requirement):
            return None
        return self.bmg.add_to_negative_real(node)

    def _try_to_matrix_fill(
        self,
        node: bn.BMGNode,
        requirement: bt.Requirement,
    ) -> Optional[bn.BMGNode]:
        # This detects the case where a scalar is used in a context where a
        # matrix is required, and we want to replicate that value into a matrix
        # of the required size.

        # If the requirement is not a matrix type, we cannot fix it here.
        if not isinstance(requirement, bt.BMGMatrixType) or requirement.is_singleton():
            return None
        # If the value is not a scalar, we cannot fix it here.
        node_type = self._typer[node]
        if not isinstance(node_type, bt.BMGMatrixType) or not node_type.is_singleton():
            return None
        # If the value cannot be converted to the matrix element type, we cannot
        # fix it here.
        scalar_req = requirement.with_dimensions(1, 1)
        converted_node = self._try_to_meet_requirement(node, scalar_req)
        if converted_node is None:
            return None
        r = self.bmg.add_natural(requirement.rows)
        c = self.bmg.add_natural(requirement.columns)
        result = self.bmg.add_fill_matrix(converted_node, r, c)
        assert self._node_meets_requirement(result, requirement)
        return result

    def _try_to_broadcast(
        self,
        node: bn.BMGNode,
        requirement: bt.Requirement,
    ) -> Optional[bn.BMGNode]:
        # This detects the case where a matrix of one size is used in a context
        # where a matrix of another size is required, and the problem can be
        # fixed by broadcasting.

        # We have a special case for "broadcasting" from a scalar. Try that first.
        result = self._try_to_matrix_fill(node, requirement)
        if result is not None:
            return result
        # If the requirement is not a matrix type, we cannot fix it here.
        if not isinstance(requirement, bt.BMGMatrixType) or requirement.is_singleton():
            return None
        # If the value is not a matrix, we cannot fix it here.
        node_type = self._typer[node]
        if not isinstance(node_type, bt.BMGMatrixType) or node_type.is_singleton():
            return None
        # If the dimensions are already right, we cannot fix it here.
        if (
            node_type.columns == requirement.columns
            and node_type.rows == requirement.rows
        ):
            return None
        # If the dimensions are not broadcast-compatible, we cannot fix it here.
        if node_type.columns != 1 and node_type.columns != requirement.columns:
            return None
        if node_type.rows != 1 and node_type.rows != requirement.rows:
            return None
        # First attempt to convert the wrong-sized matrix to the desired type, and
        # then broadcast it if that was successful.
        wrong_size_req = requirement.with_dimensions(node_type.rows, node_type.columns)
        converted_node = self._try_to_meet_requirement(node, wrong_size_req)
        if converted_node is None:
            return None
        r = self.bmg.add_natural(requirement.rows)
        c = self.bmg.add_natural(requirement.columns)
        result = self.bmg.add_broadcast(converted_node, r, c)
        assert self._node_meets_requirement(result, requirement)
        return result

    def _try_to_meet_operator_requirement(
        self,
        node: bn.OperatorNode,
        requirement: bt.Requirement,
    ) -> Optional[bn.BMGNode]:
        # We should not have called this function if the input node already meets
        # the requirement on the edge.
        assert not self._node_meets_requirement(node, requirement)

        # Have we got a requirement of a different size than the size of the node?
        # If so, try to broadcast to the required size first.
        result = self._try_to_broadcast(node, requirement)
        if result is not None:
            return result

        # Is the requirement that we have a real-valued matrix, but we haven't got
        # a real-valued matrix? Every value can be converted to a real-valued
        # matrix, so that's an easy case.
        result = self._try_to_meet_any_real_matrix_requirement(node, requirement)
        if result is not None:
            return result

        # Is the requirement that we have any positive real-valued matrix? Every
        # value except negative real scalars and matrices can be converted to a
        # positive real matrix.
        result = self._try_to_meet_any_pos_real_matrix_requirement(node, requirement)
        if result is not None:
            return result

        # If we weaken the requirement to an upper bound requirement, do we meet
        # it? If so, then there is a conversion node we can add.
        result = self._try_to_meet_upper_bound_requirement(node, requirement)
        if result is not None:
            return result

        result = self._try_to_force_to_prob(node, requirement)
        if result is not None:
            return result

        result = self._try_to_force_to_neg_real(node, requirement)
        if result is not None:
            return result

        # We couldn't meet the requirement.
        return None

    def _meet_operator_requirement(
        self,
        node: bn.OperatorNode,
        requirement: bt.Requirement,
        consumer: bn.BMGNode,
        edge: str,
    ) -> bn.BMGNode:
        assert not self._node_meets_requirement(node, requirement)
        result = self._try_to_meet_operator_requirement(node, requirement)
        if result is not None:
            return result
        # We were unable to meet a requirement; add an error.
        node_type = self._typer[node]
        self.errors.add_error(
            Violation(
                node,
                node_type,
                requirement,
                consumer,
                edge,
                self.bmg.execution_context.node_locations(consumer),
            )
        )
        return node

    def _check_requirement_validity(
        self,
        node: bn.BMGNode,
        requirement: bt.Requirement,
        consumer: bn.BMGNode,
        edge: str,
    ) -> None:
        ice = "Internal compiler error in edge requirements checking:\n"

        # These lattice types should never be used as requirements; a type
        # requirement must be a valid BMG type, but these are lattice types used
        # for detecting expressions which are convertible to more than one BMG
        # type.
        if requirement in {bt.Tensor, bt.One, bt.Zero, bt.Untypable}:
            raise InternalError(
                f"{ice} Requirement {requirement} is an invalid requirement."
            )

        # We should never be checking outgoing edge requirements on a node which
        # has zero outgoing edges. If we are, something has gone wrong in the
        # compiler.
        node_type = type(node)
        if node_type in [bn.Observation, bn.Query, bn.FactorNode]:
            raise InternalError(
                f"{ice} Node of type {node_type.__name__} is being checked for "
                + f"requirements but should never have an outgoing edge '{edge}'."
            )

        # The remaining checks determine if a precondition of the requirements
        # checker is not met. It is always valid to have a constant node even if
        # unsupported by BMG. The requirements checker will replace it with an
        # equivalent node with a valid BMG type if necessary, in
        # _meet_constant_requirement above.
        if isinstance(node, bn.ConstantNode):
            return

        # If we get here then the node is not a constant. Leaving aside constants,
        # we should never ask the requirements checker to consider the requirements
        # on an outgoing edge from a node that BMG does not even support. The
        # unsupported node fixer should already have removed all such nodes.
        if not is_supported_by_bmg(node):
            raise InternalError(
                f"{ice} Node of type {node_type.__name__} is being checked for "
                + "requirements but is not supported by BMG; the unsupported node "
                + "checker should already have either replaced the node or produced "
                + "an error."
            )

        # If we get here then the node is supported by BMG. The requirements
        # checker needs to know the lattice type of the node in order to check
        # whether it meets the requirement, even if the requirement is "any".
        # If this throws then you probably need to implement type analysis in
        # the lattice typer.

        # CONSIDER: Note that we do not make a distinction here between "the
        # lattice typer simply does not have the code to type check this node"
        # and "the lattice typer tried but failed". If it is important to make
        # that distinction then we need to have two different "untyped" objects,
        # one representing "not implemented" and one representing "failure".
        lattice_type = self._typer[node]
        if lattice_type is bt.Untypable:
            raise InternalError(
                f"{ice} Node of type {node_type.__name__} is being checked for "
                + "requirements but the lattice typer is unable to assign it a "
                + "type. Requirements checking always needs to know the lattice "
                + "type of a node when checking requirements on its outgoing edges."
            )

    def _try_to_meet_requirement(
        self,
        node: bn.BMGNode,
        requirement: bt.Requirement,
    ) -> Optional[bn.BMGNode]:
        # Attempts to meet a requirement; returns None if it cannot, rather than
        # producing an error.
        if self._node_meets_requirement(node, requirement):
            return node
        if isinstance(node, bn.ConstantNode):
            return self._try_to_meet_constant_requirement(node, requirement)
        assert isinstance(node, bn.OperatorNode)
        return self._try_to_meet_operator_requirement(node, requirement)

    def meet_requirement(
        self,
        node: bn.BMGNode,
        requirement: bt.Requirement,
        consumer: bn.BMGNode,
        edge: str,
    ) -> bn.BMGNode:
        """The consumer node consumes the value of the input node. The
        consumer's requirement is given; the name of this edge is provided
        for error reporting."""

        self._check_requirement_validity(node, requirement, consumer, edge)

        # If we have an untyped constant node we always need to replace it.
        if isinstance(node, bn.UntypedConstantNode):
            return self._meet_constant_requirement(node, requirement, consumer, edge)

        # If the node already meets the requirement, we're done.
        if self._node_meets_requirement(node, requirement):
            return node

        # In normal operation we should never insert a typed constant node
        # that is of the wrong type, but we have a few test cases in which
        # we do so explicitly. Regardless, it is not a problem to convert a
        # typed constant to the correct type.
        if isinstance(node, bn.ConstantNode):
            return self._meet_constant_requirement(node, requirement, consumer, edge)

        # A distribution's outgoing edges are only queries and their requirements
        # are always met, so we should have already returned. Therefore the only
        # remaining possibility is that we have an operator.
        assert isinstance(node, bn.OperatorNode)
        return self._meet_operator_requirement(node, requirement, consumer, edge)

    def fix_problems(self) -> bool:
        made_progress = False
        nodes = self.bmg.all_ancestor_nodes()
        for node in nodes:
            requirements = self._reqs.requirements(node)
            # TODO: The edge labels used to visualize the graph in DOT
            # are not necessarily the best ones for displaying errors.
            # Consider fixing this.
            edges = get_edge_labels(node)
            node_was_updated = False
            for i in range(len(requirements)):
                new_input = self.meet_requirement(
                    node.inputs[i], requirements[i], node, edges[i]
                )
                if node.inputs[i] is not new_input:
                    node.inputs[i] = new_input
                    node_was_updated = True
            if node_was_updated:
                self._typer.update_type(node)
                made_progress = True
        return made_progress


def requirements_fixer(bmg: BMGraphBuilder):
    rf = RequirementsFixer(bmg, LatticeTyper())
    made_progress = rf.fix_problems()
    return bmg, made_progress, rf.errors
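

# A usage sketch (illustrative): the canonical situation this fixer handles
# is a model such as
#
#   @bm.random_variable
#   def coin():
#       return Beta(2.0, 2.0)
#
#   @bm.random_variable
#   def flip():
#       return Bernoulli(0.5 + coin() / 2.0)
#
# The sum is typed as positive real, but the Bernoulli edge requires a
# probability; rather than reporting an error, requirements_fixer inserts a
# TO_PROBABILITY conversion on that edge (see _can_force_to_prob above).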
beanmachine-main
src/beanmachine/ppl/compiler/fix_requirements.py
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tools to transform Bean Machine programs to Bean Machine Graph""" # TODO: This module is badly named; it really should be "python simplifier" or some # such thing. Transformation into a single-assignment-like form is only part of # what it does. # This code transforms Python programs into a much simpler subset of Python with # the same semantics. Some invariants of the simpler language: # # * All "return" statements return either nothing or an identifier. # * All "while" loops are of the form "while True:" # * No "while" loop has an "else" clause. # * All "for" statements have an identifier as their collection. # * All "if" statements have an identifier as their condition. # * There are no statements that are just a single expression. # # * All simple assignments are of the form: # * id = id # * id = simple_expression # TODO: List the simple expressions. # * id[id] = id # * id[id:id] = id # lower and upper may be missing # * id[id:id:id] = id # lower, upper and step may be missing # * id.attr = id # * [id] = id # TODO: What about tuples? # * [*id] = id # * All augmented assignments (+=, *= and so on) have just ids on both sides. # * All unary operators (+, -, ~, not) have an identifier as their operand. # * All binary operators (+, -, *, /, //, %, **, <<, >>, |, ^, &, @) have an identifier # as both operands. # * There are no "and" or "or" operators # * There are no "b if a else c" expressions. # * All comparison operators (<, >, <=, >=, ==, !=, is, is not, in, not in) # are binary operators where both operands are identifiers. # * All indexing operators (a[b]) have identifiers as both collection and index. # * The left side of all attribute accesses ("dot") is an identifier. # That is "id.attr". # * Every literal list contains only identifiers. That is "[id, id, id, ...]" # * Every literal dictionary consists only of identifiers for both keys and values. # That is "{id : id, id : id, ... }" # * Every function call is of the exact form "id(*id, **id)". There are no "normal" # arguments. There are some exceptions to this rule: # * dict() is allowed. (TODO: this could be rewritten to {}) # * dict(key = id) is allowed. # * dict(**id, **id) is allowed. # * TODO: There are similar exceptions for set and list; say what they are. # * There are no dictionary, list or set comprehensions; they are rewritten as loops. # * There are no lambda expressions; they are all rewritten as function definitions # * There are no decorators; they are all rewritten as function calls # * pass statements are preserved # * import statements are preserved # * break and continue statements are preserved # TODO: assert statements are removed in bm_to_bmg; move that functionality here. # TODO: We can reduce "del" statements to one of three forms: "del x", "del x.y", "del x[y]" # where x and y are identifiers. # TODO: Figure out how to desugar try: body except expr as bar: body else: body finally: body # to simplify the expr. # TODO: Figure out how to desugar with expr as target : block # to simplify the expr and target. Note there can be multiple exprs. 
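# To make the invariants above concrete, here is a hypothetical example of the
# kind of rewrite this module performs. A statement such as
#
#   x = -(a + b)
#
# is rewritten into the simpler subset as
#
#   a1 = a + b
#   x = -a1
#
# where a1 is a compiler-generated fresh name. (The exact generated names come
# from a counter; a1 here is illustrative.)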
# TODO: say something about global / nonlocal # TODO: say something about yield # TODO: say something about classes # TODO: say something about type annotations # TODO: say something about async # TODO: say something about conditional expressions # TODO: say something about formatted strings import ast from typing import Any, Callable, List, Tuple from beanmachine.ppl.compiler.ast_patterns import ( ann_assign, assign, ast_boolop, ast_compare, ast_dict, ast_dictComp, ast_domain, ast_for, ast_generator, ast_if, ast_index, ast_list, ast_listComp, ast_luple, ast_return, ast_setComp, ast_true, ast_while, attribute, aug_assign, binop, call, expr, function_def, get_value, index, keyword, match_any, match_every, name, slice_pattern, starred, subscript, unaryop, ) from beanmachine.ppl.compiler.beanstalk_common import allowed_functions from beanmachine.ppl.compiler.patterns import ( anyPattern, HeadTail, ListAll, ListAny, match, negate, nonEmptyList, Pattern, PatternBase, twoPlusList, ) from beanmachine.ppl.compiler.rules import ( FirstMatch as first, ListEdit, PatternRule, Rule, TryMany as many, ) _some_top_down = ast_domain.some_top_down _not_identifier: Pattern = negate(name()) _name_or_none = match_any(name(), None) _neither_name_nor_none: Pattern = negate(_name_or_none) _not_starred: Pattern = negate(starred()) _list_not_identifier: PatternBase = ListAny(_not_identifier) _list_not_starred: PatternBase = ListAny(_not_starred) _list_all_identifiers: PatternBase = ListAll(name()) _not_identifier_keyword: Pattern = keyword(value=_not_identifier) _not_identifier_keywords: PatternBase = ListAny(_not_identifier_keyword) _not_none = negate(None) # TODO: The identifier "dict" should be made global unique in target name space _keyword_with_dict = keyword(arg=None, value=call(func=name(id="dict"), args=[])) _keyword_with_no_arg = keyword(arg=None) _not_keyword_with_no_arg = negate(_keyword_with_no_arg) _list_not_keyword_with_no_arg = ListAny(_not_keyword_with_no_arg) _keyword_with_arg = keyword(arg=negate(None)) _list_with_keyword_with_arg = ListAny(_keyword_with_arg) # _not_in_allowed_functions: Pattern = negate(name(id="dict")) _not_in_allowed_functions: Pattern = negate( match_any(*[name(id=t.__name__) for t in allowed_functions]) ) _binops: Pattern = match_any( ast.Add, ast.BitAnd, ast.BitOr, ast.BitXor, ast.Div, ast.FloorDiv, ast.LShift, ast.MatMult, ast.Mod, ast.Mult, ast.Pow, ast.RShift, ast.Sub, ) _unops: Pattern = match_any(ast.USub, ast.UAdd, ast.Invert, ast.Not) class SingleAssignment: _count: int # TODO: Better naming convention. In particular, _rules is the reflexive # transitive congruent extension of _rule. It would be nice to find # terminology or refactoring that would make this easy to express more # clearly than "s" does, but while still being concise. 
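    # Concretely: _rule applies a single rewrite to a single node, while _rules
    # (constructed below as many(_some_top_down(_rule))) applies _rule anywhere
    # in the tree, repeatedly, until no rule matches, i.e. until a fixpoint is
    # reached.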
    _rule: Rule
    _rules: Rule

    def __init__(self) -> None:
        self._count = 0
        self._rule = first(
            [
                self._handle_compare_all(),
                self._handle_boolop_all(),
                self._handle_while(),
                self._handle_if(),
                self._handle_unassigned(),
                self._handle_return(),
                self._handle_for(),
                self._handle_assign(),
                self._eliminate_decorator(),
            ]
        )
        self._rules = many(_some_top_down(self._rule))

    def _unique_id(self, prefix: str) -> str:
        self._count = self._count + 1
        return f"{prefix}{self._count}"

    def fresh_names(
        self,
        prefix_list: List[str],
        builder: Callable[[Callable[[str, str], ast.Name]], Any],
    ) -> Any:
        # This function gives us a way to treat a list of new local variables by their
        # original names, while giving them fresh names in the generated code to avoid
        # name clashes.
        # TODO: In the type of this function, both instances of the type Any should
        # simply be the same type. It would be nice if there were a good way to use
        # type variables with Python.
        id = {prefix: self._unique_id(prefix) for prefix in prefix_list}
        new_name_store = {
            (p, "store"): ast.Name(id=id[p], ctx=ast.Store()) for p in prefix_list
        }
        new_name_load = {
            (p, "load"): ast.Name(id=id[p], ctx=ast.Load()) for p in prefix_list
        }
        new_name = {**new_name_store, **new_name_load}
        return builder(lambda prefix, hand_side: new_name[(prefix, hand_side)])

    def _transform_with_name(
        self,
        prefix: str,
        extract_expr: Callable[[ast.AST], ast.expr],
        build_new_term: Callable[[ast.AST, ast.AST], ast.AST],
        extract_pattern: Callable[[ast.AST, ast.AST], List[ast.expr]] = lambda s, n: [
            n
        ],
    ) -> Callable[[ast.AST], ListEdit]:
        # Given its arguments (p,e,b) produces a term transformer
        # r -> p_i = e(r) ; b(r,p_i) where p_i is a new name
        def _do_it(r: ast.AST) -> ListEdit:
            id = self._unique_id(prefix)
            return ListEdit(
                [
                    ast.Assign(
                        targets=extract_pattern(r, ast.Name(id=id, ctx=ast.Store())),
                        value=extract_expr(r),
                    ),
                    build_new_term(r, ast.Name(id=id, ctx=ast.Load())),
                ]
            )

        return _do_it

    def _transform_with_assign(
        self,
        prefix: str,
        extract_expr: Callable[[ast.AST], ast.expr],
        build_new_term: Callable[[ast.AST, ast.AST, ast.AST], ListEdit],
    ) -> Callable[[ast.AST], ListEdit]:
        # Given its arguments (p,e,b) produces a term transformer
        # r -> b(r,p_i,p_i = e(r)) where p_i is a new name
        def _do_it(r: ast.AST) -> ListEdit:
            id = self._unique_id(prefix)
            new_assign = ast.Assign(
                targets=[ast.Name(id=id, ctx=ast.Store())], value=extract_expr(r)
            )
            return build_new_term(r, ast.Name(id=id, ctx=ast.Load()), new_assign)

        return _do_it

    def _transform_expr(
        self, prefix: str, extract_expr: Callable[[ast.AST], ast.expr]
    ) -> Callable[[ast.AST], ast.AST]:
        # Given its arguments (p,e) produces a term transformer
        # r -> p_i = e(r) where p_i is a new name
        def _do_it(r: ast.AST) -> ast.AST:
            id = self._unique_id(prefix)
            return ast.Assign(
                targets=[ast.Name(id=id, ctx=ast.Store())], value=extract_expr(r)
            )

        return _do_it

    def _splice_non_identifier(
        self, original: List[ast.expr]
    ) -> Tuple[ast.Assign, List[ast.expr]]:
        id = self._unique_id("a")
        index, value = next(
            (i, v) for i, v in enumerate(original) if match(_not_identifier, v)
        )
        rewritten = (
            original[:index] + [ast.Name(id=id, ctx=ast.Load())] + original[index + 1 :]
        )
        assignment = ast.Assign(targets=[ast.Name(id=id, ctx=ast.Store())], value=value)
        return assignment, rewritten

    def _splice_non_entry(
        self, keys: List[ast.expr], values: List[ast.expr]
    ) -> Tuple[ast.Assign, List[ast.expr], List[ast.expr]]:
        id = self._unique_id("a")
        keyword_index, keyword = next(
            ((i, k) for i, k in enumerate(keys) if match(_not_identifier, k)),
            (len(keys), None),
        )
        value_index,
value = next( ((i, v) for i, v in enumerate(values) if match(_not_identifier, v)), (len(values), None), ) if keyword_index <= value_index: keys_new = ( keys[:keyword_index] + [ast.Name(id=id, ctx=ast.Load())] + keys[keyword_index + 1 :] ) assignment = ast.Assign( targets=[ast.Name(id=id, ctx=ast.Store())], value=keyword ) return assignment, keys_new, values else: values_new = ( values[:value_index] + [ast.Name(id=id, ctx=ast.Load())] + values[value_index + 1 :] ) assignment = ast.Assign( targets=[ast.Name(id=id, ctx=ast.Store())], value=value ) return assignment, keys, values_new def _splice_non_identifier_keyword( self, original: List[ast.keyword] ) -> Tuple[ast.Assign, List[ast.keyword]]: id = self._unique_id("a") index, keyword = next( (i, k) for i, k in enumerate(original) if match(_not_identifier_keyword, k) ) rewritten = ( original[:index] + [ast.keyword(arg=keyword.arg, value=ast.Name(id=id, ctx=ast.Load()))] + original[index + 1 :] ) assignment = ast.Assign( targets=[ast.Name(id=id, ctx=ast.Store())], value=keyword.value ) return assignment, rewritten def _splice_non_starred(self, original: List[ast.expr]) -> List[ast.expr]: index, value = next( (i, v) for i, v in enumerate(original) if match(_not_starred, v) ) rewritten = ( original[:index] + [ast.Starred(ast.List(elts=[value], ctx=ast.Load()), ast.Load())] + original[index + 1 :] ) return rewritten # TODO: The identifier "dict" should be made global unique in target name space def _splice_non_double_starred( self, original: List[ast.keyword] ) -> List[ast.keyword]: index, value = next( (i, v) for i, v in enumerate(original) if match(_keyword_with_arg, v) ) rewritten = ( original[:index] + [ ast.keyword( arg=None, value=ast.Call( func=ast.Name(id="dict", ctx=ast.Load()), args=[], keywords=[value], ), ) ] + original[index + 1 :] ) return rewritten def _transform_list( self, ast_op: Callable[[ast.Assign], type] = lambda a: ast.List ) -> Callable[[ast.Assign], ListEdit]: def _do_it(a: ast.Assign) -> ListEdit: c = a.value ast_op_a = ast_op(a) assert isinstance(c, ast_op_a) # pyre-fixme[16]: `expr` has no attribute `elts`. assignment, elts_new = self._splice_non_identifier(c.elts) return ListEdit( [ assignment, # pyre-fixme[16]: `expr` has no attribute `ctx`. ast.Assign(targets=a.targets, value=ast_op_a(elts_new, c.ctx)), ] ) return _do_it def _transform_lists( # For things like ast_op = ast.Dict self, ast_op: type = ast.Dict ) -> Callable[[ast.Assign], ListEdit]: def _do_it(a: ast.Assign) -> ListEdit: c = a.value assert isinstance(c, ast_op) # pyre-fixme[16]: `expr` has no attribute `keys`. # pyre-fixme[16]: `expr` has no attribute `values`. assignment, keys_new, values_new = self._splice_non_entry(c.keys, c.values) return ListEdit( [ assignment, ast.Assign(targets=a.targets, value=ast_op(keys_new, values_new)), ] ) return _do_it def _handle_while_True_else(self) -> Rule: # This rule eliminates a redundant "else" clause from a "while True:" statement. # The purpose of this rule will become clear upon examining the rule which follows. # # Python has a seldom-used structure: # while condition: # body # else: # alternative # # The alternative is only executed if the condition is ever tested and False. # That is, if the loop is exited because of a break, return, or raised exception, # then the alternative is not executed. # # Obviously an else clause on a "while True" cannot be executed; though this is # rare, if we encounter it we can simply eliminate the "else:" entirely. 
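        # For example (hypothetical input):
        #
        #   while True:
        #       do_work()
        #   else:
        #       cleanup()
        #
        # becomes just "while True: do_work()"; the cleanup() call can never
        # run, so dropping the "else:" clause is safe.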
return PatternRule( ast_while(test=ast_true, orelse=negate([])), lambda source_term: ListEdit( [ast.While(test=source_term.test, body=source_term.body, orelse=[])] ), "handle_while_True_else", ) def _handle_while_not_True_else(self) -> Rule: # This rule eliminates all "while condition:" statements where the condition is # not "True", and there is an "else" clause. We rewrite # # while condition: # body # else: # alternative # # to # # while True: # t = condition # if t: # body # else: # break # if not t: # alternative # # which has the same semantics. # return PatternRule( ast_while(test=negate(ast_true), orelse=negate([])), self._transform_with_assign( "w", lambda source_term: source_term.test, lambda source_term, new_name, new_assign: ListEdit( [ ast.While( test=ast.NameConstant(value=True), body=[ new_assign, ast.If( test=new_name, body=source_term.body, orelse=[ast.Break()], ), ], orelse=[], ), ast.If( test=ast.UnaryOp(op=ast.Not(), operand=new_name), body=source_term.orelse, orelse=[], ), ] ), ), "handle_while_not_True_else", ) def _handle_while_not_True(self) -> Rule: # This rule eliminates all "while condition:" statements where the condition is # not "True", and there is no "else" clause. We rewrite # # while condition: # body # # to # # while True: # t = condition # if t: # body # else: # break # # which has the same semantics. # return PatternRule( ast_while(test=negate(ast_true), orelse=[]), self._transform_with_assign( "w", lambda source_term: source_term.test, lambda source_term, new_name, new_assign: ListEdit( [ ast.While( test=ast.NameConstant(value=True), body=[ new_assign, ast.If( test=new_name, body=source_term.body, orelse=[ast.Break()], ), ], orelse=[], ), ] ), ), "handle_while_not_True", ) def _handle_while(self) -> Rule: # This rule eliminates all "else" clauses from while statements and # makes every while of the form "while True". See above for details. return first( [ self._handle_while_True_else(), self._handle_while_not_True_else(), self._handle_while_not_True(), ] ) def _handle_unassigned(self) -> Rule: # This rule eliminates all expressions that are used only for their side # effects and produces a redundant assignment. This is because a great # many other rules are defined in terms of rewriting assignments, and it # is easier to just turn unassigned values into assignments than to change # all those rules. It rewrites: # # complex # # to # # t = complex return PatternRule( expr(), self._transform_expr("u", lambda u: u.value), "handle_unassigned" ) def _handle_return(self) -> Rule: # This rule eliminates all "return" statements where there is a returned # value that is not an identifier. We rewrite: # # return complex # # to # # t = complex # return t # # TODO: Should we also eliminate plain returns? We could rewrite # # return # # as # # t = None # return t # # and thereby maintain the invariant that every return statement # returns an identifier. return PatternRule( ast_return(value=match_every(_not_identifier, _not_none)), self._transform_with_name( "r", lambda source_term: source_term.value, lambda _, new_name: ast.Return(value=new_name), ), "handle_return", ) def _handle_if(self) -> Rule: # This rule eliminates all "if" statements where the condition is not # an identifier. 
It rewrites: # # if complex: # consequence # else: # alternative # # to # # t = complex # if t: # consequence # else: # alternative # # TODO: We can go further than this and eliminate all else clauses # from the simplified language by: # # t1 = bool(complex) # if t1: # consequence # t2 = not t1 # if t2: # alternative # # Note that we've inserted a call to bool() above. The reason for that # is to ensure that we convert "complex" to bool *once* in this rewrite, # just as it is converted to bool *once* in the original code. The "not" # operator is defined as converting its operand to bool if it is not already # a bool, and then inverting the result. # # In addition to further simplifying the language, we will probably need # this proposed rewrite in order to make stochastic conditional control flows # work properly. return PatternRule( ast_if(test=_not_identifier), self._transform_with_name( "r", lambda source_term: source_term.test, lambda source_term, new_name: ast.If( test=new_name, body=source_term.body, orelse=source_term.orelse ), ), "handle_if", ) def _handle_for(self) -> Rule: # This eliminates all "for" statements where the collection is not an identifier. # It rewrites: # # for id in complex: ... # # to # # t = complex # for id in t: ... # # TODO: the "for" loop in Python supports an "else" clause which is only activated # when the loop is exited via "break". We could eliminate it. # # TODO: the "for" loop could be rewritten as fetching an iterator and iterating # over it until an exception is raised, but that's a rather complicated rewrite # and it might not be necessary to do so. return PatternRule( ast_for(iter=_not_identifier), self._transform_with_name( "f", lambda source_term: source_term.iter, lambda source_term, new_name: ast.For( target=source_term.target, iter=new_name, body=source_term.body, orelse=source_term.orelse, ), ), "handle_for", ) # # Start of a series of rules that will define handle_assign # def _handle_ann_assign(self) -> Rule: return PatternRule(ann_assign(), lambda a: ast.Assign([a.target], a.value)) def _handle_aug_assign_right(self) -> Rule: # This rule eliminates all augmented assignments whose right side # is not an identifier but whose left side is. That is, we rewrite: # id += complex --> t = complex ; id += t return PatternRule( aug_assign(target=name(), value=_not_identifier), self._transform_with_name( "a", lambda source_term: source_term.value, lambda source_term, new_name: ast.AugAssign( target=source_term.target, op=source_term.op, value=new_name, ), ), "handle_aug_assign_right", ) def _handle_aug_assign_left(self) -> Rule: # This rule eliminates all augmented assignments whose left side # is not an identifier AND cannot be simplified further. For example: # # x.y += any # # is rewritten to # # t = x.y # t += any # x.y = t # # When combined with _handle_aug_assign_right and the rules for simplifying # the left side, we can reduce all augmented assignments to having ids on # both sides. 
# # The simplified left hand sides that we wish to rewrite are: # x[a] += any # x[a:b] += any # a or b can be missing # x[a:b:c] += any # a or b or c can be missing # x.y += any def _do_it(r: ast.AugAssign) -> ListEdit: id = self._unique_id("a") return ListEdit( [ # t = x.y ast.Assign( targets=[ast.Name(id=id, ctx=ast.Store())], value=r.target ), # t += any ast.AugAssign( target=ast.Name(id=id, ctx=ast.Store()), op=r.op, value=r.value ), # x.y = t ast.Assign( targets=[r.target], value=ast.Name(id=id, ctx=ast.Load()) ), ] ) dot = attribute(value=name()) sub1 = subscript(value=name(), slice=index(value=name())) sub2 = subscript( value=name(), slice=slice_pattern( lower=_name_or_none, upper=_name_or_none, step=_name_or_none ), ) return PatternRule( aug_assign(target=match_any(dot, sub1, sub2)), _do_it, "_handle_aug_assign_left", ) def _make_right_assignment_rule( self, right_original: Pattern, extract_expr: Callable[[ast.AST], ast.expr], right_new: Callable[[ast.AST, ast.AST], ast.AST], rule_name: str, name_prefix: str = "a", ) -> Rule: # This helper method produces rules that handle rewrites on the right # side of an assignment. Suppose for instance we are trying to rewrite # "x = -complex" ==> "temp = complex ; x = -temp". # # * target_original is the pattern to match on the right side of the # assignment, "-complex". # * extract_expr is a lambda which takes the right side and extracts the # portion to be assigned to the temporary: the function "-complex" ==> "complex" # * right_new is a lambda which takes the right side and the temporary, and returns # the right side of the new assignment: the function # ("-complex", "temp") ==> "-temp" # * rule_name is the name of the rule, for debugging purposes. return PatternRule( assign(value=right_original), self._transform_with_name( name_prefix, lambda source_term: extract_expr(source_term.value), lambda source_term, new_name: ast.Assign( targets=source_term.targets, value=right_new(source_term.value, new_name), ), ), rule_name, ) def _handle_assign_lambda(self) -> Rule: # This rule eliminates all assignments where the right hand side # is a lambda. # # It rewrites: # # x = lambda args: body # # to: # # def t(args): # return body # x = t def do_it(source_term): id = self._unique_id("a") return ListEdit( [ ast.FunctionDef( name=id, args=source_term.value.args, body=[ast.Return(value=source_term.value.body)], decorator_list=[], returns=None, type_comment=None, ), ast.Assign( targets=source_term.targets, value=ast.Name(id=id, ctx=ast.Load()), ), ] ) return PatternRule(assign(value=ast.Lambda), do_it, "handle_assign_lambda") def _handle_assign_ifexp(self) -> Rule: # This rule eliminates all assignments where the right hand side # is an if-expression: # # x = a if b else c # # becomes # # if b: # t = a # else: # t = c # x = t def do_it(source_term): id = self._unique_id("a") return ListEdit( [ ast.If( test=source_term.value.test, body=[ ast.Assign( targets=[ast.Name(id=id, ctx=ast.Store())], value=source_term.value.body, ) ], orelse=[ ast.Assign( targets=[ast.Name(id=id, ctx=ast.Store())], value=source_term.value.orelse, ) ], ), ast.Assign( targets=source_term.targets, value=ast.Name(id=id, ctx=ast.Load()), ), ] ) return PatternRule(assign(value=ast.IfExp), do_it, "handle_assign_ifexp") def _eliminate_decorator(self) -> Rule: # This rule eliminates a single decorator from a function def: # # @x # @y # def z(): # body # # is rewritten to # # @y # def z(): # body # z = x(z) # # By repeatedly applying this rule we can eliminate all decorators. 
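        # For example (names hypothetical):
        #
        #   @memoize
        #   @functional
        #   def f():
        #       return 123
        #
        # is rewritten by one application of this rule to:
        #
        #   @functional
        #   def f():
        #       return 123
        #   f = memoize(f)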
        def do_it(source_term):
            return ListEdit(
                [
                    ast.FunctionDef(
                        name=source_term.name,
                        args=source_term.args,
                        body=source_term.body,
                        # Pop off the outermost decorator...
                        decorator_list=source_term.decorator_list[1:],
                        returns=source_term.returns,
                        type_comment=None,
                    ),
                    # ... and make it into a function call
                    ast.Assign(
                        targets=[ast.Name(id=source_term.name, ctx=ast.Store())],
                        value=ast.Call(
                            func=source_term.decorator_list[0],
                            args=[ast.Name(id=source_term.name, ctx=ast.Load())],
                            keywords=[],
                        ),
                    ),
                ]
            )

        return PatternRule(
            function_def(decorator_list=nonEmptyList), do_it, "eliminate_decorator"
        )

    def _handle_assign_unaryop(self) -> Rule:
        # This rule eliminates all assignments where the right hand side
        # is a unary operator whose operand is not an identifier.
        # It rewrites:
        #
        # x = -complex
        #
        # to:
        #
        # t = complex
        # x = -t
        return self._make_right_assignment_rule(
            unaryop(operand=_not_identifier, op=_unops),
            lambda original_right: original_right.operand,
            lambda original_right, new_name: ast.UnaryOp(
                operand=new_name, op=original_right.op
            ),
            "handle_assign_unaryop",
        )

    def _handle_assign_unary_dict(self) -> Rule:
        # This rule eliminates explicit call-style dictionary constructions where
        # there is a single argument and the value or collection is not an identifier.
        #
        # That is, we rewrite:
        #
        # x = dict(n = complex)
        #
        # to
        #
        # t = complex
        # x = dict(n = t)
        #
        # and we rewrite
        #
        # x = dict(**complex)
        #
        # to
        #
        # t = complex
        # x = dict(**t)
        return self._make_right_assignment_rule(
            call(
                func=name(id="dict"), args=[], keywords=[keyword(value=_not_identifier)]
            ),
            lambda original_right: original_right.keywords[0].value,
            lambda original_right, new_name: ast.Call(
                func=original_right.func,
                args=original_right.args,
                keywords=[
                    ast.keyword(arg=original_right.keywords[0].arg, value=new_name)
                ],
            ),
            "handle_assign_dict",
        )

    def _handle_assign_subscript_slice_all(self) -> Rule:
        # This rule addresses the simplification of subscript operations.
        return first(
            [
                self._handle_assign_subscript_slice_index_1(),
                self._handle_assign_subscript_slice_index_2(),
                self._handle_assign_subscript_slice_lower(),
                self._handle_assign_subscript_slice_upper(),
                self._handle_assign_subscript_slice_step(),
            ]
        )

    def _handle_assign_subscript_slice_index_1(self) -> Rule:
        # This rule eliminates indexing expressions where the collection
        # indexed is not an identifier. We rewrite:
        #
        # x = complex[anything]
        #
        # to
        #
        # t = complex
        # x = t[anything]
        return self._make_right_assignment_rule(
            subscript(value=_not_identifier),
            lambda original_right: original_right.value,
            lambda original_right, new_name: ast.Subscript(
                value=new_name,
                slice=original_right.slice,
                ctx=original_right.ctx,
            ),
            "handle_assign_subscript_slice_index_1",
        )

    def _handle_assign_subscript_slice_index_2(self) -> Rule:
        # This rule eliminates indexing expressions where the collection
        # indexed is an identifier but the index is not.
We rewrite: # # x = c[complex] # # to # # t = complex # x = c[t] # return self._make_right_assignment_rule( subscript(value=name(), slice=index(value=_not_identifier)), lambda original_right: get_value(original_right.slice), lambda original_right, new_name: ast.Subscript( value=original_right.value, slice=ast_index(value=new_name), ctx=original_right.ctx, ), "handle_assign_subscript_slice_index_2", ) def _handle_assign_subscript_slice_lower(self) -> Rule: """Rewrites like e = a[b.c:] → x = b.c; e = a[x:].""" return self._make_right_assignment_rule( subscript(value=name(), slice=slice_pattern(lower=_neither_name_nor_none)), lambda original_right: original_right.slice.lower, lambda original_right, new_name: ast.Subscript( value=original_right.value, slice=ast.Slice( lower=new_name, upper=original_right.slice.upper, step=original_right.slice.step, ), ctx=ast.Store(), ), "_handle_assign_subscript_slice_lower", ) def _handle_assign_subscript_slice_upper(self) -> Rule: """Rewrites like e = a[:b.c] → x = b.c; e = a[:x].""" return self._make_right_assignment_rule( subscript( value=name(), slice=slice_pattern(lower=_name_or_none, upper=_neither_name_nor_none), ), lambda original_right: original_right.slice.upper, lambda original_right, new_name: ast.Subscript( value=original_right.value, slice=ast.Slice( lower=original_right.slice.lower, upper=new_name, step=original_right.slice.step, ), ctx=ast.Store(), ), "_handle_assign_subscript_slice_upper", ) def _handle_assign_subscript_slice_step(self) -> Rule: """Rewrites like e = a[::b.c] → x = b.c; e = a[::x].""" return self._make_right_assignment_rule( subscript( value=name(), slice=slice_pattern( lower=_name_or_none, upper=_name_or_none, step=_neither_name_nor_none, ), ), lambda original_right: original_right.slice.step, lambda original_right, new_name: ast.Subscript( value=original_right.value, slice=ast.Slice( lower=original_right.slice.lower, upper=original_right.slice.upper, step=new_name, ), ctx=ast.Store(), ), "_handle_assign_subscript_slice_step", ) def _handle_assign_binop_left(self) -> Rule: # This rule eliminates binary operators where the left hand side is not # an identifier. We rewrite: # # x = complex + anything # # to # # t = complex # x = t + anything # return self._make_right_assignment_rule( binop(left=_not_identifier, op=_binops), lambda original_right: original_right.left, lambda original_right, new_name: ast.BinOp( left=new_name, op=original_right.op, right=original_right.right ), "handle_assign_binop_left", ) def _handle_assign_binary_dict_left(self) -> Rule: # This rule eliminates explicit call-style dictionary constructions where # there are exactly two arguments and the left value or collection is not # an identifier. 
        #
        # We rewrite:
        #
        # x = dict(n1 = complex, anything)
        #
        # to
        #
        # t = complex
        # x = dict(n1 = t, anything)
        #
        # and we rewrite
        #
        # x = dict(**complex, anything)
        #
        # to
        #
        # t = complex
        # x = dict(**t, anything)
        return self._make_right_assignment_rule(
            call(
                func=name(id="dict"),
                args=[],
                keywords=[keyword(value=_not_identifier), anyPattern],
            ),
            lambda original_right: original_right.keywords[0].value,
            lambda original_right, new_name: ast.Call(
                func=original_right.func,  # Name(id="dict", ctx=Load()),
                args=original_right.args,  # [],
                keywords=[
                    ast.keyword(arg=original_right.keywords[0].arg, value=new_name)
                ]
                + original_right.keywords[1:],
            ),
            "handle_assign_binary_dict_left",
        )

    def _handle_assign_binary_dict_right(self) -> Rule:
        # This rule eliminates explicit call-style dictionary constructions where
        # there are exactly two arguments and the left value or collection is
        # an identifier but the right is not.
        #
        # Suppose "left" is "n1 = id" or "**id". We rewrite these:
        #
        # x = dict(left, n2 = complex)
        #
        # or
        #
        # x = dict(left, **complex)
        #
        # to
        #
        # t = complex
        # x = dict(left, n2 = t)
        #
        # or
        #
        # t = complex
        # x = dict(left, **t)
        return self._make_right_assignment_rule(
            call(
                func=name(id="dict"),
                args=[],
                keywords=[keyword(value=name()), keyword(value=_not_identifier)],
            ),
            lambda original_right: original_right.keywords[1].value,
            lambda original_right, new_name: ast.Call(
                func=original_right.func,  # Name(id="dict", ctx=Load()),
                args=original_right.args,  # [],
                keywords=original_right.keywords[:1]
                + [ast.keyword(arg=original_right.keywords[1].arg, value=new_name)],
            ),
            "handle_assign_binary_dict_right",
        )

    def _handle_assign_binop_right(self) -> Rule:
        # This rule eliminates binary operators where the left hand side is
        # an identifier but the right is not. We rewrite:
        #
        # x = id + anything
        #
        # to
        #
        # t = anything
        # x = id + t
        #
        return self._make_right_assignment_rule(
            binop(right=_not_identifier, op=_binops),
            lambda original_right: original_right.right,
            lambda original_right, new_name: ast.BinOp(
                left=original_right.left, op=original_right.op, right=new_name
            ),
            "handle_assign_binop_right",
        )

    def _handle_assign_call_function_expression(self) -> Rule:
        # This rule eliminates function calls where the receiver is not an identifier.
        # We rewrite:
        #
        # x = complex(args)
        #
        # to
        #
        # t = complex
        # x = t(args)
        return self._make_right_assignment_rule(
            call(func=_not_identifier),
            lambda original_right: original_right.func,
            lambda original_right, new_name: ast.Call(
                func=new_name,
                args=original_right.args,
                keywords=original_right.keywords,
            ),
            "handle_assign_call_function_expression",
        )

    def _handle_assign_call_single_star_arg(self) -> Rule:
        # This rule eliminates function calls of the form "id(*complex)".
        # We rewrite:
        #
        # x = f(*complex)
        #
        # to
        #
        # t = complex
        # x = f(*t)
        return self._make_right_assignment_rule(
            call(func=name(), args=[starred(value=_not_identifier)]),
            lambda original_right: original_right.args[0].value,
            lambda original_right, new_name: ast.Call(
                func=original_right.func,
                args=[ast.Starred(new_name, original_right.args[0].ctx)],
                keywords=original_right.keywords,
            ),
            "handle_assign_call_single_star_arg",
            "r",
        )

    def _handle_assign_call_single_double_star_arg(self) -> Rule:
        # This rule eliminates function calls of the form "id(*id, **complex)".
        # We rewrite:
        #
        # x = f(*a, **complex)
        #
        # to
        #
        # t = complex
        # x = f(*a, **t)
        #
        # Note: In the strategy we have chosen for dealing with keywords
        # the argument to ** should normally be dict(...).
However, # if there is only a single argument of the form above there # is no need for the further checking and the short-cut # transformation is therefore expected to be sound. return self._make_right_assignment_rule( call( func=name(), args=[starred(value=name())], keywords=[keyword(arg=None, value=_not_identifier)], ), lambda original_right: original_right.keywords[0].value, lambda original_right, new_name: ast.Call( func=original_right.func, args=original_right.args, keywords=[ast.keyword(arg=None, value=new_name)], ), "handle_assign_call_single_double_star_arg", "r", ) def _handle_assign_call_two_star_args(self) -> Rule: # This rule eliminates function calls whose argument lists begin with # two starred arguments. We rewrite: # # x = f(*a1, *a2, ...) # # to # # x = f(*(a1 + a2), ...) # # TODO: Ideally, would like to merge [1].ctx with the [0].ctx below return PatternRule( assign( value=call(args=HeadTail(starred(), HeadTail(starred(), anyPattern))) ), lambda source_term: ast.Assign( targets=source_term.targets, value=ast.Call( func=source_term.value.func, args=[ ast.Starred( ast.BinOp( left=source_term.value.args[0].value, op=ast.Add(), right=source_term.value.args[1].value, ), source_term.value.args[0].ctx, ) ] + source_term.value.args[2:], keywords=source_term.value.keywords, ), ), "handle_assign_call_two_star_args", ) def _handle_assign_call_two_double_star_args(self) -> Rule: # This rule eliminates the leftmost pair of double-star arguments from function calls. # Here d1 and d2 are any expressions: # # x = f(a1, a2, ... , **d1, **d2, ...) # # to # # x = f(a1, a2, ..., **(dict(**d1, **d2)), ...) # # Note: Since we are not lifting, no restriction needed on func or args # TODO: Ideally, would like to merge [1].ctx with the [0].ctx below # TODO: The identifier "dict" should be made global unique in target name space return PatternRule( assign( value=call( func=_not_in_allowed_functions, keywords=HeadTail( _keyword_with_no_arg, HeadTail(_keyword_with_no_arg, anyPattern) ), ) ), lambda source_term: ast.Assign( targets=source_term.targets, value=ast.Call( func=source_term.value.func, args=source_term.value.args, keywords=[ ast.keyword( arg=None, value=ast.Call( func=ast.Name(id="dict", ctx=ast.Load()), args=[], keywords=[source_term.value.keywords[0]] + [source_term.value.keywords[1]], ), ) ] + source_term.value.keywords[2:], ), ), "handle_assign_call_two_double_star_args", ) def _handle_assign_call_regular_arg(self) -> Rule: # This rule eliminates the leftmost non-starred argument from # a function argument list. We rewrite: # # x = f(*a1, *a2, anything, ...) # # to # # x = f(*a1, *a2, *[anything], ...) # return PatternRule( assign(value=call(args=_list_not_starred)), lambda source_term: ast.Assign( targets=source_term.targets, value=ast.Call( func=source_term.value.func, args=self._splice_non_starred(source_term.value.args), keywords=source_term.value.keywords, ), ), "_handle_assign_call_regular_arg", ) def _handle_assign_call_keyword_arg(self) -> Rule: # This rule eliminates a named argument from a function call by transforming # it into a double-starred argument; another rule then simplifies the double- # starred argument. We rewrite: # # x = f(... 
k = anything) # # to # # x = f(**dict(k = anything)) # # TODO: The identifier "dict" should be made global unique in target name space return PatternRule( assign( value=call( func=_not_in_allowed_functions, keywords=_list_with_keyword_with_arg ) ), lambda source_term: ast.Assign( targets=source_term.targets, value=ast.Call( func=source_term.value.func, args=source_term.value.args, keywords=self._splice_non_double_starred( source_term.value.keywords ), ), ), "_handle_assign_call_keyword_arg", ) def _handle_assign_call_empty_regular_arg(self) -> Rule: # This rule eliminates function calls with empty non-named argument lists. # This guarantees that every function call has at least one unnamed argument. # We rewrite: # # x = f(only_named_or_double_starred_args) # # to: # # x = f(*[], only_named_or_double_starred_args) return PatternRule( assign(value=call(func=_not_in_allowed_functions, args=[])), lambda source_term: ast.Assign( targets=source_term.targets, value=ast.Call( func=source_term.value.func, args=[ast.Starred(ast.List([], ast.Load()), ast.Load())], keywords=source_term.value.keywords, ), ), "_handle_assign_call_empty_regular_arg", ) def _handle_assign_call_empty_keyword_arg(self) -> Rule: # This rule eliminates function calls with no ** arguments. That is, # it ensures that every function call has at least one double-starred # argument. We rewrite: # # x = f(no_double_star_arguments) # # to # # x = f(no_double_star_arguments, **{}) # # TODO: The identifier "dict" should be made global unique in target name space return PatternRule( assign(value=call(func=_not_in_allowed_functions, keywords=[])), lambda source_term: ast.Assign( targets=source_term.targets, value=ast.Call( func=source_term.value.func, args=source_term.value.args, keywords=[ ast.keyword(arg=None, value=ast.Dict(keys=[], values=[])) ], ), ), "_handle_assign_call_empty_keyword_arg", ) def _handle_assign_attribute(self) -> Rule: # This rule eliminates attribute lookup ("dot") where the object # of the lookup is not an identifier. We rewrite: # # x = complex.z # # to: # # t = complex # x = t.z # return self._make_right_assignment_rule( attribute(value=_not_identifier), lambda original_right: original_right.value, lambda original_right, new_name: ast.Attribute( value=new_name, attr=original_right.attr, ctx=original_right.ctx ), "handle_assign_attribute", ) def _handle_assign_list(self) -> Rule: # This rule eliminates the leftmost non-identifier from a list. # We rewrite: # # x = [ids, complex, ...] # # to: # # t = complex # x = [ids, t, ...] return PatternRule( assign(value=ast_list(elts=_list_not_identifier)), self._transform_list(), "handle_assign_list", ) def _handle_assign_tuple(self) -> Rule: # This rule eliminates the leftmost non-identifier from a tuple. # We rewrite: # # x = (ids, complex, ...) # # to: # # t = complex # x = (ids, t, ...) return PatternRule( assign(value=ast_list(elts=_list_not_identifier, ast_op=ast.Tuple)), self._transform_list(ast_op=lambda a: ast.Tuple), "handle_assign_tuple", ) def _handle_assign_dictionary_keys(self) -> Rule: # This rule eliminates the leftmost non-identifier dictionary key. # We rewrite: # # x = { complex : anything } # # to: # # t = complex # x = { t : anything } return PatternRule( assign(value=ast_dict(keys=_list_not_identifier)), self._transform_lists(), "handle_assign_dictionary_keys", ) def _handle_assign_dictionary_values(self) -> Rule: # This rule eliminates the leftmost non-identifier dictionary value. 
        # We rewrite:
        #
        # x = { anything : complex }
        #
        # to:
        #
        # t = complex
        # x = { anything : t }
        #
        # TODO: Note that this rule combined with the previous rule
        # changes the order in which side effects happen. If we have
        #
        # x = { a() : b(), c() : d() }
        #
        # Then this could be rewritten to:
        #
        # t1 = a()
        # t2 = c()
        # t3 = b()
        # t4 = d()
        # x = { t1 : t3, t2 : t4 }
        #
        # Which is not the a(), b(), c(), d() order we expect.
        #
        # We might consider fixing these rules so that the leftmost
        # key-or-value is rewritten, not the leftmost key and then the
        # leftmost value. However, this is low priority, as it is rare
        # for there to be a side effect in a dictionary key.
        return PatternRule(
            assign(value=ast_dict(values=_list_not_identifier)),
            self._transform_lists(),
            "handle_assign_dictionary_values",
        )

    def _nested_ifs_of(self, conditions: List[ast.expr], call: ast.stmt) -> ast.stmt:
        # Turns a series of conditions into nested ifs
        if conditions == []:
            return call
        else:
            head, *tail = conditions
            rest = self._nested_ifs_of(tail, call)
            return ast.If(test=head, body=[rest], orelse=[])

    def _nested_fors_and_ifs_of(
        self, generators: List[ast.comprehension], call: ast.stmt
    ) -> ast.stmt:
        # Turns nested comprehension generators into a nesting of for and if
        # statements; for example, [... for i in range(1,2) if odd(i)] becomes
        #
        # for i in range(1,2):
        #     if odd(i):
        #         ...
        if generators == []:
            return call
        else:
            head, *tail = generators
            rest = self._nested_fors_and_ifs_of(tail, call)
            return ast.For(
                target=head.target,
                iter=head.iter,
                body=[self._nested_ifs_of(head.ifs, rest)],
                orelse=[],
                type_comment=None,
            )

    def _handle_assign_listComp(self) -> Rule:
        # Rewrite y = [c for v_i in e_i if b_i] into
        #
        # def p():
        #     r = []
        #     for v_i in e_i:
        #         if b_i:
        #             r.append(c)
        #     return r
        # y = p()
        #
        # Note that this rewrites both list comprehensions and generator expressions.
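        # For example (hypothetical input):
        #
        #   y = [v * 2 for v in xs if v > 0]
        #
        # becomes, roughly:
        #
        #   def p1():
        #       r2 = []
        #       for v in xs:
        #           if v > 0:
        #               r2.append(v * 2)
        #       return r2
        #   y = p1()
        #
        # (The numeric suffixes on p and r come from the fresh-name counter.)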
_empty_ast_arguments = ast.arguments( posonlyargs=[], args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[], ) return PatternRule( assign(value=match_any(ast_generator(), ast_listComp())), lambda term: ListEdit( self.fresh_names( ["p", "r"], lambda new_name: [ ast.FunctionDef( name=new_name("p", "store").id, args=_empty_ast_arguments, body=[ ast.Assign( targets=[new_name("r", "store")], value=ast.List([], ast.Load()), ), self._nested_fors_and_ifs_of( term.value.generators, ast.Expr( ast.Call( func=ast.Attribute( value=new_name("r", "load"), attr="append", ctx=ast.Load(), ), args=[term.value.elt], keywords=[], ) ), ), ast.Return(new_name("r", "load")), ], decorator_list=[], returns=None, type_comment=None, ), ast.Assign( targets=term.targets, value=ast.Call( func=new_name("p", "load"), args=[], keywords=[] ), ), ], ) ), "handle_assign_listComp", ) def _handle_assign_setComp(self) -> Rule: # Rewrite y = {c for v_i in e_i if b_i} into # def p(): # r = set() # for v_i in e_i # if b_i: # r.add(c) # return r # y=p() _empty_ast_arguments = ast.arguments( posonlyargs=[], args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[], ) return PatternRule( assign(value=ast_setComp()), lambda term: ListEdit( self.fresh_names( ["p", "r"], lambda new_name: [ ast.FunctionDef( name=new_name("p", "store").id, args=_empty_ast_arguments, body=[ ast.Assign( targets=[new_name("r", "store")], value=ast.Call( func=ast.Name(id="set", ctx=ast.Load()), args=[], keywords=[], ), ), self._nested_fors_and_ifs_of( term.value.generators, ast.Expr( ast.Call( func=ast.Attribute( value=new_name("r", "load"), attr="add", ctx=ast.Load(), ), args=[term.value.elt], keywords=[], ) ), ), ast.Return(new_name("r", "load")), ], decorator_list=[], returns=None, type_comment=None, ), ast.Assign( targets=term.targets, value=ast.Call( func=new_name("p", "load"), args=[], keywords=[] ), ), ], ) ), "handle_assign_setComp", ) def _handle_assign_dictComp(self) -> Rule: # Rewrite y = {c:d for v_i in e_i if b_i} into # def p(): # r = {} # for v_i in e_i # if b_i: # r.__setitem__(c,d) # return r # y=p() _empty_ast_arguments = ast.arguments( posonlyargs=[], args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[], ) return PatternRule( assign(value=ast_dictComp()), lambda term: ListEdit( self.fresh_names( ["p", "r"], lambda new_name: [ ast.FunctionDef( name=new_name("p", "store").id, args=_empty_ast_arguments, body=[ ast.Assign( targets=[new_name("r", "store")], value=ast.Dict(keys=[], values=[]), ), self._nested_fors_and_ifs_of( term.value.generators, ast.Expr( ast.Call( func=ast.Attribute( value=new_name("r", "load"), attr="__setitem__", ctx=ast.Load(), ), args=[term.value.key, term.value.value], keywords=[], ) ), ), ast.Return(new_name("r", "load")), ], decorator_list=[], returns=None, type_comment=None, ), ast.Assign( targets=term.targets, value=ast.Call( func=new_name("p", "load"), args=[], keywords=[] ), ), ], ) ), "handle_assign_dictComp", ) def _handle_assign(self) -> Rule: return first( [ self._handle_ann_assign(), self._handle_aug_assign_right(), self._handle_aug_assign_left(), self._handle_assign_unaryop(), self._handle_assign_subscript_slice_all(), self._handle_assign_possibly_blocking_right_value(), self._handle_assign_binop_left(), self._handle_assign_binop_right(), self._handle_assign_attribute(), self._handle_assign_list(), self._handle_assign_tuple(), self._handle_assign_dictionary_keys(), self._handle_assign_dictionary_values(), self._handle_assign_ifexp(), # 
Acceptable rules for handling function calls self._handle_assign_call_function_expression(), # Rules for regular arguments self._handle_assign_call_single_star_arg(), self._handle_assign_call_two_star_args(), self._handle_assign_call_regular_arg(), self._handle_assign_call_empty_regular_arg(), # Rules for keyword arguments self._handle_assign_call_single_double_star_arg(), self._handle_assign_call_two_double_star_args(), self._handle_assign_call_keyword_arg(), self._handle_assign_call_empty_keyword_arg(), # Rules for comprehensions self._handle_assign_listComp(), self._handle_assign_setComp(), self._handle_assign_dictComp(), # Rules for dict (as a special function name) self._handle_assign_unary_dict(), self._handle_assign_binary_dict_left(), self._handle_assign_binary_dict_right(), self._handle_left_value_all(), # Rule to eliminate lambdas self._handle_assign_lambda(), ] ) def _handle_boolop_binarize(self) -> Rule: # This rule eliminates non-binary "and" and "or" operators. # # Boolean operators -- "and" and "or" -- are not necessarily binary operators. # "a and b and c" is parsed as a single "and" operator with three operands! # This rule rewrites "a and b and c and ..." into "(a and b) and c and..." # If the rule is then repeated until it attains a fixpoint we attain the # invariant that every Boolean operator is also a binary operator. return PatternRule( ast_boolop(values=twoPlusList), lambda source_term: ast.BoolOp( op=source_term.op, values=[ ast.BoolOp( source_term.op, [source_term.values[0], source_term.values[1]] ) ] + source_term.values[2:], ), "handle_boolop_binarize", ) def _handle_assign_boolop_linearize(self) -> Rule: # This rule eliminates "and" and "or" operators with two operands where # the left operand is complex. It rewrites: # # x = complex and y # # to # # t = complex # x = t and y # # And similarly for "or". return self._make_right_assignment_rule( ast_boolop(values=[_not_identifier, anyPattern]), lambda original_right: original_right.values[0], lambda original_right, new_name: ast.BoolOp( op=original_right.op, values=[new_name, original_right.values[1]] ), "handle_assign_boolop_linearize", ) def _handle_assign_and2if(self) -> Rule: # This rule entirely eliminates "and" operators with two operands where the # left operand is an identifier. It rewrites: # # x = id and y # # to # # if id: # x = y # else: # x = id return PatternRule( assign(value=ast_boolop(op=ast.And, values=[name(), anyPattern])), lambda source_term: ast.If( test=source_term.value.values[0], body=[ ast.Assign( targets=source_term.targets, value=source_term.value.values[1] ) ], orelse=[ ast.Assign( targets=source_term.targets, value=source_term.value.values[0] ) ], ), "handle_and2if", ) def _handle_assign_or2if(self) -> Rule: # This rule entirely eliminates "or" operators with two operands where the # left operand is an identifier. It rewrites: # # x = id or y # # to # # if id: # x = id # else: # x = y return PatternRule( assign(value=ast_boolop(op=ast.Or, values=[name(), anyPattern])), lambda source_term: ast.If( test=source_term.value.values[0], body=[ ast.Assign( targets=source_term.targets, value=source_term.value.values[0] ) ], orelse=[ ast.Assign( targets=source_term.targets, value=source_term.value.values[1] ) ], ), "handle_or2if", ) def _handle_boolop_all(self) -> Rule: # This rule eliminates all "and" and "or" operators from the program. 
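        # Run to a fixpoint, these rules rewrite, for example,
        #
        #   x = a() and b()
        #
        # into roughly
        #
        #   a1 = a()
        #   if a1:
        #       x = b()
        #   else:
        #       x = a1
        #
        # (before later rules simplify the calls further), which preserves
        # short-circuit evaluation.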
return first( [ self._handle_boolop_binarize(), self._handle_assign_boolop_linearize(), self._handle_assign_and2if(), self._handle_assign_or2if(), ] ) def _handle_compare_binarize(self) -> Rule: # This rule eliminates non-binary comparison operators where the *second* # leftmost operand is an identifier. This could use some explanation. # # In Python the comparison operators are not necessarily binary operators. # An expression of the form # # x = a < b > c # # is equivalent to # # x = a < b and b > c # # Except that b is evaluated *only once*. We therefore must ensure that # "b" in this case has no side effects before we can do this rewrite. We # rewrite: # # x = anything OP id OP anything ... # # to # # x = (anything OP id) and (id OP anything ...) # return PatternRule( ast_compare( ops=HeadTail(anyPattern, HeadTail(anyPattern, anyPattern)), comparators=HeadTail(name(), anyPattern), ), lambda source_term: ast.BoolOp( op=ast.And(), values=[ ast.Compare( left=source_term.left, ops=[source_term.ops[0]], comparators=[source_term.comparators[0]], ), ast.Compare( left=source_term.comparators[0], ops=source_term.ops[1:], comparators=source_term.comparators[1:], ), ], ), "handle_compare_binarize", ) def _handle_assign_compare_lefthandside(self) -> Rule: # This rule eliminates comparison operations where the leftmost operand # is not an identifier, regardless of how many operands and operators # there are in the operation. It rewrites: # # x = complex OP anything ... # # to # # t = complex # x = t OP anything ... return self._make_right_assignment_rule( ast_compare(left=_not_identifier), lambda original_right: original_right.left, lambda original_right, new_name: ast.Compare( left=new_name, ops=original_right.ops, comparators=original_right.comparators, ), "handle_assign_compare_lefthandside", ) def _handle_assign_compare_righthandside(self) -> Rule: # This rule eliminates comparison operations where the leftmost operand # is an identifier and the second-from-the-leftmost operand is not an # identifier, regardless of how many operands and operators there are in # the operation. It rewrites: # # x = id OP complex ... # # to # # t = complex # x = id OP t ... # return self._make_right_assignment_rule( ast_compare(left=name(), comparators=HeadTail(_not_identifier, anyPattern)), lambda original_right: original_right.comparators[0], lambda original_right, new_name: ast.Compare( left=original_right.left, ops=original_right.ops, comparators=[new_name] + original_right.comparators[1:], ), "handle_assign_compare_righthandside", ) def _handle_compare_all(self) -> Rule: # This rule simplifies the left two operands of a comparison # operations to be both identifiers, and breaks up non-binary # comparison operations by introducing an "and". # # Since we have other rules which eventually eliminate "and" expressions # entirely, repeated application of these rules will reach a fixpoint where # every comparison is a binary operator containing only identifiers. # # For example, the combination of this rule and the Boolean operator rules # when executed until a fixpoint is reached would rewrite: # # x = (a + b) < (c + d) < (e + f) # # to: # # t1 = a + b # t2 = c + d # t3 = t1 < t2 # if t3: # t4 = e + f # x = t2 < t4 # else: # x = t3 # # which has the same semantics. Note that if (a + b) < (c + d) is false # then e + f is never computed. 
return first( [ self._handle_compare_binarize(), self._handle_assign_compare_righthandside(), self._handle_assign_compare_lefthandside(), ] ) # TODO: We need to find a good way to order things in this file def _handle_left_value_all(self) -> Rule: """Put the left_value of an assignment in SSA form""" return first( [ self._handle_left_value_attributeref(), self._handle_left_value_subscript_value(), self._handle_left_value_subscript_slice_index(), self._handle_left_value_subscript_slice_lower(), self._handle_left_value_subscript_slice_upper(), self._handle_left_value_subscript_slice_step(), self._handle_left_value_list_star(), self._handle_left_value_list_list(), self._handle_left_value_list_not_starred(), self._handle_left_value_list_starred(), ] ) def _make_left_assignment_rule( self, target_original: Pattern, extract_expr: Callable[[ast.AST], ast.expr], target_new: Callable[[ast.AST, ast.AST], ast.AST], rule_name: str, ) -> Rule: # This helper method produces rules that handle rewrites on the left # side of an assignment. Suppose for instance we are trying to rewrite # "complex.attrib = id" ==> "temp = complex ; temp.attrib = id". # # * target_original is the pattern to match on the left side of the # assignment, "complex.attrib". # * extract_expr is a lambda which takes the left side and extracts the # portion to be assigned to the temporary: the function "complex.attrib" ==> "complex" # * target_new is a lambda which takes the left side and the temporary, and returns # the left side of the new assignment: the function # ("complex.attrib", "temp") ==> "temp.attrib" # * rule_name is the name of the rule, for debugging purposes. name_prefix = "x" return PatternRule( assign(targets=[target_original], value=name()), self._transform_with_name( name_prefix, lambda source_term: extract_expr(source_term.targets[0]), lambda source_term, new_name: ast.Assign( targets=[target_new(source_term.targets[0], new_name)], value=source_term.value, ), ), rule_name, ) def _make_left_aug_assignment_rule( self, target_original: Pattern, extract_expr: Callable[[ast.AST], ast.expr], target_new: Callable[[ast.AST, ast.AST], ast.AST], rule_name: str, ) -> Rule: # This helper does the same as _make_left_assignment_rule, except: # * it works for augmented assignments, not normal assignments # * it does not require that the right side be an identifier # (Non-identifier right sides are rewritten later.) name_prefix = "a" return PatternRule( aug_assign(target=target_original), self._transform_with_name( name_prefix, lambda source_term: extract_expr(source_term.target), lambda source_term, new_name: ast.AugAssign( target=target_new(source_term.target, new_name), op=source_term.op, value=source_term.value, ), ), rule_name, ) def _make_left_any_assignment_rule( self, target_original: Pattern, extract_expr: Callable[[ast.AST], ast.expr], target_new: Callable[[ast.AST, ast.AST], ast.AST], rule_name: str, ) -> Rule: # This helper does the same as _make_left_assignment_rule, but for # both regular and augmented assignments. 
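        # For example, with the attribute pattern below, the same pair of rules
        # produces both of these rewrites (fresh names illustrative):
        #
        #   complex.attrib = id    →   x1 = complex; x1.attrib = id
        #   complex.attrib += rhs  →   a1 = complex; a1.attrib += rhs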
        return first(
            [
                self._make_left_assignment_rule(
                    target_original, extract_expr, target_new, rule_name
                ),
                self._make_left_aug_assignment_rule(
                    target_original, extract_expr, target_new, rule_name
                ),
            ]
        )

    def _handle_left_value_attributeref(self) -> Rule:
        """Rewrites like complex.attrib = id → temp = complex; temp.attrib = id"""
        return self._make_left_any_assignment_rule(
            attribute(value=_not_identifier),  # complex.attrib
            lambda original_left: original_left.value,  # complex.attrib ==> complex
            lambda original_left, new_name: ast.Attribute(
                value=new_name,
                attr=original_left.attr,
                ctx=ast.Store(),
            ),  # (complex.attrib, temp) ==> temp.attrib
            "handle_left_value_attributeref",
        )

    def _handle_left_value_subscript_value(self) -> Rule:
        """Rewrites like a.b[c] = z → x = a.b; x[c] = z"""
        return self._make_left_any_assignment_rule(
            subscript(value=_not_identifier),
            lambda original_left: original_left.value,
            lambda original_left, new_name: ast.Subscript(
                value=new_name,
                slice=original_left.slice,
                ctx=ast.Store(),
            ),
            "_handle_left_value_subscript_value",
        )

    def _handle_left_value_subscript_slice_index(self) -> Rule:
        """Rewrites like a[b.c] = z → x = b.c; a[x] = z"""
        return self._make_left_any_assignment_rule(
            subscript(value=name(), slice=index(value=_not_identifier)),
            lambda original_left: get_value(original_left.slice),
            lambda original_left, new_name: ast.Subscript(
                value=original_left.value,
                slice=ast_index(
                    value=new_name,
                    ctx=ast.Load(),
                ),
                ctx=ast.Store(),
            ),
            "_handle_left_value_subscript_slice_index",
        )

    def _handle_left_value_subscript_slice_lower(self) -> Rule:
        """Rewrites like a[b.c:] = z → x = b.c; a[x:] = z."""
        return self._make_left_any_assignment_rule(
            subscript(value=name(), slice=slice_pattern(lower=_neither_name_nor_none)),
            lambda original_left: original_left.slice.lower,
            lambda original_left, new_name: ast.Subscript(
                value=original_left.value,
                slice=ast.Slice(
                    lower=new_name,
                    upper=original_left.slice.upper,
                    step=original_left.slice.step,
                ),
                ctx=ast.Store(),
            ),
            "_handle_left_value_subscript_slice_lower",
        )

    def _handle_left_value_subscript_slice_upper(self) -> Rule:
        """Rewrites like a[:b.c] = z → x = b.c; a[:x] = z."""
        return self._make_left_any_assignment_rule(
            subscript(
                value=name(),
                slice=slice_pattern(lower=_name_or_none, upper=_neither_name_nor_none),
            ),
            lambda original_left: original_left.slice.upper,
            lambda original_left, new_name: ast.Subscript(
                value=original_left.value,
                slice=ast.Slice(
                    lower=original_left.slice.lower,
                    upper=new_name,
                    step=original_left.slice.step,
                ),
                ctx=ast.Store(),
            ),
            "_handle_left_value_subscript_slice_upper",
        )

    def _handle_left_value_subscript_slice_step(self) -> Rule:
        """Rewrites like a[:c:d.e] = z → x = d.e; a[:c:x] = z."""
        return self._make_left_any_assignment_rule(
            subscript(
                value=name(),
                slice=slice_pattern(
                    lower=_name_or_none,
                    upper=_name_or_none,
                    step=_neither_name_nor_none,
                ),
            ),
            lambda original_left: original_left.slice.step,
            lambda original_left, new_name: ast.Subscript(
                value=original_left.value,
                slice=ast.Slice(
                    lower=original_left.slice.lower,
                    upper=original_left.slice.upper,
                    step=new_name,
                ),
                ctx=ast.Store(),
            ),
            "_handle_left_value_subscript_slice_step",
        )

    def _handle_left_value_list_star(self) -> Rule:
        """Rewrites like [*a.b] = z → [*y] = z; a.b = y."""
        # Note: This type of rewrite should not be "generalized" to
        # have anything come after *a.b because that would change order
        # of evaluation within the pattern.
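        # For example:
        #
        #   [*a.b] = z
        #
        # becomes (fresh name illustrative):
        #
        #   [*x1] = z
        #   a.b = x1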
return PatternRule( assign( targets=[ast_luple(elts=[starred(value=_not_identifier)])], value=name(), ), self._transform_with_name( "x", lambda source_term: source_term.value, lambda source_term, new_name: ast.Assign( targets=[source_term.targets[0].elts[0].value], value=new_name, ), # pyre-fixme[6]: Expected `(AST, AST) -> List[_ast.expr]` for 4th # param but got `(source_term: Any, new_name: Any) -> # List[_ast.List]`. lambda source_term, new_name: [ ast.List(elts=[ast.Starred(value=new_name)]) ], ), "_handle_left_value_list_star", ) def _handle_left_value_list_list(self) -> Rule: """Rewrites like [[e]] = z → [y] = z; [e] = y.""" # Note: This type of rewrite should not be "generalized" to # have anything come after [e] because that would change order # of evaluation within the pattern. return PatternRule( assign( targets=[ast_luple(elts=[ast_luple(elts=[anyPattern])])], value=name(), ), self._transform_with_name( "x", lambda source_term: source_term.value, lambda source_term, new_name: ast.Assign( targets=[ast.List(elts=[source_term.targets[0].elts[0].elts[0]])], value=new_name, ), # pyre-fixme[6]: Expected `(AST, AST) -> List[_ast.expr]` for 4th # param but got `(source_term: Any, new_name: Any) -> # List[_ast.List]`. lambda source_term, new_name: [ast.List(elts=[new_name])], ), "_handle_left_value_list_list_star", ) def _handle_left_value_list_not_starred(self) -> Rule: """Rewrites like [a.b.c, d] = z → a.b.c = z[0]; [d] = z[1:].""" # Here we are handling lists with more than one element. return PatternRule( assign( targets=[ ast_luple( elts=HeadTail(_not_starred, HeadTail(anyPattern, anyPattern)) ) ], value=name(), ), lambda source_term: ListEdit( [ ast.Assign( targets=[source_term.targets[0].elts[0]], value=ast.Subscript( value=source_term.value, slice=ast_index(value=ast.Num(n=0)), ctx=ast.Load(), ), ), ast.Assign( targets=[ ast.List( elts=source_term.targets[0].elts[1:], ctx=ast.Store() ) ], value=ast.Subscript( value=source_term.value, slice=ast.Slice(lower=ast.Num(n=1), upper=None, step=None), ctx=ast.Load(), ), ), ], ), "_handle_left_value_list_not_starred", ) def _handle_left_value_list_starred(self) -> Rule: """Rewrites like [*c, d] = z → [*c] = z[:-1]; d = z[-1].""" # Here we are handling lists with more than one element. return PatternRule( assign( targets=[ ast_luple( elts=HeadTail(starred(), HeadTail(anyPattern, anyPattern)) ) ], value=name(), ), lambda source_term: ListEdit( [ ast.Assign( targets=[ ast.List( elts=source_term.targets[0].elts[:-1], ctx=ast.Store() ) ], value=ast.Subscript( value=source_term.value, slice=ast.Slice(lower=None, upper=ast.Num(n=-1), step=None), ctx=ast.Load(), ), ), ast.Assign( targets=[source_term.targets[0].elts[-1]], value=ast.Subscript( value=source_term.value, slice=ast_index(value=ast.Num(n=-1)), ctx=ast.Load(), ), ), ], ), "_handle_left_value_list_starred", ) def _handle_assign_possibly_blocking_right_value(self) -> Rule: """Rewrite e1 = e2 → x = e2; e1 = x, as long as e1 and e2 are not names.""" return PatternRule( assign(targets=[_not_identifier], value=_not_identifier), self._transform_with_name( "a", lambda source_term: source_term.value, lambda source_term, new_name: ast.Assign( targets=source_term.targets, value=new_name, ), ), "_handle_assign_possibly_blocking_right_value", ) def single_assignment(self, node: ast.AST) -> ast.AST: return self._rules(node).expect_success() def single_assignment(node: ast.AST) -> ast.AST: s = SingleAssignment() return s.single_assignment(node)
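# A hedged, minimal usage sketch: run the rewriter over a small program and
# print its single-assignment form. The example statement is illustrative;
# ast.unparse requires Python 3.9+.
if __name__ == "__main__":
    _demo_tree = ast.parse("a.b[c.d] = e.f + g")
    _rewritten = single_assignment(_demo_tree)
    print(ast.unparse(ast.fix_missing_locations(_rewritten)))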
beanmachine-main
src/beanmachine/ppl/compiler/single_assignment.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # TODO: Update this comment """A builder for the BeanMachine Graph language The Beanstalk compiler has, at a high level, five phases. * First, it transforms a Python model into a semantically equivalent program "single assignment" (SA) form that uses only a small subset of Python features. * Second, it transforms that program into a "lifted" form. Portions of the program which do not involve samples are executed normally, but any computation that involves a stochastic node in any way is instead turned into a graph node. Jargon note: Every graph of a model will have some nodes that represent random samples and some which do not. For instance, we might have a simple coin flip model with three nodes: a sample, a distribution, and a constant probability: def flip(): return Bernoulli(0.5) sample --> Bernoulli --> 0.5 We'll refer to the nodes which somehow involve a sample, either directly or indirectly, as "stochastic" nodes. * Third, we actually execute the lifted program and accumulate the graph. * Fourth, the accumulated graph tracks the type information of the original Python program. We mutate the accumulated graph into a form where it obeys the rules of the BMG type system. * Fifth, we either create actual BMG nodes in memory via native code interop, or we emit a program in Python or C++ which does so. This module implements the graph builder that is called during execution of the lifted program; it implements phases three, four and five. """ import inspect from types import MethodType from typing import Any, Callable, Dict, List, Optional, Set, Tuple import beanmachine.ppl.compiler.bmg_nodes as bn import beanmachine.ppl.compiler.profiler as prof import torch from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.bmg_nodes import BMGNode from beanmachine.ppl.compiler.execution_context import ExecutionContext, FunctionCall from beanmachine.ppl.compiler.special_function_caller import ( canonicalize_function, SpecialFunctionCaller, ) from beanmachine.ppl.compiler.support import ( _limit as max_possibilities, ComputeSupport, Infinite, TooBig, Unknown, ) from beanmachine.ppl.inference.utils import _verify_queries_and_observations from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.utils.memoize import MemoizationKey def _has_ordinary_value(x: Any) -> bool: return not isinstance(x, bn.BMGNode) or isinstance(x, bn.ConstantNode) def _get_ordinary_value(x: Any) -> Any: return x.value if isinstance(x, bn.ConstantNode) else x builtin_function_or_method = type(abs) def _is_random_variable_call(f) -> bool: return hasattr(f, "is_random_variable") and f.is_random_variable def _is_functional_call(f) -> bool: return hasattr(f, "is_functional") and f.is_functional def _has_source_code(function: Callable) -> bool: try: inspect.getsource(function) except Exception: return False return True class BMGRuntime: _bmg: BMGraphBuilder # As we construct the graph we may encounter "random variable" values; these # refer to a function that we need to transform into the "lifted" form. This # map tracks those so that we do not repeat work. However, RVIDs contain a # tuple of arguments which might contain tensors, and tensors are hashed by # reference, not by value. We therefore construct a map of RVID-equivalents # which is hashable by the values of the arguments. 
rv_map: Dict[MemoizationKey, BMGNode] lifted_map: Dict[Callable, Callable] # The graph we accumulate must be acyclic. We assume that an RVID-returning # function is pure, so if at any time such a function calls itself, either # it is impure or it is in an infinite recursion; either way, we will not # be able to construct a correct graph. When we are calling the lifted # form of a functional or random_variable method we track the RVID that # was used to trigger the call; if we ever encounter a call with the same # RVID while the lifted execution is "in flight", we throw an exception # and stop accumulating the graph. in_flight: Set[MemoizationKey] _pd: Optional[prof.ProfilerData] _special_function_caller: SpecialFunctionCaller _context: ExecutionContext def __init__(self) -> None: self._context = ExecutionContext() self._bmg = BMGraphBuilder(self._context) self._pd = None self.rv_map = {} self.lifted_map = {} self.in_flight = set() self._special_function_caller = SpecialFunctionCaller(self._bmg) def _begin(self, s: str) -> None: pd = self._pd if pd is not None: pd.begin(s) def _finish(self, s: str) -> None: pd = self._pd if pd is not None: pd.finish(s) def _record_node_call(self, node: BMGNode) -> None: self._context.record_node_call(node) def _record_node_rv(self, node: BMGNode, rv: RVIdentifier) -> None: call = FunctionCall(rv.function, rv.arguments, {}) self._context.record_node_call(node, call) # # Operators # def _possibly_stochastic_op( self, normal_op: Callable, stochastic_op: Callable, values: List[Any] ) -> Any: # We have a bunch of values that are being passed to a function. # If all the values are ordinary (non-stochastic) then just call # the normal function that takes ordinary values. Otherwise, # convert all the non-nodes to nodes and call the node constructor. # TODO: This logic is duplicated with SpecialFunctionCaller; move # the operator handling into there as well. if all(_has_ordinary_value(v) for v in values): return normal_op(*(_get_ordinary_value(v) for v in values)) node = stochastic_op( *( v if isinstance(v, bn.BMGNode) else self._bmg.add_constant(v) for v in values ) ) self._record_node_call(node) return node def handle_not_in(self, input: Any, other: Any) -> Any: # Unfortunately there is no operator function equivalent of # "not in" so we can't leverage the special function caller here. return self._possibly_stochastic_op( lambda x, y: x not in y, self._bmg.add_not_in, [input, other] ) def _is_stochastic_tuple(self, t: Any): # A stochastic tuple is any tuple where any element is either a graph node # or a stochastic tuple. if not isinstance(t, tuple): return False for item in t: if isinstance(item, BMGNode): return True if self._is_stochastic_tuple(item): return True return False def _handle_tuple_index(self, left: Any, right: Tuple[Any]) -> Any: # We either have a tensor on the left and a stochastic tuple on the # right, or a graph node on the left and a tuple, stochastic or not, # on the right. Either way, we decompose it into multiple index # operations. The rules we're using are: # # * Indexing with an empty tuple is an identity # * Indexing with a single-element tuple just uses the element (see below!) # * If the tuple has multiple elements, break it up into a head element and # a tail tuple. Index with the head, and then index that with the tail. # # TODO: Unfortunately, the second rule does not match the actual behavior of # pytorch. Suppose we have: # # t = tensor([[10, 20], [30, 40]]) # # What is t[(1, 1)] ? 
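# An illustrative example of the expansion performed below (names are
# hypothetical): for a call normal(flip()) where the argument flip() is a
# graph node with support {0, 1}, the recursion builds
#
#     switch_inputs = [flip_node, const_0, normal_given_0,
#                                 const_1, normal_given_1]
#
# that is, the choosing node followed by alternating (key, value) pairs,
# which add_switch then consumes.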
# # By our proposed transformation this becomes t[1][(1,)] by the third rule, and then # t[1][1] by the second rule. This is correct, so what's the problem? The problem is, # what is t[((1, 1),)]? # # By our second rule, t[((1, 1),)] becomes t[(1, 1)]; now we are in the # same case as before and end up with tensor(40). But that's not what torch # produces if you run this code! It produces tensor([[30, 40], [30, 40]]). # # We will come back to this point later and consider how to better represent # this kind of indexing operation in the graph; for now we'll just implement # the simplified approximation: # some_tensor[()] is an identity. if len(right) == 0: assert isinstance(left, BMGNode) return left # some_tensor[(x,)] is the same as some_tensor[x] if len(right) == 1: return self.handle_index(left, right[0]) # some_tensor[(head, ...tail...)] is the same as some_tensor[head][...tail...] h = self.handle_index(left, right[0]) return self.handle_index(h, right[1:]) def handle_index(self, left: Any, right: Any) -> Any: if isinstance(left, BMGNode) and isinstance(right, tuple): return self._handle_tuple_index(left, right) if isinstance(left, torch.Tensor) and self._is_stochastic_tuple(right): return self._handle_tuple_index(left, right) # TODO: What if we have a non-tensor indexed with a stochastic value? # A list, for example? return self._possibly_stochastic_op( lambda x, y: x[y], self._bmg.add_index, [left, right] ) def handle_slice(self, left: Any, lower: Any, upper: Any, step: Any) -> Any: if ( isinstance(left, BMGNode) or isinstance(lower, BMGNode) or isinstance(upper, BMGNode) or isinstance(step, BMGNode) ): raise ValueError("Stochastic slices are not yet implemented.") return left[lower:upper:step] # # Control flow # def handle_for(self, iter: Any) -> None: if isinstance(iter, BMGNode): # TODO: Better error raise ValueError("Stochastic control flows are not yet implemented.") def handle_if(self, test: Any) -> None: if isinstance(test, BMGNode): # TODO: Better error raise ValueError("Stochastic control flows are not yet implemented.") # # Function calls # def _handle_random_variable_call_checked( self, function: Any, arguments: List[Any], cs: ComputeSupport ) -> BMGNode: assert isinstance(arguments, list) # Identify the index of the leftmost graph node argument: index = next( (i for i, arg in enumerate(arguments) if isinstance(arg, BMGNode)), -1 ) if index == -1: # There were no graph node arguments. Just make an ordinary # function call rv = function(*arguments) assert isinstance(rv, RVIdentifier) return self._rv_to_node(rv) # We have an RV call where one or more arguments are graph nodes; # each graph node has finite support and the estimate of the number # of combinations we have to try is small. # Replace the given argument with all possible values and recurse. # # TODO: Note that we only memoize calls to RVs when the arguments # contain no graph nodes. Is this acceptable? We could save some # work if we also memoized calls of the form "rv1(rv2())". Right now # we would recompute the support of rv2() on the second such call, # and only get the savings of skipping the method calls on each # individual call. Do some performance testing. 
replaced_arg = arguments[index] switch_inputs = [replaced_arg] for new_arg in cs[replaced_arg]: key = self._bmg.add_constant(new_arg) new_arguments = list(arguments) new_arguments[index] = new_arg value = self._handle_random_variable_call_checked( function, new_arguments, cs ) switch_inputs.append(key) switch_inputs.append(value) node = self._bmg.add_switch(*switch_inputs) self._record_node_call(node) return node def _handle_random_variable_call( self, function: Any, arguments: List[Any], kwargs: Dict[str, Any] ) -> BMGNode: if len(kwargs) != 0: # TODO: Better error raise ValueError( "Random variable function calls must not have named arguments." ) cs = ComputeSupport() # If we have one or more graph nodes as arguments to an RV function call # then we need to try every possible value for those arguments. We require # that there be a finite number of possibilities, and that the total number # of branches generated for this call is small. Check that *once* before # recursively processing the call one argument at a time. # First let's see if any are not yet implemented. for arg in arguments: if isinstance(arg, BMGNode) and cs[arg] is Unknown: # TODO: Better exception raise ValueError( f"Stochastic control flow not implemented for {str(arg)}." ) # Are any infinite? for arg in arguments: if isinstance(arg, BMGNode) and cs[arg] is Infinite: # TODO: Better exception raise ValueError("Stochastic control flow must have finite support.") # Are any finite but too large? for arg in arguments: if isinstance(arg, BMGNode) and cs[arg] is TooBig: # TODO: Better exception raise ValueError("Stochastic control flow is too complex.") # Every argument has known, finite, small support. How many combinations are there? # TODO: Note that this can be a considerable overestimate. For example, if we # have outer(inner(), inner(), inner()) and the support of inner has 100 elements, # then there are 100 possible code paths to trace through outer, but we assume there # are 1000000. Is there anything we can do about that? # TODO: Make max_possibilities a global tweakable setting of the accumulator. possibilities = 1 for arg in arguments: if isinstance(arg, BMGNode): possibilities *= len(cs[arg]) if possibilities > max_possibilities: # TODO: Better exception raise ValueError("Stochastic control flow is too complex.") return self._handle_random_variable_call_checked(function, arguments, cs) def _handle_functional_call( self, function: Any, arguments: List[Any], kwargs: Dict[str, Any] ) -> BMGNode: if len(kwargs) != 0: # TODO: Better error raise ValueError("Functional calls must not have named arguments.") # We have a call to a functional function. There are two # cases. Either we have only ordinary values for arguments, or # we have one or more graph nodes. *Do we need to handle these # two cases differently?* # # If the arguments are just plain arguments then we can call the # function normally, obtain an RVID back, and then use our usual # mechanism for turning an RVID into a graph node. # # What if the arguments are graph nodes? We can just do the same! # The callee will immediately return an RVID capturing the values # of the graph nodes. We then check to see if this exact call # has happened already; if it has, then we use the cached graph # node from our RVID->node cache. If it has not, we call the lifted # version of the method with the graph node arguments taken from # the RVID, and add the resulting graph node to the cache. 
# # Since this is a functional, not a random_variable, there is no # stochastic control flow to handle; we just pass the graph nodes in # as values and let the lifted method handle them. # # We lose nothing by doing this and we gain memoization that allows # us to skip doing the call if we have done it before. That's a win. rv = function(*arguments) assert isinstance(rv, RVIdentifier) return self._rv_to_node(rv) def _handle_ordinary_call( self, function: Callable, arguments: List[Any], kwargs: Dict[str, Any] ) -> Any: if not isinstance(function, Callable): raise TypeError( f"_handle_ordinary_call requires Callable but got {type(function)}" ) # We have an ordinary function call to a function that is not on # our list of special functions, and is not a functional, and # is not a random variable. We still need to lift the function # even if its arguments are not graph nodes though! It might do # arithmetic on a random variable even though it is not a functional. # For example, we might have something like: # # @random_variable # def norm1(): # return Normal(0, 1) # # # not a functional # def add_one(): # We call this function with no arguments # return norm1() + 1 # # @random_variable # def norm2(): # return Normal(add_one(), 1) # # Ideally we would like add_one to be marked as functional, but # given that it is not, we need to detect the call to add_one() # as returning a graph node that represents the sum of a sample # and a constant. # It is not already compiled; if we have source code, compile it. # # NOTE: Suppose we have a call to a function which is nested inside # a function that has already been compiled. Illustrative example: # # @rv def norm(): # def my_sum(x, y): # return x + y # return Normal(my_sum(mean(), offset()), 1.0) # # When we compile norm() we will *also* compile my_sum. When we then # call my_sum, we do *not* want to compile it *again*. It is already # in the form "bmg.add_addition(x, y)" and so on; we do not want to # compile that program. # # Fortunately we do not need to even check because the generated code # has no source code! The inspect module does not believe that the code # generated from the AST has any source code, so _has_source_code returns # false, and we call the compiled function exactly as we should. if _has_source_code(function): rewritten_function = self._function_to_bmg_function(function) return self._context.call(rewritten_function, arguments, kwargs) # It is not compiled and we have no source code to compile. # Just call it and hope for the best. # TODO: Do we need to consider the scenario where we do not have # source code, we call a function, and it somehow returns an RVID? # We *could* convert that to a graph node. return self._context.call(function, arguments, kwargs) def handle_function( self, function: Any, arguments: List[Any], kwargs: Optional[Dict[str, Any]] = None, ) -> Any: if kwargs is None: kwargs = {} # Some functions we already have special-purpose handlers for, # like calls to math.exp or tensor.log. 
if self._special_function_caller.is_special_function( function, arguments, kwargs ): result = self._special_function_caller.do_special_call_maybe_stochastic( function, arguments, kwargs ) if isinstance(result, BMGNode): self._record_node_call(result) return result f, args = canonicalize_function(function, arguments) if _is_random_variable_call(f): return self._handle_random_variable_call(f, args, kwargs) if _is_functional_call(f): return self._handle_functional_call(f, args, kwargs) return self._handle_ordinary_call(f, args, kwargs) def _function_to_bmg_function(self, function: Callable) -> Callable: from beanmachine.ppl.compiler.bm_to_bmg import _bm_function_to_bmg_function # This method presupposes that the function is in its "unbound" form. assert not isinstance(function, MethodType) if function not in self.lifted_map: self.lifted_map[function] = _bm_function_to_bmg_function(function, self) return self.lifted_map[function] def _rv_to_node(self, rv: RVIdentifier) -> BMGNode: key = MemoizationKey(rv.wrapper, rv.arguments) if key not in self.rv_map: if key in self.in_flight: # TODO: Better error message raise RecursionError() self.in_flight.add(key) try: # Under what circumstances does a random variable NOT have source code? # When it is nested inside another rv that has already been compiled! # See the note in _handle_ordinary_call for details. if _has_source_code(rv.function): rewritten_function = self._function_to_bmg_function(rv.function) else: rewritten_function = rv.function # Here we deal with an issue caused by how Python produces the source # code of a function. # # We started with a function that produced a random variable when # called, and then we made a transformation based on the *source code* # of that original function. The *source code* of that original function # might OR might not have been decorated with a random_variable or # functional decorator. For example, if we have: # # @random_variable # def foo(): # return Normal(0., 1.) # # and we have a query on foo() then that is the exact code that # we rewrite, and therefore the rewritten function that comes back # is *also* run through the random_variable decorator. But if instead # we have # # def foo(): # return Normal(0., 1.) # # bar = random_variable(foo) # # and a query on bar(), then when we ask Python for the source code of # bar, it hands us back the *undecorated* source code for foo, and # therefore the rewriter produces an undecorated rewritten function. # # How can we tell which situation we're in? Well, if we're in the first # situation then when we call the rewritten function, we'll get back a # RVID, and if we're in the second situation, we will not. value = self._context.call(rewritten_function, rv.arguments) if isinstance(value, RVIdentifier): # We have a rewritten function with a decorator already applied. # Therefore the rewritten form of the *undecorated* function is # stored in the rv. Call *that* function with the given arguments. value = self._context.call(value.function, rv.arguments) # We now have the value returned by the undecorated random variable # regardless of whether the source code was decorated or not. # If we are calling a random_variable then we must have gotten # back a distribution. This is the first time we have called this # rv with these arguments -- because we had a cache miss -- and # therefore we should generate a new sample node. If by contrast # we are calling a functional then we check below that we got # back either a graph node or a tensor that we can make into a constant. 
if rv.is_random_variable: value = self._handle_sample(rv, value) finally: self.in_flight.remove(key) if isinstance(value, torch.Tensor): value = self._bmg.add_constant_tensor(value) if not isinstance(value, BMGNode): # TODO: Improve error message raise TypeError("A functional must return a tensor.") self.rv_map[key] = value return value return self.rv_map[key] def _handle_sample(self, rv: RVIdentifier, operand: Any) -> bn.SampleNode: # noqa """As we execute the lifted program, this method is called every time a model function decorated with @bm.random_variable returns; we verify that the returned value is a distribution that we know how to accumulate into the graph, and add a sample node to the graph.""" if isinstance(operand, bn.DistributionNode): sample = self._bmg.add_sample(operand) self._record_node_rv(sample, rv) return sample if not isinstance(operand, torch.distributions.Distribution): # TODO: Better error raise TypeError("A random_variable is required to return a distribution.") d = self._special_function_caller.distribution_to_node(operand) sample = self._bmg.add_sample(d) self._record_node_rv(d, rv) self._record_node_rv(sample, rv) return sample def handle_dot_get(self, operand: Any, name: str) -> Any: # If we have x = foo.bar, foo must not be a sample; we have no way of # representing the "get the value of an attribute" operation in BMG. # However, suppose foo is a distribution of tensors; we do wish to support # operations such as: # x = foo.exp # y = x() # and have y be a graph that applies an EXP node to the SAMPLE node for foo. # This will require some cooperation between handling dots and handling # functions. if isinstance(operand, BMGNode): # If we're invoking a function on a graph node during execution of # the lifted program, that graph node is almost certainly a tensor # in the original program; assume that it is, and see if this is # a function on a tensor that we know how to accumulate into the graph. return self._special_function_caller.bind_torch_instance_function( operand, name ) return getattr(operand, name) def handle_dot_set(self, operand: Any, name: str, value: Any) -> None: # If we have foo.bar = x, foo must not be a sample; we have no way of # representing the "set the value of an attribute" operation in BMG. if isinstance(operand, BMGNode): raise ValueError( f"Setting the value of attribute {name} is not " + "supported in Bean Machine Graph." ) setattr(operand, name, value) def handle_subscript_assign( self, target: Any, index: Any, stop: Any, step: Any, value: Any ) -> None: # If we have "target[index:stop:step] = value" (any of index, stop or step # can be missing or None) then: # * Target must not be a graph node; there are no mutable graph nodes. # * Index, stop and step must not be a graph node; we do not have the ability # to compile stochastic mutations of other tensors. # * If target is a tensor then value must not be a graph node. We cannot # mutate an existing tensor with a stochastic value. if isinstance(target, BMGNode): # TODO: Better error raise ValueError( "Mutating a stochastic value is not supported in Bean Machine Graph." ) if isinstance(index, BMGNode): # TODO: Better error raise ValueError( "Mutating a collection or tensor with a stochastic index is not " + "supported in Bean Machine Graph." ) if isinstance(stop, BMGNode): # TODO: Better error raise ValueError( "Mutating a collection or tensor with a stochastic upper index is not " + "supported in Bean Machine Graph." 
) if isinstance(step, BMGNode): # TODO: Better error raise ValueError( "Mutating a collection or tensor with a stochastic step is not " + "supported in Bean Machine Graph." ) if isinstance(value, BMGNode) and isinstance(target, torch.Tensor): raise ValueError( "Mutating a tensor with a stochastic value is not " + "supported in Bean Machine Graph." ) target[index] = value def accumulate_graph( self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, Any], ) -> BMGraphBuilder: _verify_queries_and_observations(queries, observations, True) self._bmg._pd = self._pd self._begin(prof.accumulate) for rv, val in observations.items(): node = self._rv_to_node(rv) assert isinstance(node, bn.SampleNode) self._bmg.add_observation(node, val) for qrv in queries: node = self._rv_to_node(qrv) self._bmg.add_query(node, qrv) self._finish(prof.accumulate) return self._bmg
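# A hedged usage sketch (the model is illustrative): accumulate a graph for a
# tiny coin-flip model. Assumes the standard decorators are exposed as
# beanmachine.ppl.random_variable.
if __name__ == "__main__":
    import beanmachine.ppl as bm
    from torch.distributions import Bernoulli, Beta

    @bm.random_variable
    def coin():
        return Beta(2.0, 2.0)

    @bm.random_variable
    def flip():
        return Bernoulli(coin())

    runtime = BMGRuntime()
    graph = runtime.accumulate_graph([coin()], {flip(): torch.tensor(1.0)})
    # graph now holds the constant, distribution, sample, observation and
    # query nodes accumulated for the model.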
beanmachine-main
src/beanmachine/ppl/compiler/runtime.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import typing from typing import Callable, List from torch import Size def identity_fnc(a: List[int]) -> List[int]: return a def _create_input_list_from_target_list( product_list: List[int], input_project_size: List[int] ) -> Callable[[List[int]], int]: # given a coordinate index of target, compute a global index of input def input_list_from_target_list(target_list: List[int]) -> int: i = 0 j = len(product_list) - 1 index = 0 for inx in target_list: if input_project_size[i] == 1: i = i + 1 j = j - 1 continue else: next = inx * product_list[j] index = index + next j = j - 1 i = i + 1 return index return input_list_from_target_list def _create_target_index_to_composite( target_size: Size, group_size: List[int] ) -> Callable[[int], List]: # given a global index, produce a coordinate def target_index_to_composite(ti: int) -> List: index_list = [] current_index = ti j = len(target_size) - 1 for _ in target_size: next_index = math.floor(current_index / group_size[j]) index_list.append(next_index) current_index = current_index % group_size[j] j = j - 1 return index_list return target_index_to_composite def _normalize_size(input_size: Size, target_size: Size) -> List[int]: # Make the input size length equal to target size by buffering with 1's input_project_size = [] ones_to_add = len(target_size) - len(input_size) for _ in range(0, ones_to_add): input_project_size.append(1) for dim in input_size: input_project_size.append(dim) return input_project_size def broadcast_fnc(input_size: Size, target_size: Size) -> typing.Optional[Callable]: if input_size == target_size: return identity_fnc input_project_size = _normalize_size(input_size, target_size) assert len(input_project_size) == len(target_size) # the input can be broadcast to the target if # input_dim[i] == target_dim[i] || input_dim[i] == 1 for all i for i in range(0, len(target_size)): if input_project_size[i] != 1 and target_size[i] != input_project_size[i]: return None # in order to map from a composite index to a coordinate index we # need to know how many elements are in each element of each dimension # for example, in the case of a list of matrices we might have the size 4 x 3 x 2 # which means we have a list of 4 elements, where each element is a matrix of 6 elements. # Within the matrix, we have 3 elements, each of size 2. In this case, the group size array # should be [6, 2, 1] group_size = [] current = 1 L = len(target_size) for k in range(0, L).__reversed__(): d = target_size[k] group_size.append(current) current = current * d target_index_to_composite = _create_target_index_to_composite( target_size, group_size ) # product list should be [2, 1, 1] product_list = [] current = 1 # the element at index N-j should be the size of the group at dimension j # for [1,1,3] we want [1,3,3]. For [3,2,1] we want [1,1,2] for k in range(0, len(input_project_size)).__reversed__(): d = input_project_size[k] product_list.append(current) current = current * d input_list_from_target_list = _create_input_list_from_target_list( product_list, input_project_size ) return lambda target_index: input_list_from_target_list( target_index_to_composite(target_index) )
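# A hedged usage sketch: broadcast an input of size [3] to a target of size
# [2, 3]. The returned function maps the global index of a target element
# back to the global index of the input element it came from; every row of
# the target reads the same three input elements.
if __name__ == "__main__":
    f = broadcast_fnc(Size([3]), Size([2, 3]))
    assert f is not None
    print([f(i) for i in range(6)])  # prints [0, 1, 2, 0, 1, 2]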
beanmachine-main
src/beanmachine/ppl/compiler/broadcaster.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from abc import ABC, ABCMeta from typing import Any, Iterable, List import beanmachine.ppl.compiler.bmg_types as bt import torch from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.utils.item_counter import ItemCounter from torch import Tensor # Note that we're not going to subclass list or UserList here because we # only need to use the most basic list operations: initialization, getting # an item, and setting an item. We never want to delete items, append to # the end, and so on. class InputList: node: "BMGNode" inputs: List["BMGNode"] def __init__(self, node: "BMGNode", inputs: List["BMGNode"]) -> None: assert isinstance(inputs, list) self.node = node self.inputs = inputs for i in inputs: i.outputs.add_item(node) def __setitem__(self, index: int, value: "BMGNode") -> None: # If this is a no-op, do nothing. old_value = self.inputs[index] if old_value is value: return # Start by maintaining correctness of the input/output relationships. # # (1) The node is no longer an output of the current input at the index. # (2) The node is now an output of the new input at the index. # old_value.outputs.remove_item(self.node) self.inputs[index] = value value.outputs.add_item(self.node) def __getitem__(self, index: int) -> "BMGNode": return self.inputs[index] def __iter__(self): return iter(self.inputs) def __len__(self) -> int: return len(self.inputs) class BMGNode(ABC): """The base class for all graph nodes.""" # A Bayesian network is a acyclic graph in which each node represents # a value or operation; directed edges represent the inputs and # outputs of each node. # # We have a small nomenclature problem here; when describing the shape # of, say, a multiplication in an abstract syntax tree we would say that # the multiplication operator is the "parent" and the pair of operands # are the left and right "children". However, in Bayesian networks # the tradition is to consider the input values as "parents" of the # multiplication, and nodes which consume the product are its "children". # # To avoid this confusion, in this class we will explicitly call out # that the edges represent inputs. inputs: InputList outputs: ItemCounter def __init__(self, inputs: List["BMGNode"]): assert isinstance(inputs, list) self.inputs = InputList(self, inputs) self.outputs = ItemCounter() @property def is_leaf(self) -> bool: return len(self.outputs.items) == 0 # #### # #### Nodes representing constant values # #### class ConstantNode(BMGNode, metaclass=ABCMeta): """This is the base type for all nodes representing constants. 
Note that every constant node has an associated type in the BMG type system; nodes that represent the "real" 1.0, the "positive real" 1.0, the "probability" 1.0 and the "natural" 1 are all different nodes and are NOT deduplicated.""" value: Any def __init__(self): BMGNode.__init__(self, []) class UntypedConstantNode(ConstantNode): def __init__(self, value: Any) -> None: self.value = value ConstantNode.__init__(self) def __str__(self) -> str: return str(self.value) class BooleanNode(ConstantNode): """A Boolean constant""" value: bool def __init__(self, value: bool): self.value = value ConstantNode.__init__(self) def __str__(self) -> str: return str(self.value) class NaturalNode(ConstantNode): """An integer constant restricted to non-negative values""" value: int def __init__(self, value: int): self.value = value ConstantNode.__init__(self) def __str__(self) -> str: return str(self.value) class PositiveRealNode(ConstantNode): """A real constant restricted to non-negative values""" value: float def __init__(self, value: float): self.value = value ConstantNode.__init__(self) def __str__(self) -> str: return str(self.value) class NegativeRealNode(ConstantNode): """A real constant restricted to non-positive values""" value: float def __init__(self, value: float): self.value = value ConstantNode.__init__(self) def __str__(self) -> str: return str(self.value) class ProbabilityNode(ConstantNode): """A real constant restricted to values from 0.0 to 1.0""" value: float def __init__(self, value: float): self.value = value ConstantNode.__init__(self) def __str__(self) -> str: return str(self.value) class RealNode(ConstantNode): """An unrestricted real constant""" value: float def __init__(self, value: float): self.value = value ConstantNode.__init__(self) def __str__(self) -> str: return str(self.value) class ConstantTensorNode(ConstantNode): """A tensor constant""" value: Tensor def __init__(self, value: Tensor): self.value = value ConstantNode.__init__(self) def __str__(self) -> str: return str(self.value) class ConstantPositiveRealMatrixNode(ConstantTensorNode): def __init__(self, value: Tensor): assert len(value.size()) <= 2 ConstantTensorNode.__init__(self, value) class ConstantRealMatrixNode(ConstantTensorNode): def __init__(self, value: Tensor): assert len(value.size()) <= 2 ConstantTensorNode.__init__(self, value) class ConstantNegativeRealMatrixNode(ConstantTensorNode): def __init__(self, value: Tensor): assert len(value.size()) <= 2 ConstantTensorNode.__init__(self, value) class ConstantProbabilityMatrixNode(ConstantTensorNode): def __init__(self, value: Tensor): assert len(value.size()) <= 2 ConstantTensorNode.__init__(self, value) class ConstantSimplexMatrixNode(ConstantTensorNode): def __init__(self, value: Tensor): assert len(value.size()) <= 2 ConstantTensorNode.__init__(self, value) class ConstantNaturalMatrixNode(ConstantTensorNode): def __init__(self, value: Tensor): assert len(value.size()) <= 2 ConstantTensorNode.__init__(self, value) class ConstantBooleanMatrixNode(ConstantTensorNode): def __init__(self, value: Tensor): assert len(value.size()) <= 2 ConstantTensorNode.__init__(self, value) class TensorNode(BMGNode): """A tensor whose elements are graph nodes.""" _size: torch.Size def __init__(self, items: List[BMGNode], size: torch.Size): assert isinstance(items, list) self._size = size BMGNode.__init__(self, items) def __str__(self) -> str: return "TensorNode" # #### # #### Nodes representing distributions # #### class DistributionNode(BMGNode, metaclass=ABCMeta): """This is the 
base class for all nodes that represent probability distributions.""" def __init__(self, inputs: List[BMGNode]): BMGNode.__init__(self, inputs) class BernoulliBase(DistributionNode): def __init__(self, probability: BMGNode): DistributionNode.__init__(self, [probability]) @property def probability(self) -> BMGNode: return self.inputs[0] class BernoulliNode(BernoulliBase): """The Bernoulli distribution is a coin flip; it takes a probability and each sample is either 0.0 or 1.0.""" def __init__(self, probability: BMGNode): BernoulliBase.__init__(self, probability) def __str__(self) -> str: return "Bernoulli(" + str(self.probability) + ")" class BernoulliLogitNode(BernoulliBase): """The Bernoulli distribution is a coin flip; it takes a probability and each sample is either 0.0 or 1.0.""" def __init__(self, probability: BMGNode): BernoulliBase.__init__(self, probability) def __str__(self) -> str: return "Bernoulli(" + str(self.probability) + ")" class BetaNode(DistributionNode): """The beta distribution samples are values between 0.0 and 1.0, and so is useful for creating probabilities.""" def __init__(self, alpha: BMGNode, beta: BMGNode): DistributionNode.__init__(self, [alpha, beta]) @property def alpha(self) -> BMGNode: return self.inputs[0] @property def beta(self) -> BMGNode: return self.inputs[1] def __str__(self) -> str: return f"Beta({str(self.alpha)},{str(self.beta)})" class PoissonNode(DistributionNode): """The Poisson distribution samples are non-negative integer valued.""" def __init__(self, rate: BMGNode): DistributionNode.__init__(self, [rate]) @property def rate(self) -> BMGNode: return self.inputs[0] def __str__(self) -> str: return f"Poisson({str(self.rate)})" class BinomialNodeBase(DistributionNode): def __init__(self, count: BMGNode, probability: BMGNode): DistributionNode.__init__(self, [count, probability]) @property def count(self) -> BMGNode: return self.inputs[0] @property def probability(self) -> BMGNode: return self.inputs[1] def __str__(self) -> str: return f"Binomial({self.count}, {self.probability})" def support(self) -> Iterable[Any]: raise ValueError("Support of binomial is not yet implemented.") class BinomialNode(BinomialNodeBase): """The Binomial distribution is the extension of the Bernoulli distribution to multiple flips. The input is the count of flips and the probability of each coming up heads; each sample is the number of heads after "count" flips.""" def __init__(self, count: BMGNode, probability: BMGNode, is_logits: bool = False): BinomialNodeBase.__init__(self, count, probability) class BinomialLogitNode(BinomialNodeBase): """The Binomial distribution is the extension of the Bernoulli distribution to multiple flips. The input is the count of flips and the probability of each coming up heads; each sample is the number of heads after "count" flips.""" # TODO: We do not yet have a BMG node for Binomial # with logits. When we do, add support for it. def __init__(self, count: BMGNode, probability: BMGNode): BinomialNodeBase.__init__(self, count, probability) class CategoricalNodeBase(DistributionNode): """The categorical distribution is the extension of the Bernoulli distribution to multiple outcomes; rather than flipping an unfair coin, this is rolling an unfair n-sided die. The input is the probability of each of n possible outcomes, and each sample is drawn from 0, 1, 2, ... n-1.""" # TODO: we may wish to add bounded integers to the BMG type system. 
def __init__(self, probability: BMGNode): DistributionNode.__init__(self, [probability]) @property def probability(self) -> BMGNode: return self.inputs[0] def __str__(self) -> str: return "Categorical(" + str(self.probability) + ")" class CategoricalNode(CategoricalNodeBase): def __init__(self, probability: BMGNode): DistributionNode.__init__(self, [probability]) class CategoricalLogitNode(CategoricalNodeBase): def __init__(self, probability: BMGNode): DistributionNode.__init__(self, [probability]) class Chi2Node(DistributionNode): """The chi2 distribution is a distribution of positive real numbers; it is a special case of the gamma distribution.""" def __init__(self, df: BMGNode): DistributionNode.__init__(self, [df]) @property def df(self) -> BMGNode: return self.inputs[0] def __str__(self) -> str: return f"Chi2({str(self.df)})" class DirichletNode(DistributionNode): """The Dirichlet distribution generates simplexs -- vectors whose members are probabilities that add to 1.0, and so it is useful for generating inputs to the categorical distribution.""" def __init__(self, concentration: BMGNode): DistributionNode.__init__(self, [concentration]) @property def concentration(self) -> BMGNode: return self.inputs[0] def __str__(self) -> str: return f"Dirichlet({str(self.concentration)})" class FlatNode(DistributionNode): """The Flat distribution the standard uniform distribution from 0.0 to 1.0.""" def __init__(self): DistributionNode.__init__(self, []) def __str__(self) -> str: return "Flat()" class GammaNode(DistributionNode): """The gamma distribution is a distribution of positive real numbers characterized by positive real concentration and rate parameters.""" def __init__(self, concentration: BMGNode, rate: BMGNode): DistributionNode.__init__(self, [concentration, rate]) @property def concentration(self) -> BMGNode: return self.inputs[0] @property def rate(self) -> BMGNode: return self.inputs[1] def __str__(self) -> str: return f"Gamma({str(self.concentration)}, {str(self.rate)})" class HalfCauchyNode(DistributionNode): """The Cauchy distribution is a bell curve with zero mean and a heavier tail than the normal distribution; it is useful for generating samples that are not as clustered around the mean as a normal. The half Cauchy distribution is just the distribution you get when you take the absolute value of the samples from a Cauchy distribution. The input is a positive scale factor and a sample is a positive real number.""" # TODO: Add support for the Cauchy distribution as well. def __init__(self, scale: BMGNode): DistributionNode.__init__(self, [scale]) @property def scale(self) -> BMGNode: return self.inputs[0] def __str__(self) -> str: return f"HalfCauchy({str(self.scale)})" class NormalNode(DistributionNode): """The normal (or "Gaussian") distribution is a bell curve with a given mean and standard deviation.""" def __init__(self, mu: BMGNode, sigma: BMGNode): DistributionNode.__init__(self, [mu, sigma]) @property def mu(self) -> BMGNode: return self.inputs[0] @property def sigma(self) -> BMGNode: return self.inputs[1] def __str__(self) -> str: return f"Normal({str(self.mu)},{str(self.sigma)})" class HalfNormalNode(DistributionNode): """The half-normal distribution is a half bell curve with a given standard deviation. 
Mean (for the underlying normal) is taken to be zero.""" def __init__(self, sigma: BMGNode): DistributionNode.__init__(self, [sigma]) @property def sigma(self) -> BMGNode: return self.inputs[0] def __str__(self) -> str: return f"HalfNormal({str(self.sigma)})" class StudentTNode(DistributionNode): """The Student T distribution is a bell curve with zero mean and a heavier tail than the normal distribution. It is useful in statistical analysis because a common situation is to have observations of a normal process but to not know the true mean. Samples from the T distribution can be used to represent the difference between an observed mean and the true mean.""" def __init__(self, df: BMGNode, loc: BMGNode, scale: BMGNode): DistributionNode.__init__(self, [df, loc, scale]) @property def df(self) -> BMGNode: return self.inputs[0] @property def loc(self) -> BMGNode: return self.inputs[1] @property def scale(self) -> BMGNode: return self.inputs[2] def __str__(self) -> str: return f"StudentT({str(self.df)},{str(self.loc)},{str(self.scale)})" class UniformNode(DistributionNode): """The Uniform distribution is a "flat" distribution of values between 0.0 and 1.0.""" # TODO: We do not yet have an implementation of the uniform # distribution as a BMG node. When we do, implement the # feature here. def __init__(self, low: BMGNode, high: BMGNode): DistributionNode.__init__(self, [low, high]) @property def low(self) -> BMGNode: return self.inputs[0] @property def high(self) -> BMGNode: return self.inputs[1] def __str__(self) -> str: return f"Uniform({str(self.low)},{str(self.high)})" class LKJCholeskyNode(DistributionNode): """The LKJ distribution produces correlation matrices of size dim x dim according to a concentration parameter eta. This is the Cholesky factorization of that distribution, so that L L^T ~ LKJ(eta) for L ~ LKJCholesky(eta).""" def __init__(self, dim: BMGNode, eta: BMGNode): DistributionNode.__init__(self, [dim, eta]) @property def dim(self) -> BMGNode: return self.inputs[0] @property def eta(self) -> BMGNode: return self.inputs[1] def __str__(self) -> str: return f"LKJCholesky({str(self.dim)}, {str(self.eta)})" # #### # #### Operators # #### class OperatorNode(BMGNode, metaclass=ABCMeta): """This is the base class for all operators. The inputs are the operands of each operator.""" def __init__(self, inputs: List[BMGNode]): assert isinstance(inputs, list) BMGNode.__init__(self, inputs) # #### # #### Multiary operators # #### class AdditionNode(OperatorNode): """This represents an addition of values.""" def __init__(self, inputs: List[BMGNode]): assert isinstance(inputs, list) OperatorNode.__init__(self, inputs) def __str__(self) -> str: return "(" + "+".join([str(inp) for inp in self.inputs]) + ")" class MultiplicationNode(OperatorNode): """This represents multiplication of values.""" def __init__(self, inputs: List[BMGNode]): assert isinstance(inputs, list) OperatorNode.__init__(self, inputs) def __str__(self) -> str: return "(" + "*".join([str(inp) for inp in self.inputs]) + ")" # We have three kinds of logsumexp nodes. # # * LogSumExpTorchNode represents a call to logsumexp in the original # Python model. It has three operands: the tensor being summed, # the dimension along which it is summed, and a flag giving the shape. # # * LogSumExpNode represents a BMG LOGSUMEXP node. It is an n-ary operator # and produces a real; each of the inputs is one of the summands. # # * LogSumExpVectorNode represents a BMG LOGSUMEXP_VECTOR node. 
It is a unary # operator that takes a single-column matrix. # # We transform LogSumExpTorchNode into LogSumExpNode or LogSumExpVectorNode # as appropriate. class LogSumExpTorchNode(OperatorNode): def __init__(self, operand: BMGNode, dim: BMGNode, keepdim: BMGNode): OperatorNode.__init__(self, [operand, dim, keepdim]) def __str__(self) -> str: return "LogSumExp" class LogSumExpNode(OperatorNode): """This class represents the LogSumExp operation: for values v_1, ..., v_n we compute log(exp(v_1) + ... + exp(v_n))""" def __init__(self, inputs: List[BMGNode]): assert isinstance(inputs, list) OperatorNode.__init__(self, inputs) def __str__(self) -> str: return "LogSumExp" class ToMatrixNode(OperatorNode): """A 2-d tensor whose elements are graph nodes.""" def __init__(self, rows: NaturalNode, columns: NaturalNode, items: List[BMGNode]): # The first two elements are the row and column counts; they must # be constant naturals. assert isinstance(items, list) assert len(items) >= 1 rc: List[BMGNode] = [rows, columns] BMGNode.__init__(self, rc + items) @property def rows(self) -> NaturalNode: return self.inputs[0] # pyre-ignore @property def columns(self) -> NaturalNode: return self.inputs[1] # pyre-ignore def __str__(self) -> str: return "ToMatrix" class FillMatrixNode(OperatorNode): def __init__(self, value: BMGNode, rows: NaturalNode, columns: NaturalNode): # The row and column nodes must be constant naturals. BMGNode.__init__(self, [value, rows, columns]) def __str__(self) -> str: return "FillMatrix" class BroadcastNode(OperatorNode): def __init__(self, value: BMGNode, rows: NaturalNode, columns: NaturalNode): # The row and column nodes must be constant naturals. BMGNode.__init__(self, [value, rows, columns]) def __str__(self) -> str: return "Broadcast" # #### # #### Control flow operators # #### class IfThenElseNode(OperatorNode): """This class represents a stochastic choice between two options, where the condition is a Boolean.""" # This node will only be generated when tranforming the Python version of # the graph into the BMG format; for instance, if we have a multiplication # of a Bernoulli sample node by 2.0, in the Python form we'll have a scalar # multiplied by a sample of type tensor. In the BMG form the sample will be # of type Boolean and we cannot multiply a Boolean by a Real. Instead we'll # generate "if_then_else(sample, 0.0, 1.0) * 2.0" which typechecks in the # BMG type system. # # Eventually we will probably use this node to represent Python's # "consequence if condition else alternative" syntax, and possibly # other conditional stochastic control flows. def __init__(self, condition: BMGNode, consequence: BMGNode, alternative: BMGNode): OperatorNode.__init__(self, [condition, consequence, alternative]) @property def condition(self) -> BMGNode: return self.inputs[0] @property def consequence(self) -> BMGNode: return self.inputs[1] @property def alternative(self) -> BMGNode: return self.inputs[2] def __str__(self) -> str: i = str(self.condition) t = str(self.consequence) e = str(self.alternative) return f"(if {i} then {t} else {e})" class ChoiceNode(OperatorNode): """This class represents a stochastic choice between n options, where the condition is a natural.""" # See comments in SwitchNode for more details. def __init__(self, condition: BMGNode, items: List[BMGNode]): assert isinstance(items, list) # We should not generate a choice node if there is only one choice. 
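# For example, a choice among three options selected by a natural-valued
# condition n is constructed with inputs [n, item_0, item_1, item_2];
# the runtime value of n picks which item flows onward.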
assert len(items) >= 2 c: List[BMGNode] = [condition] BMGNode.__init__(self, c + items) def __str__(self) -> str: return "Choice" # #### # #### Binary operators # #### class BinaryOperatorNode(OperatorNode, metaclass=ABCMeta): """This is the base class for all binary operators.""" def __init__(self, left: BMGNode, right: BMGNode): OperatorNode.__init__(self, [left, right]) @property def left(self) -> BMGNode: return self.inputs[0] @property def right(self) -> BMGNode: return self.inputs[1] class ComparisonNode(BinaryOperatorNode, metaclass=ABCMeta): """This is the base class for all comparison operators.""" def __init__(self, left: BMGNode, right: BMGNode): BinaryOperatorNode.__init__(self, left, right) class GreaterThanNode(ComparisonNode): def __init__(self, left: BMGNode, right: BMGNode): ComparisonNode.__init__(self, left, right) def __str__(self) -> str: return f"({str(self.left)}>{str(self.right)})" class GreaterThanEqualNode(ComparisonNode): def __init__(self, left: BMGNode, right: BMGNode): ComparisonNode.__init__(self, left, right) def __str__(self) -> str: return f"({str(self.left)}>={str(self.right)})" class LessThanNode(ComparisonNode): def __init__(self, left: BMGNode, right: BMGNode): ComparisonNode.__init__(self, left, right) def __str__(self) -> str: return f"({str(self.left)}<{str(self.right)})" class LessThanEqualNode(ComparisonNode): def __init__(self, left: BMGNode, right: BMGNode): ComparisonNode.__init__(self, left, right) def __str__(self) -> str: return f"({str(self.left)}<={str(self.right)})" class ElementwiseMultiplyNode(BinaryOperatorNode): def __init__(self, left: BMGNode, right: BMGNode): BinaryOperatorNode.__init__(self, left, right) def __str__(self) -> str: return f"{self.left} * {self.right}" class EqualNode(ComparisonNode): def __init__(self, left: BMGNode, right: BMGNode): ComparisonNode.__init__(self, left, right) def __str__(self) -> str: return f"({str(self.left)}=={str(self.right)})" class NotEqualNode(ComparisonNode): def __init__(self, left: BMGNode, right: BMGNode): ComparisonNode.__init__(self, left, right) def __str__(self) -> str: return f"({str(self.left)}!={str(self.right)})" class IsNode(ComparisonNode): def __init__(self, left: BMGNode, right: BMGNode): ComparisonNode.__init__(self, left, right) class IsNotNode(ComparisonNode): def __init__(self, left: BMGNode, right: BMGNode): ComparisonNode.__init__(self, left, right) class InNode(ComparisonNode): def __init__(self, left: BMGNode, right: BMGNode): ComparisonNode.__init__(self, left, right) class NotInNode(ComparisonNode): def __init__(self, left: BMGNode, right: BMGNode): ComparisonNode.__init__(self, left, right) class BitAndNode(BinaryOperatorNode): def __init__(self, left: BMGNode, right: BMGNode): BinaryOperatorNode.__init__(self, left, right) class BitOrNode(BinaryOperatorNode): def __init__(self, left: BMGNode, right: BMGNode): BinaryOperatorNode.__init__(self, left, right) class BitXorNode(BinaryOperatorNode): def __init__(self, left: BMGNode, right: BMGNode): BinaryOperatorNode.__init__(self, left, right) class DivisionNode(BinaryOperatorNode): """This represents a division.""" # There is no division node in BMG; we will replace # x / y with x * (y ** (-1)) during the "fix problems" # phase. 
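# For example, if s is a sample node then "s / 2.0" is later rewritten
# to "s * (2.0 ** -1.0)", leaving only multiplication and power nodes
# in the final graph.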
def __init__(self, left: BMGNode, right: BMGNode): BinaryOperatorNode.__init__(self, left, right) def __str__(self) -> str: return "(" + str(self.left) + "/" + str(self.right) + ")" class FloorDivNode(BinaryOperatorNode): def __init__(self, left: BMGNode, right: BMGNode): BinaryOperatorNode.__init__(self, left, right) class LShiftNode(BinaryOperatorNode): def __init__(self, left: BMGNode, right: BMGNode): BinaryOperatorNode.__init__(self, left, right) class ModNode(BinaryOperatorNode): def __init__(self, left: BMGNode, right: BMGNode): BinaryOperatorNode.__init__(self, left, right) class RShiftNode(BinaryOperatorNode): def __init__(self, left: BMGNode, right: BMGNode): BinaryOperatorNode.__init__(self, left, right) # LogAddExpNode represents a call to the binary operator logAddExp in the original # code. It is transformed into LogSumExpNode. class LogAddExpNode(BinaryOperatorNode): """This class represents the LogAddExp operation: for values v_1, v_2 we compute log(exp(v_1) + exp(v_2))""" def __init__(self, left: BMGNode, right: BMGNode): BinaryOperatorNode.__init__(self, left, right) def __str__(self) -> str: return "LogAddExp({self.left}, {self.right})" class LogProbNode(BinaryOperatorNode): """This class represents the log_prob operator on a distribution""" def __init__(self, left: BMGNode, right: BMGNode): BinaryOperatorNode.__init__(self, left, right) def __str__(self) -> str: return "LogProb({self.left}, {self.right})" class SwitchNode(BMGNode): """This class represents a point in a program where there are multiple control flows based on the value of a stochastic node.""" # For example, suppose we have this contrived model: # # @bm.random_variable def weird(i): # if i == 0: # return Normal(3.0, 4.0) # return Normal(5.0, 6.0) # # @bm.random_variable def flips(): # return Binomial(2, 0.5) # # @bm.random_variable def really_weird(): # return Normal(weird(flips()), 7.0) # # There are three possibilities for weird(flips()) on the last line; # what we need to represent in the graph is: # # * sample once from Normal(0.0, 1.0), call this weird(0) # * sample twice from Normal(1.0, 1.0), call these weird(1) and weird(2) # * sample once from flips() # * choose one of weird(i) based on the sample from flips(). # # We represent this with a switch node. # # 2 0.5 3 4 5 6 # \ / \ / \ / # B N N # | | / \ # ~ 0 ~ 1 ~ 2 ~ # \ \ \ / / / / # switch # \ 7 # \ / # N # | # ~ # # That is, inputs[0] of the switch is the quantity that makes the choice: # a sample from B(2, 0.5). We then have a series of case/value pairs: # # * inputs[c] for c = 1, 3, 5, ... are always constants. # * inputs[v] for v = 2, 4, 6, ... are the values chosen when inputs[0] # takes on the value of the corresponding constant. # # Note that we do not have a generalized switch in BMG. Rather, we have # the simpler cases of (1) the IfThenElse node, where the leftmost input # is a Boolean quantity and the other two inputs are the values, and # (2) a ChoiceNode, which takes a natural and then chooses from amongst # n possible values. # # TODO: Should we implement a general switch node in BMG? # # The runtime creates the switch based on the support of flips(), the first # input to the switch. In this case the support is {0, 1, 2} but there is # no reason why they could not have been 1, 10, 100 instead, if for instance # we had something like "weird(10 ** flips())". def __init__(self, inputs: List[BMGNode]): # TODO: Check that cases are all constant nodes. # TODO: Check that there is one value for each case. 
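# For the example above, the inputs list would be, symbolically,
#
#     [flips_sample, 0, weird(0), 1, weird(1), 2, weird(2)]
#
# the chooser first, then alternating constant/value pairs.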
        BMGNode.__init__(self, inputs)


# This represents an indexing operation in the original source code.
# It will be replaced by a VectorIndexNode or ColumnIndexNode in the
# problem fixing phase.
class IndexNode(BinaryOperatorNode):
    def __init__(self, left: BMGNode, right: BMGNode):
        BinaryOperatorNode.__init__(self, left, right)

    def __str__(self) -> str:
        return str(self.left) + "[" + str(self.right) + "]"


class ItemNode(OperatorNode):
    """Represents torch.Tensor.item() conversion from tensor to scalar."""

    def __init__(self, operand: BMGNode):
        OperatorNode.__init__(self, [operand])

    def __str__(self) -> str:
        return str(self.inputs[0]) + ".item()"


class VectorIndexNode(BinaryOperatorNode):
    """This represents a stochastic index into a vector. The left operand
    is the vector and the right operand is the index."""

    def __init__(self, left: BMGNode, right: BMGNode):
        BinaryOperatorNode.__init__(self, left, right)

    def __str__(self) -> str:
        return str(self.left) + "[" + str(self.right) + "]"


class ColumnIndexNode(BinaryOperatorNode):
    def __init__(self, left: BMGNode, right: BMGNode):
        BinaryOperatorNode.__init__(self, left, right)

    def __str__(self) -> str:
        return "ColumnIndex"


class MatrixMultiplicationNode(BinaryOperatorNode):
    """This represents a matrix multiplication."""

    # TODO: We now have matrix multiplication in BMG; finish this implementation.

    def __init__(self, left: BMGNode, right: BMGNode):
        BinaryOperatorNode.__init__(self, left, right)

    def __str__(self) -> str:
        return "(" + str(self.left) + "*" + str(self.right) + ")"


class MatrixAddNode(BinaryOperatorNode):
    def __init__(self, left: BMGNode, right: BMGNode):
        BinaryOperatorNode.__init__(self, left, right)

    def __str__(self) -> str:
        return f"{self.left} + {self.right}"


class MatrixScaleNode(BinaryOperatorNode):
    """This represents a matrix scaling."""

    def __init__(self, left: BMGNode, right: BMGNode):
        BinaryOperatorNode.__init__(self, left, right)

    def __str__(self) -> str:
        return "(" + str(self.left) + "*" + str(self.right) + ")"


class PowerNode(BinaryOperatorNode):
    """This represents an x-to-the-y operation."""

    def __init__(self, left: BMGNode, right: BMGNode):
        BinaryOperatorNode.__init__(self, left, right)

    def __str__(self) -> str:
        return "(" + str(self.left) + "**" + str(self.right) + ")"


# ####
# #### Unary operators
# ####


class UnaryOperatorNode(OperatorNode, metaclass=ABCMeta):
    """This is the base type of unary operator nodes."""

    def __init__(self, operand: BMGNode):
        OperatorNode.__init__(self, [operand])

    @property
    def operand(self) -> BMGNode:
        return self.inputs[0]


class CholeskyNode(UnaryOperatorNode):
    """This represents a Cholesky operation; it is generated when a model
    contains calls to Tensor.cholesky."""

    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "Cholesky(" + str(self.operand) + ")"


class ExpNode(UnaryOperatorNode):
    """This represents an exponentiation operation; it is generated when a
    model contains calls to Tensor.exp or math.exp."""

    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "Exp(" + str(self.operand) + ")"


class Exp2Node(UnaryOperatorNode):
    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "Exp2(" + str(self.operand) + ")"


class ExpM1Node(UnaryOperatorNode):
    """This represents the operation exp(x) - 1; it is generated when a model
    contains calls to Tensor.expm1."""

    # TODO: If we have exp(x) - 1 in the graph and x is known to be of
    # type positive real or negative real then the expression as a whole is of
    # type real. If we convert such expressions in the graph to expm1(x)
    # then we can make the type more specific, and also possibly reduce
    # the number of nodes in the graph.

    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "ExpM1(" + str(self.operand) + ")"


class LogisticNode(UnaryOperatorNode):
    """This represents the operation 1/(1+exp(-x)); it is generated when a model
    contains calls to Tensor.sigmoid."""

    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "Logistic(" + str(self.operand) + ")"


class LogNode(UnaryOperatorNode):
    """This represents a log operation; it is generated when a model contains
    calls to Tensor.log or math.log."""

    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "Log(" + str(self.operand) + ")"


class MatrixLogNode(UnaryOperatorNode):
    """This represents an elementwise log operation; it is generated when a
    model contains calls to Tensor.log or math.log on a matrix operand."""

    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "MatrixLog(" + str(self.operand) + ")"


class MatrixLog1mexpNode(UnaryOperatorNode):
    """This represents a log1mexp operation; it is generated when a model
    contains calls to math.log1mexp or x -> exp -> complement -> log"""

    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "MatrixLog1mexp(" + str(self.operand) + ")"


class MatrixComplementNode(UnaryOperatorNode):
    """This represents a complement operation; it is generated when a model
    contains calls like (1-p) or ~b where p is a probability and b is a
    boolean."""

    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "MatrixComplement(" + str(self.operand) + ")"


class MatrixNegateNode(UnaryOperatorNode):
    """This represents a negate operation on a matrix input."""

    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "MatrixNegate(" + str(self.operand) + ")"


class Log10Node(UnaryOperatorNode):
    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "Log10(" + str(self.operand) + ")"


class Log1pNode(UnaryOperatorNode):
    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "Log1p(" + str(self.operand) + ")"


class Log2Node(UnaryOperatorNode):
    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "Log2(" + str(self.operand) + ")"


class SquareRootNode(UnaryOperatorNode):
    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "SquareRoot(" + str(self.operand) + ")"


class Log1mexpNode(UnaryOperatorNode):
    """This represents a log1mexp operation; it is generated as an
    optimization when a graph contains x -> exp -> complement -> log"""

    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "Log1mexp(" + str(self.operand) + ")"


class MatrixExpNode(UnaryOperatorNode):
    """This represents an elementwise exponentiation operation; it is
    generated when a model contains calls to Tensor.exp or math.exp on a
    matrix operand."""

    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)
    def __str__(self) -> str:
        return "MatrixExp(" + str(self.operand) + ")"


class MatrixPhiNode(UnaryOperatorNode):
    """This represents a phi operation; it is generated when a model
    contains calls to Normal(0,1).cdf"""

    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "MatrixPhi(" + str(self.operand) + ")"


class MatrixSumNode(UnaryOperatorNode):
    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "MatrixSum(" + str(self.operand) + ")"


class TransposeNode(UnaryOperatorNode):
    """This represents a transpose operation; it is generated when a model
    contains calls to transpose or Tensor.transpose"""

    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "Transpose(" + str(self.operand) + ")"


# BMG supports three different kinds of negation:
#
# * The "complement" node with a Boolean operand has the semantics
#   of logical negation. The input and output are both bool.
#
# * The "complement" node with a probability operand has the semantics
#   of (1 - p). The input and output are both probability.
#
# * The "negate" node has the semantics of (0 - x). The input must be
#   real, positive real or negative real, and the output is
#   real, negative real or positive real respectively.
#
# Note that there is no subtraction operator in BMG; to express x - y
# we generate nodes as though (x + (-y)) was written; that is, the
# sum of x and a real-number negation of y.
#
# This presents several problems when accumulating a graph while executing
# a Python model, and then turning said graph into a valid BMG, particularly
# during type analysis.
#
# Our strategy is:
#
# * When we accumulate the graph we will create nodes for addition
#   (AdditionNode), unary negation (NegateNode) and the "not"
#   operator (NotNode). We will not generate "complement" nodes
#   directly from Python source.
#
# * After accumulating the graph we will do type analysis and use
#   that to drive a rewriting pass. The rewriting pass will perform
#   these tasks:
#
#   (1) "not" nodes whose operands are bool will be converted into
#       "complement" nodes.
#
#   (2) "not" nodes whose operands are not bool will produce an error.
#       (The "not" operator applied to a non-bool x in Python has the
#       semantics of "x == 0" and we do not have any way to represent
#       these semantics in BMG.)
#
#   (3) Call a constant "one-like" if it is True, 1, 1.0, or a single-
#       valued tensor with a one-like value. If we have a one-like node,
#       call it 1 for short, then we will look for patterns in the
#       accumulated graph such as
#
#       1 + (-p)
#       (-p) + 1
#       -(p + -1)
#       -(-1 + p)
#
#       and replace them with "complement" nodes (where p is a probability or
#       Boolean expression). A minimal sketch of this pattern match appears
#       after these notes.
#
#   (4) Other usages of binary + and unary - in the Python model will
#       be converted to BMG following the rules for addition and negation
#       in BMG: negation must be real valued, and so on.
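# A minimal sketch of the pattern match in task (3), for illustration only.
# `is_one_like` is a hypothetical helper, and the real rewriter lives in the
# problem-fixing passes, not in this module:
#
#     def _complement_fixer(node):
#         # Rewrite 1 + (-p) and (-p) + 1 into complement(p).
#         if isinstance(node, AdditionNode) and len(node.inputs) == 2:
#             left, right = node.inputs[0], node.inputs[1]
#             if is_one_like(left) and isinstance(right, NegateNode):
#                 return ComplementNode(right.operand)
#             if is_one_like(right) and isinstance(left, NegateNode):
#                 return ComplementNode(left.operand)
#         return None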
class NegateNode(UnaryOperatorNode): """This represents a unary minus.""" def __init__(self, operand: BMGNode): UnaryOperatorNode.__init__(self, operand) def __str__(self) -> str: return "-" + str(self.operand) class NotNode(UnaryOperatorNode): """This represents a logical not that appears in the Python model.""" def __init__(self, operand: BMGNode): UnaryOperatorNode.__init__(self, operand) def __str__(self) -> str: return "not " + str(self.operand) class ComplementNode(UnaryOperatorNode): """This represents a complement of a Boolean or probability value.""" # See notes above NegateNode for details def __init__(self, operand: BMGNode): UnaryOperatorNode.__init__(self, operand) def __str__(self) -> str: return "complement " + str(self.operand) # This operator is not supported in BMG. We accumulate it into # the graph in order to produce a good error message. class InvertNode(UnaryOperatorNode): """This represents a bit inversion (~).""" def __init__(self, operand: BMGNode): UnaryOperatorNode.__init__(self, operand) def __str__(self) -> str: return "~" + str(self.operand) class PhiNode(UnaryOperatorNode): """This represents a phi operation; that is, the cumulative distribution function of the standard normal.""" def __init__(self, operand: BMGNode): UnaryOperatorNode.__init__(self, operand) def __str__(self) -> str: return "Phi(" + str(self.operand) + ")" class SampleNode(UnaryOperatorNode): """This represents a single unique sample from a distribution; if a graph has two sample nodes both taking input from the same distribution, each sample is logically distinct. But if a graph has two nodes that both input from the same sample node, we must treat those two uses of the sample as though they had identical values.""" def __init__(self, operand: DistributionNode): UnaryOperatorNode.__init__(self, operand) @property def operand(self) -> DistributionNode: c = self.inputs[0] assert isinstance(c, DistributionNode) return c def __str__(self) -> str: return "Sample(" + str(self.operand) + ")" class SumNode(UnaryOperatorNode): def __init__(self, operand: BMGNode): UnaryOperatorNode.__init__(self, operand) def __str__(self) -> str: return "Sum(" + str(self.operand) + ")" class ToRealNode(UnaryOperatorNode): def __init__(self, operand: BMGNode): UnaryOperatorNode.__init__(self, operand) def __str__(self) -> str: return "ToReal(" + str(self.operand) + ")" class ToIntNode(UnaryOperatorNode): """This represents an integer truncation operation; it is generated when a model contains calls to Tensor.int() or int().""" def __init__(self, operand: BMGNode): UnaryOperatorNode.__init__(self, operand) def __str__(self) -> str: return "ToInt(" + str(self.operand) + ")" class ToRealMatrixNode(UnaryOperatorNode): def __init__(self, operand: BMGNode): UnaryOperatorNode.__init__(self, operand) def __str__(self) -> str: return "ToRealMatrix(" + str(self.operand) + ")" class ToPositiveRealMatrixNode(UnaryOperatorNode): def __init__(self, operand: BMGNode): UnaryOperatorNode.__init__(self, operand) def __str__(self) -> str: return "ToPosRealMatrix(" + str(self.operand) + ")" class ToPositiveRealNode(UnaryOperatorNode): def __init__(self, operand: BMGNode): UnaryOperatorNode.__init__(self, operand) def __str__(self) -> str: return "ToPosReal(" + str(self.operand) + ")" class ToProbabilityNode(UnaryOperatorNode): def __init__(self, operand: BMGNode): UnaryOperatorNode.__init__(self, operand) def __str__(self) -> str: return "ToProb(" + str(self.operand) + ")" class ToNegativeRealNode(UnaryOperatorNode): def __init__(self, 
                 operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "ToNegReal(" + str(self.operand) + ")"


class ToNegativeRealMatrixNode(UnaryOperatorNode):
    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "ToNegRealMatrix(" + str(self.operand) + ")"


class LogSumExpVectorNode(UnaryOperatorNode):
    # BMG supports a log-sum-exp operator that takes a one-column tensor.
    def __init__(self, operand: BMGNode):
        UnaryOperatorNode.__init__(self, operand)

    def __str__(self) -> str:
        return "LogSumExpVector"


# ####
# #### Marker nodes
# ####


class Observation(BMGNode):
    """This represents an observed value of a sample. For example, we might
    have a prior that a mint produces a coin that is uniformly unfair. We
    could then observe a flip of the coin, and if heads, that is small but
    nonzero evidence that the coin is unfair in the heads direction. Given
    that observation, our belief in the true unfairness of the coin should
    no longer be uniform."""

    # TODO: Here we treat an observation as a node which takes input
    # from a sample and has an associated value. This implementation
    # choice differs from BMG, which does not treat observations as
    # nodes in the graph; since an observation is never the input
    # of any other node, this makes sense. We might consider
    # following this pattern and making the observation not inherit
    # from BMGNode.
    #
    # TODO: **Observations are logically distinct from models.**
    # That is, it is common to have one model and many different
    # sets of observations. (And similarly but less commonly, we
    # could imagine having one set of observations used by many
    # models.) Consider how we might extract the graph from a model
    # without knowing the observations ahead of time.

    value: Any

    def __init__(self, observed: BMGNode, value: Any):
        # The observed node is required to be a sample by BMG,
        # but during model transformations it is possible for
        # an observation to temporarily observe a non-sample.
        # TODO: Consider adding a verification pass which ensures
        # this invariant is maintained by the rewriters.
        self.value = value
        BMGNode.__init__(self, [observed])

    @property
    def observed(self) -> BMGNode:
        return self.inputs[0]

    def __str__(self) -> str:
        return str(self.observed) + "=" + str(self.value)


class Query(BMGNode):
    """A query is a marker on a node in the graph that indicates to the
    inference engine that the user is interested in getting a distribution
    of values of that node."""

    # TODO: BMG requires that the target of a query be classified
    # as an operator and that queries be unique; that is, every node
    # is queried *exactly* zero or one times. Rather than making
    # those restrictions here, instead detect bad queries in the
    # problem fixing phase and report accordingly.
    #
    # TODO: As with observations, properly speaking there is no
    # need to represent a query as a *node*, and BMG does not
    # do so. We might wish to follow this pattern as well.

    def __init__(self, operator: BMGNode, rvidentifier: RVIdentifier):
        BMGNode.__init__(self, [operator])
        self._rvidentifier = rvidentifier

    @property
    def operator(self) -> BMGNode:
        c = self.inputs[0]
        return c

    @property
    def rv_identifier(self) -> RVIdentifier:
        return self._rvidentifier

    def __str__(self) -> str:
        return "Query(" + str(self.operator) + ")"


# The basic idea of the Metropolis algorithm is: each possible state of
# the graph is assigned a "score" proportional to the probability density
# of that state. We do not know the proportionality constant, but we do not
# need to, because we take the ratio of the current state's score to a proposed
# new state's score, and accept or reject the proposal based on the ratio.
#
# The idea of a "factor" node is that we also multiply the score by a real number
# which is high for "more likely" states and low for "less likely" states. By
# carefully choosing a factor function we can express our additional knowledge of
# the model.
#
# Factors (like observations and queries) are never used as inputs even though they
# compute a value.


class FactorNode(BMGNode, metaclass=ABCMeta):
    """This is the base class for all factors.
    The inputs are the operands of each factor."""

    def __init__(self, inputs: List[BMGNode]):
        assert isinstance(inputs, list)
        BMGNode.__init__(self, inputs)


# The ExpProduct factor takes one or more inputs, computes their product,
# and then multiplies the score by exp(product), so if the product is large
# then the factor will be very large; if the product is zero then the factor
# will be one, and if the product is negative then the factor will be small.


class ExpProductFactorNode(FactorNode):
    def __init__(self, inputs: List[BMGNode]):
        assert isinstance(inputs, list)
        FactorNode.__init__(self, inputs)

    def __str__(self) -> str:
        return "ExpProduct"


def is_zero(n: BMGNode) -> bool:
    return isinstance(n, ConstantNode) and bt.is_zero(n.value)


def is_one(n: BMGNode) -> bool:
    return isinstance(n, ConstantNode) and bt.is_one(n.value)
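# A small, hedged usage sketch: the node classes in this module compose into
# expression trees, and their __str__ methods produce a readable rendering.
# This assumes RealNode (a constant node class defined earlier in this module)
# wraps a single float value and prints it.
if __name__ == "__main__":
    _x = RealNode(2.0)
    _y = RealNode(3.0)
    _p = PowerNode(_x, _y)
    print(_p)              # expected: (2.0**3.0)
    print(NegateNode(_p))  # expected: -(2.0**3.0)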
beanmachine-main
src/beanmachine/ppl/compiler/bmg_nodes.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional, Union import beanmachine.ppl.compiler.bmg_nodes as bn from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.copy_and_replace import ( Cloner, copy_and_replace, NodeTransformer, TransformAssessment, ) from beanmachine.ppl.compiler.error_report import ErrorReport, UnsizableNode from beanmachine.ppl.compiler.fix_problem import GraphFixerResult from beanmachine.ppl.compiler.sizer import Sizer, Unsized class CopyGraph(NodeTransformer): def __init__(self, cloner: Cloner, sizer: Sizer): self.sizer = sizer self.cloner = cloner def assess_node( self, node: bn.BMGNode, original: BMGraphBuilder ) -> TransformAssessment: report = ErrorReport() transform = True try: size = self.sizer[node] if size == Unsized: transform = False except RuntimeError: transform = False if not transform: report.add_error( UnsizableNode( node, [self.sizer[input] for input in node.inputs.inputs], self.cloner.bmg_original.execution_context.node_locations(node), ) ) return TransformAssessment(transform, report) def transform_node( self, node: bn.BMGNode, new_inputs: List[bn.BMGNode] ) -> Optional[Union[bn.BMGNode, List[bn.BMGNode]]]: return self.cloner.clone( node, [self.cloner.copy_context[p] for p in node.inputs.inputs] ) def copy(bmg_old: BMGraphBuilder) -> GraphFixerResult: bmg, errors = copy_and_replace(bmg_old, lambda c, s: CopyGraph(c, s)) return bmg, True, errors
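# A hedged usage sketch: `copy` has the GraphFixer shape (builder in; builder,
# progress flag, and error report out), so it can be invoked directly or
# composed with other passes via sequential_graph_fixer in fix_problem.py.
# This assumes BMGraphBuilder can be constructed empty.
if __name__ == "__main__":
    _bmg = BMGraphBuilder()
    _copied, _made_progress, _errors = copy(_bmg)
    assert not _errors.any()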
beanmachine-main
src/beanmachine/ppl/compiler/copy_transformer.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Callable, List, Optional, Tuple, Type, Union

import beanmachine.ppl.compiler.bmg_nodes as bn
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.error_report import BMGError, ErrorReport
from beanmachine.ppl.compiler.typer_base import TyperBase


# A "node fixer" is a partial function on nodes; it is similar to a "rule". (See rules.py.)
# What distinguishes a node fixer from a rule?
#
# * A node fixer is not an instance of a Rule class; it's just a function.
#
# * A node fixer returns:
#   1. None or Inapplicable if the fixer did not know how to fix the problem.
#      TODO: Eliminate use of None as a special return value from a node fixer.
#      Node fixers should return Inapplicable, Fatal, or a node.
#   2. The same node as the input, if the node does not actually need fixing.
#   3. A new node, if the fixer did know how to fix the problem.
#   4. Fatal, if the node definitely cannot be fixed, so compilation should cease.
#
# Note the subtle difference between (1) and (2). Suppose we compose a set of n
# fixers together, as in the first_match combinator below. If the first fixer
# returns Inapplicable, then we try the second fixer. If the first fixer returns the
# input, then that fixer is saying that the node is already correct, and we
# should not try the second fixer.
#
# * A node fixer mutates an existing graph by adding a new node to it; a Rule just
#   returns a success code containing a new value.
#
# * Rules may be combined together with combinators that apply sub-rules to
#   various branches in a large tree, and the result of such a combination is
#   itself a Rule. Node fixers are combined together to form more complex fixers,
#   but they still just operate on individual nodes. The work of applying node
#   fixers over an entire graph is done by a GraphFixer.


class NodeFixerError:
    pass


Inapplicable = NodeFixerError()
Fatal = NodeFixerError()

NodeFixerResult = Union[bn.BMGNode, None, NodeFixerError]
NodeFixer = Callable[[bn.BMGNode], NodeFixerResult]


def node_fixer_first_match(fixers: List[NodeFixer]) -> NodeFixer:
    def first_match(node: bn.BMGNode) -> NodeFixerResult:
        for fixer in fixers:
            result = fixer(node)
            if result is not None and result is not Inapplicable:
                return result
        return Inapplicable

    return first_match


def type_guard(t: Type, fixer: Callable) -> NodeFixer:
    def guarded(node: bn.BMGNode) -> Optional[bn.BMGNode]:
        return fixer(node) if isinstance(node, t) else None

    return guarded


# A GraphFixer is a function that takes a graph builder and returns (1) the
# builder, possibly modified, (2) a bool indicating whether the graph fixer
# made any change or not, and (3) an error report. If the error report is
# non-empty then further processing should stop and the error should be
# reported to the user.
GraphFixerResult = Tuple[BMGraphBuilder, bool, ErrorReport]
GraphFixer = Callable[[BMGraphBuilder], GraphFixerResult]

# The identity graph fixer never makes a change or produces an error.
identity_graph_fixer: GraphFixer = lambda gb: (gb, False, ErrorReport())


def conditional_graph_fixer(
    condition: Callable[[BMGraphBuilder], bool],
    fixer: Callable[[BMGraphBuilder], GraphFixerResult],
) -> GraphFixer:
    def _condition_graph_fixer(bmg: BMGraphBuilder) -> GraphFixerResult:
        return fixer(bmg) if condition(bmg) else identity_graph_fixer(bmg)

    return _condition_graph_fixer


def ancestors_first_graph_fixer(  # noqa
    typer: TyperBase,
    node_fixer: NodeFixer,
    get_error: Optional[Callable[[bn.BMGNode, int], Optional[BMGError]]] = None,
) -> GraphFixer:
    # Applies the node fixer to each node in the graph builder that is an ancestor
    # of any sample, query, or observation, starting with ancestors and working
    # towards descendants. Fixes are done one *edge* at a time. That is, when
    # we enumerate a node, we check all its input edges to see if the input node
    # needs to be fixed, and if so, then we update that edge to point from
    # the fixed node to its new output.
    #
    # We enumerate each output node once, but because we then examine each of its
    # input edges, we will possibly encounter the same input node more than once.
    # Rather than rewriting it again, we memoize the result and reuse it.
    #
    # If a fixer indicates a fatally unfixable node then we attempt to report an
    # error describing the problem with the edge. However, we will continue to run
    # fixers on other nodes, hoping that we might report more errors.
    #
    # A typer associates type information with each node in the graph. We have
    # some problems though:
    #
    # * We frequently need to accurately know the type of a node when checking to
    #   see if it needs fixing.
    # * Computing the type of a node requires computing the types of all of its
    #   *ancestor* nodes, which can be quite expensive.
    # * If a mutation changes an input of a node, that node's type might change,
    #   which could then change the types of all of its *descendant* nodes.
    #
    # We solve this performance problem by (1) computing types of nodes on demand
    # and caching the result, and (2) being smart about recomputing the type of a
    # node and its descendants when the graph is mutated. We therefore tell the
    # typer that it needs to re-type a node and its descendants only when a node
    # changes.
    #
    # CONSIDER: Could we use a simpler algorithm here? That is: for each node,
    # try to fix the node. If successful, remove all the output edges of the old
    # node and add output edges to the new node. The problem with this approach
    # is that we might end up reporting an error on an edge that is NOT in the
    # subgraph of ancestors of samples, queries and observations, which would be
    # a bad user experience.
    def ancestors_first(bmg: BMGraphBuilder) -> GraphFixerResult:
        errors = ErrorReport()
        replacements = {}
        reported = set()
        nodes = bmg.all_ancestor_nodes()
        made_progress = False
        for node in nodes:
            node_was_updated = False
            for i in range(len(node.inputs)):
                c = node.inputs[i]
                # Have we already reported an error on this node? Skip it.
                if c in reported:
                    continue
                # Have we already replaced this input with something?
                # If so, no need to compute the replacement again.
                if c in replacements:
                    if node.inputs[i] is not replacements[c]:
                        node.inputs[i] = replacements[c]
                        node_was_updated = True
                    continue
                replacement = node_fixer(c)
                if isinstance(replacement, bn.BMGNode):
                    replacements[c] = replacement
                    if node.inputs[i] is not replacement:
                        node.inputs[i] = replacement
                        node_was_updated = True
                        made_progress = True
                elif replacement is Fatal:
                    reported.add(c)
                    if get_error is not None:
                        error = get_error(node, i)
                        if error is not None:
                            errors.add_error(error)
            if node_was_updated:
                typer.update_type(node)
        return bmg, made_progress, errors

    return ancestors_first


def edge_error_pass(
    get_error: Callable[[BMGraphBuilder, bn.BMGNode, int], Optional[BMGError]]
) -> GraphFixer:
    """Given a function that takes an edge in the graph and returns an optional
    error, build a pass which checks every edge in the graph that is an ancestor
    of a query, observation, or sample for errors. The edge is given as the
    descendant node and the index of the parent node."""

    def error_pass(bmg: BMGraphBuilder) -> GraphFixerResult:
        errors = ErrorReport()
        reported = set()
        nodes = bmg.all_ancestor_nodes()
        for node in nodes:
            for i in range(len(node.inputs)):
                parent = node.inputs[i]
                # We might find errors on many edges, but we only report
                # one error per parent node.
                if parent in reported:
                    continue
                error = get_error(bmg, node, i)
                if error is not None:
                    errors.add_error(error)
                    reported.add(parent)
        return bmg, False, errors

    return error_pass


def node_error_pass(
    get_error: Callable[[BMGraphBuilder, bn.BMGNode], Optional[BMGError]]
) -> GraphFixer:
    """Given a function that takes a node in the graph and returns an optional
    error, build a pass which checks every node in the graph that is an ancestor
    of a query, observation, or sample for errors."""

    def error_pass(bmg: BMGraphBuilder) -> GraphFixerResult:
        errors = ErrorReport()
        nodes = bmg.all_ancestor_nodes()
        for node in nodes:
            error = get_error(bmg, node)
            if error is not None:
                errors.add_error(error)
        return bmg, False, errors

    return error_pass


def sequential_graph_fixer(fixers: List[GraphFixer]) -> GraphFixer:
    """Takes a list of graph fixers and applies each in turn once unless one fails."""

    def sequential(bmg: BMGraphBuilder) -> GraphFixerResult:
        made_progress = False
        errors = ErrorReport()
        current = bmg
        for fixer in fixers:
            current, fixer_made_progress, errors = fixer(current)
            made_progress |= fixer_made_progress
            if errors.any():
                break
        return current, made_progress, errors

    return sequential


def fixpoint_graph_fixer(fixer: GraphFixer) -> GraphFixer:
    """Executes a graph fixer repeatedly until it stops making progress
    or produces an error."""

    def fixpoint(bmg: BMGraphBuilder) -> GraphFixerResult:
        current = bmg
        while True:
            current, made_progress, errors = fixer(current)
            if not made_progress or errors.any():
                return current, made_progress, errors

    return fixpoint
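# A hedged sketch of authoring and composing a node fixer with the combinators
# above. The double-negation rewrite is illustrative only; it is not one of
# the compiler's actual fixers.
def _double_negation_fixer(node: bn.BMGNode) -> NodeFixerResult:
    # -(-x) --> x; any other node is left for some other fixer.
    if isinstance(node, bn.NegateNode) and isinstance(node.operand, bn.NegateNode):
        return node.operand.operand
    return Inapplicable


# type_guard restricts the fixer to NegateNode inputs; node_fixer_first_match
# tries each fixer in order until one applies.
_example_fixer: NodeFixer = node_fixer_first_match(
    [type_guard(bn.NegateNode, _double_negation_fixer)]
)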
beanmachine-main
src/beanmachine/ppl/compiler/fix_problem.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List import beanmachine.ppl.compiler.bmg_nodes as bn import torch from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.bmg_node_types import ( dist_type, factor_type, operator_type, ) from beanmachine.ppl.compiler.bmg_types import _size_to_rc, SimplexMatrix from beanmachine.ppl.compiler.fix_problems import fix_problems from beanmachine.ppl.compiler.lattice_typer import LatticeTyper def _tensor_to_python(t: torch.Tensor) -> str: if len(t.shape) == 0: return str(t.item()) return "[" + ",".join(_tensor_to_python(c) for c in t) + "]" def _matrix_to_python(value: torch.Tensor) -> str: r, c = _size_to_rc(value.size()) v = value.reshape(r, c).transpose(0, 1) t = _tensor_to_python(v) return f"tensor({t})" class GeneratedGraphPython: code: str _code: List[str] bmg: BMGraphBuilder node_to_graph_id: Dict[bn.BMGNode, int] query_to_query_id: Dict[bn.Query, int] def __init__(self, bmg: BMGraphBuilder) -> None: self.code = "" self._code = [ "from beanmachine import graph", "from torch import tensor", "g = graph.Graph()", ] self.bmg = bmg self.node_to_graph_id = {} self.query_to_query_id = {} def _add_observation(self, node: bn.Observation) -> None: graph_id = self.node_to_graph_id[node.observed] self._code.append(f"g.observe(n{graph_id}, {node.value})") def _add_query(self, node: bn.Query) -> None: query_id = len(self.query_to_query_id) self.query_to_query_id[node] = query_id graph_id = self.node_to_graph_id[node.operator] self._code.append(f"q{query_id} = g.query(n{graph_id})") def _inputs(self, node: bn.BMGNode) -> str: if isinstance(node, bn.LKJCholeskyNode): # The LKJ dimension parameter has already been folded into the sample type input_seq = [self.node_to_graph_id[node.inputs[1]]] else: input_seq = (self.node_to_graph_id[x] for x in node.inputs) inputs = ", ".join("n" + str(x) for x in input_seq) return "[" + inputs + "]" def _add_factor(self, node: bn.FactorNode) -> None: graph_id = len(self.node_to_graph_id) self.node_to_graph_id[node] = graph_id i = self._inputs(node) ft = str(factor_type(node)) self._code.append(f"n{graph_id} = g.add_factor(") self._code.append(f" graph.{ft},") self._code.append(f" {i},") self._code.append(")") def _add_distribution(self, node: bn.DistributionNode) -> None: graph_id = len(self.node_to_graph_id) self.node_to_graph_id[node] = graph_id i = self._inputs(node) if isinstance(node, bn.DirichletNode): t = LatticeTyper()[node] assert isinstance(t, SimplexMatrix) self._code.append(f"n{graph_id} = g.add_distribution(") self._code.append(" graph.DistributionType.DIRICHLET,") self._code.append(" graph.ValueType(") self._code.append(" graph.VariableType.COL_SIMPLEX_MATRIX,") self._code.append(" graph.AtomicType.PROBABILITY,") self._code.append(f" {t.rows},") self._code.append(f" {t.columns},") self._code.append(" ),") self._code.append(f" {i},") self._code.append(")") else: distr_type, elt_type = dist_type(node) self.node_to_graph_id[node] = graph_id self._code.append(f"n{graph_id} = g.add_distribution(") self._code.append(f" graph.{distr_type},") self._code.append(f" graph.{elt_type},") self._code.append(f" {i},") self._code.append(")") def _add_operator(self, node: bn.OperatorNode) -> None: graph_id = len(self.node_to_graph_id) self.node_to_graph_id[node] = graph_id i = self._inputs(node) ot = str(operator_type(node)) if 
len(node.inputs) <= 2: self._code.append(f"n{graph_id} = g.add_operator(graph.{ot}, {i})") else: self._code.append(f"n{graph_id} = g.add_operator(") self._code.append(f" graph.{ot},") self._code.append(f" {i},") self._code.append(")") def _add_constant(self, node: bn.ConstantNode) -> None: # noqa graph_id = len(self.node_to_graph_id) self.node_to_graph_id[node] = graph_id t = type(node) v = node.value if t is bn.PositiveRealNode: f = f"add_constant_pos_real({str(float(v))})" elif t is bn.NegativeRealNode: f = f"add_constant_neg_real({str(float(v))})" elif t is bn.ProbabilityNode: f = f"add_constant_probability({str(float(v))})" elif t is bn.BooleanNode: f = f"add_constant_bool({str(bool(v))})" elif t is bn.NaturalNode: f = f"add_constant_natural({str(int(v))})" elif t is bn.RealNode: f = f"add_constant_real({str(float(v))})" elif t is bn.ConstantPositiveRealMatrixNode: f = f"add_constant_pos_matrix({_matrix_to_python(v)})" elif t is bn.ConstantRealMatrixNode: f = f"add_constant_real_matrix({_matrix_to_python(v)})" elif t is bn.ConstantNegativeRealMatrixNode: f = f"add_constant_neg_matrix({_matrix_to_python(v)})" elif t is bn.ConstantProbabilityMatrixNode: f = f"add_constant_probability_matrix({_matrix_to_python(v)})" elif t is bn.ConstantSimplexMatrixNode: f = f"add_constant_col_simplex_matrix({_matrix_to_python(v)})" elif t is bn.ConstantNaturalMatrixNode: f = f"add_constant_natural_matrix({_matrix_to_python(v)})" elif t is bn.ConstantBooleanMatrixNode: f = f"add_constant_bool_matrix({_matrix_to_python(v)})" elif isinstance(v, torch.Tensor) and v.numel() != 1: f = f"add_constant_real_matrix({_matrix_to_python(v)})" else: f = f"add_constant_real({str(float(v))})" self._code.append(f"n{graph_id} = g.{f}") def _generate_node(self, node: bn.BMGNode) -> None: if isinstance(node, bn.Observation): self._add_observation(node) elif isinstance(node, bn.Query): self._add_query(node) elif isinstance(node, bn.FactorNode): self._add_factor(node) elif isinstance(node, bn.DistributionNode): self._add_distribution(node) elif isinstance(node, bn.OperatorNode): self._add_operator(node) elif isinstance(node, bn.ConstantNode): self._add_constant(node) def _generate_python(self) -> None: bmg, error_report = fix_problems(self.bmg) self.bmg = bmg error_report.raise_errors() for node in self.bmg.all_ancestor_nodes(): self._generate_node(node) self.code = "\n".join(self._code) def to_bmg_python(bmg: BMGraphBuilder) -> GeneratedGraphPython: gg = GeneratedGraphPython(bmg) gg._generate_python() return gg
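# A hedged usage sketch (the builder accumulation is elided):
#
#     gg = to_bmg_python(bmg)   # bmg is an accumulated BMGraphBuilder
#     print(gg.code)            # Python source that rebuilds the graph via
#                               # the beanmachine.graph API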
beanmachine-main
src/beanmachine/ppl/compiler/gen_bmg_python.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import beanmachine.ppl.compiler.bmg_nodes as bn from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.fix_problem import ( Inapplicable, NodeFixer, NodeFixerResult, ) def logsumexp_fixer(bmg: BMGraphBuilder) -> NodeFixer: """This fixer attempts to rewrite log expressions of the form log( exp(a) + exp(b) + exp(c) ...) -> logsumexp(a,b,c, ...) """ def logsumexp_fixer(node: bn.BMGNode) -> NodeFixerResult: if not isinstance(node, bn.LogNode): return Inapplicable addition = node.operand if not isinstance(addition, bn.AdditionNode): return Inapplicable if not all(isinstance(i, bn.ExpNode) for i in addition.inputs): return Inapplicable return bmg.add_logsumexp(*[i.operand for i in addition.inputs]) return logsumexp_fixer
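# A hedged sketch of the rewrite this fixer performs. Only add_logsumexp is
# used above; the add_log / add_addition / add_exp helper names here are
# assumptions about the builder API, shown purely for illustration:
#
#     fixer = logsumexp_fixer(bmg)
#     node = bmg.add_log(bmg.add_addition(bmg.add_exp(a), bmg.add_exp(b)))
#     fixed = fixer(node)   # a logsumexp(a, b) node, or Inapplicable if the
#                           # pattern does not match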
beanmachine-main
src/beanmachine/ppl/compiler/fix_logsumexp.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# See notes in typer_base.py for how the type computation logic works.
# See notes in bmg_types.py for the type lattice documentation.
#
# This typer identifies which nodes in the graph are "typable", and of the
# typable nodes, determines the *smallest* lattice type possible for that
# node.
#
# A node is "typable" if (1) it is either a constant or valid BMG node,
# and (2) all of its ancestors are typable. If either requirement is not
# met then a node is untypable.
#
# The purpose of this restriction is to avoid doing work to guess at what
# the types of nodes are in graphs where there is no possibility of this
# graph being legal. We also wish to avoid reporting confusing cascading
# errors based on incorrect guesses as to what the type of the node "should"
# be. Descendants of untypable nodes are also untyped; this is a clear
# and easily implemented rule.
#
# Suppose then we have a node where all of its ancestors are typable. What
# is the "smallest lattice type" computed here?
#
# For example, suppose we have an addition with two inputs: a sample
# from a beta and a sample from a half Cauchy. The types of the samples
# cannot be smaller than Probability and PositiveReal, respectively.
# An addition of two dissimilarly-typed nodes is not legal, but we could
# make it legal by converting both nodes to PositiveReal OR to Real,
# and then outputting that type. The smallest of those two possibilities
# is PositiveReal, so this is the lattice type we associate with such
# an addition node.
#
# This class implements rules for each typable BMG node. When adding new
# logic for nodes, keep the following in mind:
#
# * The logic in the base class ensures that types of all ancestors are computed
#   first. We automatically mark all nodes with untypable ancestors as untypable.
#   Therefore we can assume that the type of every ancestor node is both computed
#   and valid.
#
# * "Untyped" constant node types are computed solely from their *values*.
#   For example, a constant tensor(1.0) can have the lattice type One
#   because we can use that value in a context where a Boolean, Natural,
#   Probability, and so on, are needed, just by creating a typed constant node
#   of the appropriate type.
#
# * "Typed" constant nodes have the type associated with that node regardless
#   of the type of the value; a constant real node with value 2.0 has type
#   real, even though it could be a natural or positive real.
#
# * For non-constant nodes, the lattice type associated with a node should always
#   be an actual BMG type that the node could have. For example: the BMG addition
#   operator requires that its output be PositiveReal, NegativeReal or Real, so
#   the lattice type must be one of those three. We never say "this is a sum of
#   naturals therefore the sum node is also a natural". And we never say "this
#   is the sum of two 1x3 simplexes, so therefore the result is a 1x3 positive real
#   matrix", and so on.
#
# By following these rules we will be able to more easily compute what the edge
# requirements are, what conversion nodes must be inserted, and what errors must
# be reported when a graph cannot be transformed as required.
import typing from typing import Callable, Dict, Set import beanmachine.ppl.compiler.bmg_nodes as bn import beanmachine.ppl.compiler.bmg_types as bt from beanmachine.ppl.compiler.typer_base import TyperBase # Node types which always have the same lattice type # no matter what the types of their inputs are. _requires_nothing: Dict[type, bt.BMGLatticeType] = { # Distributions bn.BernoulliLogitNode: bt.Boolean, bn.BernoulliNode: bt.Boolean, bn.BetaNode: bt.Probability, bn.BinomialNode: bt.Natural, bn.CategoricalNode: bt.Natural, bn.FlatNode: bt.Probability, bn.GammaNode: bt.PositiveReal, bn.HalfCauchyNode: bt.PositiveReal, bn.NormalNode: bt.Real, bn.HalfNormalNode: bt.PositiveReal, bn.StudentTNode: bt.Real, bn.PoissonNode: bt.Natural, # Factors bn.ExpProductFactorNode: bt.Real, # Operators bn.LogisticNode: bt.Probability, # Note, log_prob returns the log of a positive real, not the # log of a probability, so it is real, not negative real. bn.LogProbNode: bt.Real, bn.LogSumExpNode: bt.Real, bn.LogSumExpVectorNode: bt.Real, bn.Log1mexpNode: bt.NegativeReal, bn.PhiNode: bt.Probability, bn.ToNegativeRealNode: bt.NegativeReal, bn.ToIntNode: bt.Natural, bn.ToRealNode: bt.Real, bn.ToPositiveRealNode: bt.PositiveReal, bn.ToProbabilityNode: bt.Probability, # Typed constants bn.ConstantTensorNode: bt.Tensor, bn.BooleanNode: bt.Boolean, bn.NaturalNode: bt.Natural, bn.NegativeRealNode: bt.NegativeReal, bn.PositiveRealNode: bt.PositiveReal, bn.ProbabilityNode: bt.Probability, bn.RealNode: bt.Real, } # This maps FROM the *python* type of a node which represents a constant matrix # TO a canonical instance of the *graph type object*. We need to be able to # inspect a node's python type and then construct a graph type object that # has the same dimensionality as the node. _constant_matrix_graph_types: Dict[type, bt.BMGMatrixType] = { bn.ConstantBooleanMatrixNode: bt.Boolean, bn.ConstantNaturalMatrixNode: bt.Natural, bn.ConstantNegativeRealMatrixNode: bt.NegativeReal, bn.ConstantPositiveRealMatrixNode: bt.PositiveReal, bn.ConstantProbabilityMatrixNode: bt.Probability, bn.ConstantRealMatrixNode: bt.Real, bn.ConstantSimplexMatrixNode: bt.SimplexMatrix(1, 1), } # These are the node types which always represent a matrix in BMG. # Even if the node is a 1x1 matrix, it is a matrix and not an atomic value. 
_always_matrix_types: Set[type] = { bn.BroadcastNode, bn.CholeskyNode, bn.ColumnIndexNode, bn.ConstantBooleanMatrixNode, bn.ConstantNaturalMatrixNode, bn.ConstantNegativeRealMatrixNode, bn.ConstantPositiveRealMatrixNode, bn.ConstantProbabilityMatrixNode, bn.ConstantRealMatrixNode, bn.ConstantSimplexMatrixNode, bn.ElementwiseMultiplyNode, bn.FillMatrixNode, bn.MatrixAddNode, bn.MatrixExpNode, bn.MatrixLogNode, bn.MatrixLog1mexpNode, bn.MatrixComplementNode, bn.MatrixPhiNode, bn.MatrixScaleNode, bn.ToMatrixNode, bn.TransposeNode, } def _broadcast_size(left: bt.BMGMatrixType, right: bt.BMGMatrixType): def helper(x, y): return x == 1 or y == 1 or x == y if not helper(left.rows, right.rows): return None if not helper(left.columns, right.columns): return None rows = left.rows if right.rows == 1 else right.rows cols = left.columns if right.columns == 1 else right.columns return (rows, cols) class LatticeTyper(TyperBase[bt.BMGLatticeType]): _dispatch: Dict[type, Callable] def __init__(self) -> None: TyperBase.__init__(self) self._dispatch = { bn.Observation: self._type_observation, bn.Query: self._type_query, bn.DirichletNode: self._type_dirichlet, # Operators bn.AdditionNode: self._type_addition, bn.BroadcastNode: self._type_broadcast, bn.ChoiceNode: self._type_choice, bn.CholeskyNode: self._type_cholesky, bn.ColumnIndexNode: self._type_column_index, bn.ComplementNode: self._type_complement, bn.ElementwiseMultiplyNode: self._type_binary_elementwise_op, bn.ExpM1Node: self._type_expm1, bn.ExpNode: self._type_exp, # Fill matrix has same type rules as broadcast, so # let's not duplicate the code. bn.FillMatrixNode: self._type_broadcast, bn.IfThenElseNode: self._type_if, bn.LKJCholeskyNode: self._type_lkj_cholesky, bn.LogNode: self._type_log, bn.MatrixAddNode: self._type_binary_elementwise_op, bn.MatrixMultiplicationNode: self._type_matrix_multiplication, bn.MatrixScaleNode: self._type_matrix_scale, bn.MatrixExpNode: self._type_matrix_exp, bn.MatrixLogNode: self._type_matrix_log, bn.MatrixLog1mexpNode: self._type_matrix_log1mexp, bn.MatrixComplementNode: self._type_matrix_complement, bn.MatrixPhiNode: self._type_matrix_phi, bn.MatrixSumNode: self._type_matrix_sum, bn.MultiplicationNode: self._type_multiplication, bn.MatrixNegateNode: self._type_matrix_negate, bn.NegateNode: self._type_negate, bn.PowerNode: self._type_power, bn.SampleNode: self._type_sample, bn.ToMatrixNode: self._type_to_matrix, bn.ToNegativeRealMatrixNode: self._type_to_neg_real_matrix, bn.ToPositiveRealMatrixNode: self._type_to_pos_real_matrix, bn.ToRealMatrixNode: self._type_to_real_matrix, bn.VectorIndexNode: self._type_index, bn.TensorNode: self._type_tensor_node, bn.TransposeNode: self._type_transpose, } def _lattice_type_for_element_type( self, element_type: bt.BMGElementType ) -> bt.BMGLatticeType: if element_type == bt.positive_real_element: return bt.PositiveReal if element_type == bt.negative_real_element: return bt.NegativeReal if element_type == bt.real_element: return bt.Real if element_type == bt.probability_element: return bt.Probability if element_type == bt.bool_element: return bt.Boolean if element_type == bt.natural_element: return bt.Natural else: raise ValueError("unrecognized element type") def _type_binary_elementwise_op( self, node: bn.BinaryOperatorNode ) -> bt.BMGLatticeType: # Elementwise multiplication and addition require that the operands be # of the same type and size, and that's the resulting type. 
        # Rather than enforcing that here, find the supremum of the element
        # types and a size where both operands can be broadcast to that size.
        # We'll then add the appropriate broadcast nodes in the requirements
        # fixer.
        left_type = self[node.left]
        right_type = self[node.right]
        assert isinstance(left_type, bt.BMGMatrixType)
        assert isinstance(right_type, bt.BMGMatrixType)
        bsize = _broadcast_size(left_type, right_type)
        if bsize is None:
            return bt.Untypable
        rows, cols = bsize
        op_type = bt.supremum(
            self._lattice_type_for_element_type(left_type.element_type),
            self._lattice_type_for_element_type(right_type.element_type),
        )
        if bt.supremum(op_type, bt.NegativeReal) == bt.NegativeReal:
            return bt.NegativeRealMatrix(rows, cols)
        if bt.supremum(op_type, bt.PositiveReal) == bt.PositiveReal:
            return bt.PositiveRealMatrix(rows, cols)
        return bt.RealMatrix(rows, cols)

    # Maps the scalar lattice type of a tensor's elements to a constructor
    # for the corresponding matrix type.
    _matrix_type_constructors = {
        bt.Real: lambda r, c: bt.RealMatrix(r, c),
        bt.PositiveReal: lambda r, c: bt.PositiveRealMatrix(r, c),
        bt.NegativeReal: lambda r, c: bt.NegativeRealMatrix(r, c),
        bt.Probability: lambda r, c: bt.ProbabilityMatrix(r, c),
        bt.Boolean: lambda r, c: bt.BooleanMatrix(r, c),
        bt.Natural: lambda r, c: bt.NaturalMatrix(r, c),
    }

    def _type_tensor_node(self, node: bn.TensorNode) -> bt.BMGLatticeType:
        size = node._size
        element_type = bt.supremum(*[self[i] for i in node.inputs])
        if len(size) == 0:
            return element_type
        if len(size) == 1:
            rows = 1
            columns = size[0]
        elif len(size) == 2:
            rows = size[0]
            columns = size[1]
        else:
            return bt.Untypable
        return self._matrix_type_constructors[element_type](rows, columns)

    def _type_matrix_exp(self, node: bn.MatrixExpNode) -> bt.BMGLatticeType:
        assert len(node.inputs) == 1
        op = self[node.operand]
        assert op is not bt.Untypable
        assert isinstance(op, bt.BMGMatrixType)
        if isinstance(op, bt.NegativeRealMatrix):
            return bt.ProbabilityMatrix(op.rows, op.columns)
        return bt.PositiveRealMatrix(op.rows, op.columns)

    def _type_matrix_phi(self, node: bn.MatrixPhiNode) -> bt.BMGLatticeType:
        assert len(node.inputs) == 1
        op = self[node.operand]
        assert op is not bt.Untypable
        assert isinstance(op, bt.BMGMatrixType)
        return bt.ProbabilityMatrix(op.rows, op.columns)

    def _type_matrix_log(self, node: bn.MatrixLogNode) -> bt.BMGLatticeType:
        assert len(node.inputs) == 1
        op = self[node.operand]
        assert op is not bt.Untypable
        assert isinstance(op, bt.BMGMatrixType)
        if isinstance(op, bt.ProbabilityMatrix):
            return bt.NegativeRealMatrix(op.rows, op.columns)
        return bt.RealMatrix(op.rows, op.columns)

    def _type_matrix_log1mexp(self, node: bn.MatrixLog1mexpNode) -> bt.BMGLatticeType:
        assert len(node.inputs) == 1
        op = self[node.operand]
        assert op is not bt.Untypable
        assert isinstance(op, bt.BMGMatrixType)
        op_element_type = self._lattice_type_for_element_type(op.element_type)
        assert bt.supremum(bt.NegativeReal, op_element_type) == bt.NegativeReal
        return bt.RealMatrix(op.rows, op.columns)

    def _type_matrix_complement(
        self, node: bn.MatrixComplementNode
    ) -> bt.BMGLatticeType:
        assert len(node.inputs) == 1
        op = self[node.operand]
        assert op is not bt.Untypable
        assert isinstance(op, bt.BroadcastMatrixType) or isinstance(
            op, bt.SimplexMatrix
        )
        if isinstance(op, bt.SimplexMatrix):
            return bt.SimplexMatrix(op.rows, op.columns)
        op_element_type = self._lattice_type_for_element_type(op.element_type)
        if bt.supremum(bt.Boolean, op_element_type) == bt.Boolean:
            return bt.BooleanMatrix(op.rows, op.columns)
        if bt.supremum(bt.Probability, op_element_type) == bt.Probability:
            return bt.ProbabilityMatrix(op.rows, op.columns)
        return bt.Untypable

    def _type_matrix_sum(self, node: bn.MatrixSumNode) -> bt.BMGLatticeType:
        operand_type = self[node.operand]
        assert isinstance(operand_type, bt.BMGMatrixType)
        operand_element_type = self._lattice_type_for_element_type(
            operand_type.element_type
        )
        return operand_element_type

    def _type_matrix_negate(self, node: bn.MatrixNegateNode) -> bt.BMGLatticeType:
        assert len(node.inputs) == 1
        op = self[node.operand]
        assert op is not bt.Untypable
        assert isinstance(op, bt.BMGMatrixType)
        op_element_type = self._lattice_type_for_element_type(op.element_type)
        if (
            bt.supremum(bt.PositiveReal, op_element_type) == bt.PositiveReal
            or bt.supremum(bt.Probability, op_element_type) == bt.Probability
        ):
            return bt.NegativeRealMatrix(op.rows, op.columns)
        if bt.supremum(bt.NegativeReal, op_element_type) == bt.NegativeReal:
            return bt.PositiveRealMatrix(op.rows, op.columns)
        return bt.RealMatrix(op.rows, op.columns)

    def _type_observation(self, node: bn.Observation) -> bt.BMGLatticeType:
        return self[node.observed]

    def _type_query(self, node: bn.Query) -> bt.BMGLatticeType:
        return self[node.operator]

    def _type_dirichlet(self, node: bn.DirichletNode) -> bt.BMGLatticeType:
        # The type of a Dirichlet node is a one-column simplex with as many
        # rows as its input.
        input_type = self[node.concentration]
        rows = 1
        columns = 1
        if isinstance(input_type, bt.BMGMatrixType):
            rows = input_type.rows
        return bt.SimplexMatrix(rows, columns)

    def _type_addition(self, node: bn.BMGNode) -> bt.BMGLatticeType:
        op_type = bt.supremum(*[self[i] for i in node.inputs])
        if bt.supremum(op_type, bt.NegativeReal) == bt.NegativeReal:
            return bt.NegativeReal
        if bt.supremum(op_type, bt.PositiveReal) == bt.PositiveReal:
            return bt.PositiveReal
        return bt.Real

    def _type_column_index(self, node: bn.ColumnIndexNode) -> bt.BMGLatticeType:
        # A stochastic index into a one-hot or all-zero constant matrix
        # is treated as a column of bools.
        lt = self[node.left]
        assert isinstance(lt, bt.BMGMatrixType)
        result = lt
        if isinstance(lt, bt.ZeroMatrix) or isinstance(lt, bt.OneHotMatrix):
            result = bt.Boolean
        return result.with_dimensions(lt.rows, 1)

    def _type_complement(self, node: bn.ComplementNode) -> bt.BMGLatticeType:
        if bt.supremum(self[node.operand], bt.Boolean) == bt.Boolean:
            return bt.Boolean
        return bt.Probability

    def _type_exp(self, node: bn.ExpNode) -> bt.BMGLatticeType:
        ot = self[node.operand]
        if bt.supremum(ot, bt.NegativeReal) == bt.NegativeReal:
            return bt.Probability
        return bt.PositiveReal

    def _type_expm1(self, node: bn.ExpM1Node) -> bt.BMGLatticeType:
        # ExpM1 takes a real, positive real or negative real. Its return has
        # the same type as its input.
        ot = self[node.operand]
        if bt.supremum(ot, bt.PositiveReal) == bt.PositiveReal:
            return bt.PositiveReal
        if bt.supremum(ot, bt.NegativeReal) == bt.NegativeReal:
            return bt.NegativeReal
        return bt.Real

    def _type_if(self, node: bn.IfThenElseNode) -> bt.BMGLatticeType:
        # TODO: Consider adding a pass which optimizes away IF(X, Y, Y) to
        # just plain Y.
        # TODO: What if we have an atomic type on one side and a 1x1 matrix
        # on the other? That has not yet arisen in practice but we might
        # consider putting a matrix constraint on the atomic side and
        # marking IF as producing a matrix in that case.
        # TODO: We need to consider what happens if the consequence and alternative
        # types have no supremum other than Tensor. In bmg_requirements.py we impose
        # the requirement that the consequence and alternative are both of their
        # supremum, but if that is Tensor then we need to give an error.
        result = bt.supremum(self[node.consequence], self[node.alternative])
        if result == bt.Zero or result == bt.One:
            result = bt.Boolean
        return result

    def _type_choice(self, node: bn.ChoiceNode) -> bt.BMGLatticeType:
        # The type of a choice node is the supremum of all its values' types.
        # TODO: We need to consider what happens if the values' types
        # have no supremum other than Tensor. In bmg_requirements.py we
        # impose the requirement that the values are all of their
        # supremum, but if that is Tensor then we need to give an error.
        result = bt.supremum(
            *(self[node.inputs[i]] for i in range(1, len(node.inputs)))
        )
        if result == bt.Zero or result == bt.One:
            result = bt.Boolean
        return result

    def _type_cholesky(self, node: bn.CholeskyNode) -> bt.BMGLatticeType:
        # TODO: Check to see if the input is a square matrix.
        return self[node.operand]

    def _type_index(self, node: bn.VectorIndexNode) -> bt.BMGLatticeType:
        # The lattice type of an index is derived from the lattice type of
        # the vector, but it's not as straightforward as just
        # shrinking the type down to a 1x1 matrix. The elements of
        # a one-hot vector are bools, for instance, not all one.
        # The elements of a simplex are probabilities.
        lt = self[node.left]
        if isinstance(lt, bt.OneHotMatrix):
            return bt.Boolean
        if isinstance(lt, bt.ZeroMatrix):
            return bt.Boolean
        if isinstance(lt, bt.SimplexMatrix):
            return bt.Probability
        if isinstance(lt, bt.BMGMatrixType):
            return lt.with_dimensions(1, 1)
        # The only other possibility is that we have a tensor, so let's say
        # its elements are reals.
        return bt.Real

    def _type_log(self, node: bn.LogNode) -> bt.BMGLatticeType:
        ot = bt.supremum(self[node.operand], bt.Probability)
        if ot == bt.Probability:
            return bt.NegativeReal
        return bt.Real

    def _type_multiplication(self, node: bn.MultiplicationNode) -> bt.BMGLatticeType:
        ot = bt.supremum(*[self[i] for i in node.inputs])
        it = bt.supremum(ot, bt.Probability)
        if bt.supremum(it, bt.Real) == bt.Real:
            return it
        return bt.Real

    def _type_matrix_multiplication(
        self, node: bn.MatrixMultiplicationNode
    ) -> bt.BMGLatticeType:
        assert len(node.inputs) == 2
        lt = self[node.left]
        assert lt is not bt.Untypable
        assert isinstance(lt, bt.BMGMatrixType)
        rt = self[node.right]
        assert rt is not bt.Untypable
        assert isinstance(rt, bt.BMGMatrixType)
        # Note that we do not detect here if lt.columns != rt.rows, which would
        # be illegal. We assume the type of the output is a real matrix with
        # lt.rows and rt.columns. That error condition will be checked elsewhere.
        return bt.RealMatrix(lt.rows, rt.columns)

    def _type_matrix_scale(self, node: bn.MatrixScaleNode) -> bt.BMGLatticeType:
        assert len(node.inputs) == 2
        lt = self[node.left]
        assert lt is not bt.Untypable
        assert bt.supremum(lt, bt.Real) == bt.Real
        assert isinstance(
            lt, bt.BMGMatrixType
        )  # Beanstalk scalars are single matrix types
        lt = typing.cast(bt.BroadcastMatrixType, lt)
        rt = self[node.right]
        assert rt is not bt.Untypable
        assert isinstance(rt, bt.BMGMatrixType)
        ltm = lt.with_dimensions(rt.rows, rt.columns)
        return bt.supremum(ltm, rt)

    def _type_negate(self, node: bn.NegateNode) -> bt.BMGLatticeType:
        ot = self[node.operand]
        if bt.supremum(ot, bt.PositiveReal) == bt.PositiveReal:
            return bt.NegativeReal
        if bt.supremum(ot, bt.NegativeReal) == bt.NegativeReal:
            return bt.PositiveReal
        return bt.Real

    def _type_power(self, node: bn.PowerNode) -> bt.BMGLatticeType:
        # BMG supports a power node that has these possible combinations of
        # base and exponent type:
        #
        # P  ** R+ --> P
        # P  ** R  --> R+
        # R+ ** R+ --> R+
        # R+ ** R  --> R+
        # R  ** R+ --> R
        # R  ** R  --> R
        inf_base = bt.supremum(self[node.left], bt.Probability)
        inf_exp = bt.supremum(self[node.right], bt.PositiveReal)
        if inf_base == bt.Probability and inf_exp == bt.Real:
            return bt.PositiveReal
        if bt.supremum(inf_base, bt.Real) == bt.Real:
            return inf_base
        return bt.Real

    def _type_sample(self, node: bn.SampleNode) -> bt.BMGLatticeType:
        return self[node.operand]

    def _type_to_matrix(self, node: bn.ToMatrixNode) -> bt.BMGLatticeType:
        assert len(node.inputs) >= 3
        rows = node.inputs[0]
        assert isinstance(rows, bn.NaturalNode)
        columns = node.inputs[1]
        assert isinstance(columns, bn.NaturalNode)
        t = bt.supremum(*(self[item] for item in node.inputs.inputs[2:]))
        if bt.supremum(t, bt.Real) != bt.Real:
            t = bt.Real
        elif t == bt.One or t == bt.Zero:
            # This should not happen, but just to be sure we'll make
            # an all-one or all-zero matrix into a matrix of bools.
            # (It should not happen because an all-constant matrix should
            # already be a TensorConstant node.)
            t = bt.Boolean
        assert isinstance(t, bt.BMGMatrixType)
        return t.with_dimensions(rows.value, columns.value)

    def _type_broadcast(self, node: bn.BMGNode) -> bt.BMGLatticeType:
        # We have the same logic for broadcast and fill matrix.
        assert isinstance(node, bn.BroadcastNode) or isinstance(node, bn.FillMatrixNode)
        assert len(node.inputs) == 3
        val = node.inputs[0]
        rows = node.inputs[1]
        assert isinstance(rows, bn.NaturalNode)
        columns = node.inputs[2]
        assert isinstance(columns, bn.NaturalNode)
        t = self[val]
        assert isinstance(t, bt.BMGMatrixType)
        return t.with_dimensions(rows.value, columns.value)

    def _type_to_real_matrix(self, node: bn.ToRealMatrixNode) -> bt.BMGLatticeType:
        op = node.operand
        t = self[op]
        assert isinstance(t, bt.BMGMatrixType)
        assert self.is_matrix(op)
        return bt.RealMatrix(t.rows, t.columns)

    def _type_to_pos_real_matrix(
        self, node: bn.ToPositiveRealMatrixNode
    ) -> bt.BMGLatticeType:
        op = node.operand
        t = self[op]
        assert isinstance(t, bt.BMGMatrixType)
        assert self.is_matrix(op)
        return bt.PositiveRealMatrix(t.rows, t.columns)

    def _type_to_neg_real_matrix(
        self, node: bn.ToNegativeRealMatrixNode
    ) -> bt.BMGLatticeType:
        op = node.operand
        t = self[op]
        assert isinstance(t, bt.BMGMatrixType)
        assert self.is_matrix(op)
        return bt.NegativeRealMatrix(t.rows, t.columns)

    def _type_transpose(self, node: bn.TransposeNode) -> bt.BMGLatticeType:
        op = node.operand
        t = self[op]
        assert t is not bt.Untypable
        assert isinstance(t, bt.BMGMatrixType)
        assert self.is_matrix(op)
        return bt.RealMatrix(t.columns, t.rows)

    def _type_lkj_cholesky(self, node: bn.LKJCholeskyNode) -> bt.BMGLatticeType:
        dim = node.dim
        assert isinstance(dim, bn.ConstantNode)
        dim_value = dim.value
        assert isinstance(dim_value, int)
        return bt.RealMatrix(dim_value, dim_value)

    def _compute_type_inputs_known(self, node: bn.BMGNode) -> bt.BMGLatticeType:
        # If there is any input node whose type cannot be determined, then *none*
        # of its descendents can be determined, even if a descendent node always
        # has the same type regardless of its inputs. This ensures that (1) we only
        # attempt to assign type judgments to graphs that are supported by BMG,
        # and (2) will help us avoid presenting cascading errors to the user in
        # the event that a graph violates a typing rule.
        for i in node.inputs:
            if self[i] == bt.Untypable:
                return bt.Untypable
        if isinstance(node, bn.UntypedConstantNode):
            return bt.type_of_value(node.value)
        t = type(node)
        if t in _requires_nothing:
            result = _requires_nothing[t]
        elif t in _constant_matrix_graph_types:
            assert isinstance(node, bn.ConstantTensorNode)
            r = _constant_matrix_graph_types[t]
            result = r.with_size(node.value.size())
        elif t in self._dispatch:
            result = self._dispatch[t](node)
        else:
            # TODO: Consider asserting that the node is unsupported by BMG.
            result = bt.Untypable
        assert result != bt.Zero and result != bt.One
        return result

    def is_bool(self, node: bn.BMGNode) -> bool:
        t = self[node]
        return t != bt.Untypable and bt.supremum(t, bt.Boolean) == bt.Boolean

    def is_natural(self, node: bn.BMGNode) -> bool:
        t = self[node]
        return t != bt.Untypable and bt.supremum(t, bt.Natural) == bt.Natural

    def is_prob_or_bool(self, node: bn.BMGNode) -> bool:
        t = self[node]
        return t != bt.Untypable and bt.supremum(t, bt.Probability) == bt.Probability

    def is_neg_real(self, node: bn.BMGNode) -> bool:
        t = self[node]
        return t != bt.Untypable and bt.supremum(t, bt.NegativeReal) == bt.NegativeReal

    def is_pos_real(self, node: bn.BMGNode) -> bool:
        t = self[node]
        return t != bt.Untypable and bt.supremum(t, bt.PositiveReal) == bt.PositiveReal

    def is_real(self, node: bn.BMGNode) -> bool:
        t = self[node]
        return t != bt.Untypable and bt.supremum(t, bt.Real) == bt.Real

    def is_matrix(self, node: bn.BMGNode) -> bool:
        t = type(node)
        if t in _always_matrix_types:
            return True
        lattice_type = self[node]
        if isinstance(lattice_type, bt.SimplexMatrix):
            return True
        if isinstance(lattice_type, bt.BMGMatrixType) and (
            lattice_type.rows != 1 or lattice_type.columns != 1
        ):
            return True
        return False
beanmachine-main
src/beanmachine/ppl/compiler/lattice_typer.py
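# The six base/exponent combinations accepted by the power node above reduce
# to a small decision procedure. The following is a minimal standalone sketch
# of that logic -- toy string "types" and a hand-rolled supremum table, not
# the real bt lattice types; all names here are invented for illustration.

_toy_sup = {
    ("P", "P"): "P", ("P", "R+"): "R+", ("P", "R"): "R",
    ("R+", "P"): "R+", ("R+", "R+"): "R+", ("R+", "R"): "R",
    ("R", "P"): "R", ("R", "R+"): "R", ("R", "R"): "R",
}


def toy_power_type(base: str, exp: str) -> str:
    # Mirror _type_power: widen the base to at least P and the exponent to
    # at least R+, apply the special case P ** R --> R+, then default to
    # the widened base. (The real method also handles non-scalar bases.)
    inf_base = _toy_sup[(base, "P")]
    inf_exp = _toy_sup[(exp, "R+")]
    if inf_base == "P" and inf_exp == "R":
        return "R+"
    return inf_base


assert toy_power_type("P", "R+") == "P"
assert toy_power_type("P", "R") == "R+"
assert toy_power_type("R+", "R") == "R+"
assert toy_power_type("R", "R+") == "R"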
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Callable, List, Set, Tuple

import beanmachine.ppl.compiler.profiler as prof
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.devectorizer_transformer import vectorized_graph_fixer
from beanmachine.ppl.compiler.error_report import ErrorReport
from beanmachine.ppl.compiler.fix_additions import addition_fixer, sum_fixer
from beanmachine.ppl.compiler.fix_arithmetic import (
    log1mexp_fixer,
    neg_neg_fixer,
    negative_real_multiplication_fixer,
    nested_if_same_cond_fixer,
)
from beanmachine.ppl.compiler.fix_beta_conjugate_prior import (
    beta_bernoulli_conjugate_fixer,
    beta_binomial_conjugate_fixer,
)
from beanmachine.ppl.compiler.fix_bool_arithmetic import bool_arithmetic_fixer
from beanmachine.ppl.compiler.fix_bool_comparisons import bool_comparison_fixer
from beanmachine.ppl.compiler.fix_logsumexp import logsumexp_fixer
from beanmachine.ppl.compiler.fix_matrix_scale import (
    nested_matrix_scale_fixer,
    trivial_matmul_fixer,
)
from beanmachine.ppl.compiler.fix_multiary_ops import (
    multiary_addition_fixer,
    multiary_multiplication_fixer,
)
from beanmachine.ppl.compiler.fix_normal_conjugate_prior import (
    normal_normal_conjugate_fixer,
)
from beanmachine.ppl.compiler.fix_observations import observations_fixer
from beanmachine.ppl.compiler.fix_observe_true import observe_true_fixer
from beanmachine.ppl.compiler.fix_problem import (
    ancestors_first_graph_fixer,
    conditional_graph_fixer,
    fixpoint_graph_fixer,
    GraphFixer,
    GraphFixerResult,
    node_fixer_first_match,
    NodeFixer,
    sequential_graph_fixer,
)
from beanmachine.ppl.compiler.fix_requirements import requirements_fixer
from beanmachine.ppl.compiler.fix_transpose import identity_transpose_fixer
from beanmachine.ppl.compiler.fix_unsupported import (
    bad_matmul_reporter,
    unsupported_node_fixer,
    unsupported_node_reporter,
    untypable_node_reporter,
)
from beanmachine.ppl.compiler.lattice_typer import LatticeTyper

default_skip_optimizations: Set[str] = {
    "beta_bernoulli_conjugate_fixer",
    "beta_binomial_conjugate_fixer",
    "normal_normal_conjugate_fixer",
}


def arithmetic_graph_fixer(skip: Set[str]) -> GraphFixer:
    typer = LatticeTyper()

    def _arithmetic_graph_fixer(bmg: BMGraphBuilder) -> GraphFixerResult:
        node_fixers = [
            addition_fixer(bmg, typer),
            bool_arithmetic_fixer(bmg, typer),
            bool_comparison_fixer(bmg, typer),
            log1mexp_fixer(bmg, typer),
            logsumexp_fixer(bmg),
            multiary_addition_fixer(bmg),
            multiary_multiplication_fixer(bmg),
            neg_neg_fixer(bmg),
            negative_real_multiplication_fixer(bmg, typer),
            nested_if_same_cond_fixer(bmg),
            nested_matrix_scale_fixer(bmg),
            sum_fixer(bmg, typer),
            trivial_matmul_fixer(bmg, typer),
            unsupported_node_fixer(bmg, typer),
            identity_transpose_fixer(bmg, typer),
        ]
        node_fixers = [nf for nf in node_fixers if nf.__name__ not in skip]
        node_fixer = node_fixer_first_match(node_fixers)
        arith = ancestors_first_graph_fixer(typer, node_fixer)
        return fixpoint_graph_fixer(arith)(bmg)

    return _arithmetic_graph_fixer


_conjugacy_fixer_factories: List[Callable[[BMGraphBuilder], NodeFixer]] = [
    beta_bernoulli_conjugate_fixer,
    beta_binomial_conjugate_fixer,
    normal_normal_conjugate_fixer,
]


def conjugacy_graph_fixer(skip: Set[str]) -> GraphFixer:
    def _conjugacy_graph_fixer(bmg: BMGraphBuilder) -> GraphFixerResult:
        node_fixers = [
            f(bmg) for f in _conjugacy_fixer_factories if f.__name__ not in skip
        ]
        node_fixer = node_fixer_first_match(node_fixers)
        # TODO: Make the typer optional
        return ancestors_first_graph_fixer(LatticeTyper(), node_fixer)(bmg)

    return _conjugacy_graph_fixer


def fix_problems(
    bmg: BMGraphBuilder, skip_optimizations: Set[str] = default_skip_optimizations
) -> Tuple[BMGraphBuilder, ErrorReport]:
    current = bmg
    current._begin(prof.fix_problems)
    all_fixers = sequential_graph_fixer(
        [
            vectorized_graph_fixer(),
            arithmetic_graph_fixer(skip_optimizations),
            unsupported_node_reporter(),
            bad_matmul_reporter(),
            untypable_node_reporter(),
            conjugacy_graph_fixer(skip_optimizations),
            requirements_fixer,
            observations_fixer,
            conditional_graph_fixer(
                condition=lambda gb: gb._fix_observe_true, fixer=observe_true_fixer
            ),
        ]
    )
    current, _, errors = all_fixers(current)
    current._finish(prof.fix_problems)
    return current, errors
beanmachine-main
src/beanmachine/ppl/compiler/fix_problems.py
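# fix_problems composes GraphFixers with combinators such as
# sequential_graph_fixer and fixpoint_graph_fixer. A minimal sketch of those
# two combinators, under the (assumed) convention that a fixer returns
# (graph, made_progress, errors); toy names only, not the real fix_problem
# signatures.

from typing import Callable, List, Tuple

ToyFixer = Callable[[list], Tuple[list, bool, list]]


def toy_sequential(fixers: List[ToyFixer]) -> ToyFixer:
    # Run each fixer once, in order, accumulating progress and errors.
    def run(graph: list) -> Tuple[list, bool, list]:
        progress, errors = False, []
        for f in fixers:
            graph, p, e = f(graph)
            progress |= p
            errors += e
        return graph, progress, errors

    return run


def toy_fixpoint(fixer: ToyFixer) -> ToyFixer:
    # Run one fixer until it stops making progress (or reports errors).
    def run(graph: list) -> Tuple[list, bool, list]:
        ever = False
        while True:
            graph, p, errors = fixer(graph)
            ever |= p
            if not p or errors:
                return graph, ever, errors

    return run


def dedup_pairs(graph: list) -> Tuple[list, bool, list]:
    # Example fixer: remove one adjacent duplicate "node" per pass.
    for i in range(len(graph) - 1):
        if graph[i] == graph[i + 1]:
            return graph[:i] + graph[i + 1:], True, []
    return graph, False, []


assert toy_fixpoint(dedup_pairs)([1, 1, 1, 2]) == ([1, 2], True, [])
assert toy_sequential([dedup_pairs, dedup_pairs])([3, 3, 3]) == ([3], True, [])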
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import List

import beanmachine.ppl.compiler.bmg_nodes as bn
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.fix_problem import (
    Inapplicable,
    NodeFixer,
    NodeFixerResult,
)


class MultiaryOperatorFixer:
    """This fixer transforms graphs with long chains of binary operator nodes
    into multiary operations. This greatly decreases both the number of nodes
    and the number of edges in the graph, which can lead to performance wins
    during inference."""

    _bmg: BMGraphBuilder
    _operator: type

    def __init__(self, bmg: BMGraphBuilder, operator: type) -> None:
        self._bmg = bmg
        self._operator = operator

    def _single_output_is_operator(self, n: bn.BMGNode) -> bool:
        if len(n.outputs.items) != 1:
            # Not exactly one output node.
            return False
        if next(iter(n.outputs.items.values())) != 1:
            # Exactly one output node, but has two edges going to it.
            # TODO: This is a bit opaque. Add a helper method for this.
            return False
        o = next(iter(n.outputs.items.keys()))
        return isinstance(o, self._operator)

    def _addition_single_output_is_operator(self, n: bn.BMGNode) -> bool:
        if not isinstance(n, self._operator):
            return False
        if len(n.inputs) > 2:
            return False
        return self._single_output_is_operator(n)

    def _needs_fixing(self, n: bn.BMGNode) -> bool:
        # A binary operator is fixable if:
        #
        # * There is more than one output OR the single output is NOT the
        #   given operation
        # * At least one of the left or right inputs is a binary operator
        #   with only one output.
        #
        # Let us say the operator is addition; we are looking for stuff like:
        #
        #   A   B
        #    \ /
        #     +  C
        #      \ /
        #       +  D
        #        \ /
        #         +
        #
        # to turn it into
        #
        #   A  B  C  D
        #    \ |  | /
        #      sum
        #
        # Why do we have these conditions?
        #
        # * Consider the (A + B) + C node. We do not want to fix it.
        #   If there is exactly one output and it is an addition, then
        #   this node's output is itself a candidate for fixing; we can skip
        #   this one and fix it instead. No need to do extra work we're just
        #   going to throw away.
        #
        # * Any addition with two or more outputs is an addition that is
        #   deduplicated. We do not want to eliminate it; doing so causes
        #   the deduplicated work to be done twice. That is, if we have
        #
        #   A   B
        #    \ /
        #     +  C
        #      \ /
        #   E   +  D
        #    \ / \ /
        #     *   +
        #
        #   Then the bottom addition node is NOT fixable but the A + B + C
        #   addition is fixable. The desired final graph is:
        #
        #   A  B  C
        #    \ |  /
        #   E  sum  D
        #    \ /  \ /
        #     *    +
        #
        #   and NOT
        #
        #   A  B  C
        #    \ |  /
        #   E  sum    A  B  C  D
        #    \ /       \ |  | /
        #     *           sum
        #
        # Why not? Because our metrics are graph size and amount of arithmetic
        # performed when evaluating the graph in BMG.
        #
        # * The original graph has eight edges, nine nodes, and computes three
        #   additions: t1 = A + B, t2 = t1 + C, t3 = t2 + D
        # * The desired graph has seven edges, eight nodes, and computes three
        #   additions: t1 = sum(A, B, C) requires two additions, and
        #   t2 = t1 + D is one more.
        # * The bad graph has nine edges, eight nodes, and computes five
        #   additions: sum(A, B, C) does two additions and sum(A, B, C, D)
        #   does three.
        #
        # The desired graph is a clear win in its reduced edge and node count
        # without actually doing more math. The bad graph is in every way
        # worse than the desired graph.
        return (
            isinstance(n, self._operator)
            and len(n.inputs) == 2
            and not self._single_output_is_operator(n)
            and (
                self._addition_single_output_is_operator(n.inputs[0])
                or self._addition_single_output_is_operator(n.inputs[1])
            )
        )

    def accumulate_input_nodes(self, n: bn.BMGNode) -> List[bn.BMGNode]:
        acc = []
        stack = [n.inputs[1], n.inputs[0]]
        while len(stack) > 0:
            c = stack.pop()
            if self._addition_single_output_is_operator(c):
                assert isinstance(c, self._operator)
                assert len(c.inputs) == 2
                stack.append(c.inputs[1])
                stack.append(c.inputs[0])
            else:
                acc.append(c)
        return acc


def multiary_addition_fixer(bmg: BMGraphBuilder) -> NodeFixer:
    """This fixer transforms graphs with long chains of binary addition nodes
    into multiary addition. This greatly decreases both the number of nodes
    and the number of edges in the graph, which can lead to performance wins
    during inference."""

    maf = MultiaryOperatorFixer(bmg, bn.AdditionNode)

    def multiary_addition_fixer(node: bn.BMGNode) -> NodeFixerResult:
        if not maf._needs_fixing(node):
            return Inapplicable
        acc = maf.accumulate_input_nodes(node)
        return bmg.add_multi_addition(*acc)

    return multiary_addition_fixer


def multiary_multiplication_fixer(bmg: BMGraphBuilder) -> NodeFixer:
    """This fixer transforms graphs with long chains of binary multiplication
    nodes into multiary multiplication. This greatly decreases both the number
    of nodes and the number of edges in the graph, which can lead to
    performance wins during inference."""

    maf = MultiaryOperatorFixer(bmg, bn.MultiplicationNode)

    def multiary_multiplication_fixer(node: bn.BMGNode) -> NodeFixerResult:
        if not maf._needs_fixing(node):
            return Inapplicable
        acc = maf.accumulate_input_nodes(node)
        return bmg.add_multi_multiplication(*acc)

    return multiary_multiplication_fixer
beanmachine-main
src/beanmachine/ppl/compiler/fix_multiary_ops.py
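# The accumulate_input_nodes walk above can be tried on a toy expression
# tree. A sketch with a minimal Add node (illustrative classes, not the real
# bn nodes): it flattens Add(Add(Add(a, b), c), d) into [a, b, c, d] the same
# way, expanding children that are additions and accumulating everything
# else in left-to-right order.

class ToyAdd:
    def __init__(self, left, right):
        self.inputs = [left, right]


def flatten_adds(n: ToyAdd) -> list:
    acc = []
    stack = [n.inputs[1], n.inputs[0]]
    while stack:
        c = stack.pop()
        if isinstance(c, ToyAdd):
            # In the real fixer this also requires that c have exactly one
            # output edge; the toy tree keeps no output bookkeeping.
            stack.append(c.inputs[1])
            stack.append(c.inputs[0])
        else:
            acc.append(c)
    return acc


chain = ToyAdd(ToyAdd(ToyAdd("a", "b"), "c"), "d")
assert flatten_adds(chain) == ["a", "b", "c", "d"]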
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.bmg_types import (
    Boolean,
    Natural,
    NegativeReal,
    PositiveReal,
    Probability,
    Real,
    supremum,
    type_of_value,
    Untypable,
)
from beanmachine.ppl.compiler.error_report import ErrorReport, ImpossibleObservation
from beanmachine.ppl.compiler.lattice_typer import LatticeTyper


def observations_fixer(bmg: BMGraphBuilder):
    """This fixer attempts to fix violations of BMG type system requirements
    in observation nodes. It also finds observations that are impossible --
    say, an observation that a Boolean node is -3.14 -- and reports them
    as errors."""

    typer = LatticeTyper()
    errors = ErrorReport()
    made_progress = False
    for o in bmg.all_observations():
        v = o.value
        value_type = type_of_value(v)
        assert value_type != Untypable
        sample_type = typer[o.observed]
        assert sample_type != Untypable
        if supremum(value_type, sample_type) != sample_type:
            errors.add_error(ImpossibleObservation(o, sample_type))
        elif sample_type == Boolean and not isinstance(v, bool):
            o.value = bool(v)
            made_progress = True
        elif sample_type == Natural and not isinstance(v, int):
            o.value = int(v)
            made_progress = True
        elif sample_type in {
            Probability,
            PositiveReal,
            NegativeReal,
            Real,
        } and not isinstance(v, float):
            o.value = float(v)
            made_progress = True
        else:
            # TODO: How should we deal with observations of
            # TODO: matrix-valued samples?
            # When this gets fixed, also fix _add_observation in gen_bmg_cpp.
            pass
        # TODO: Handle the case where there are two inconsistent
        # TODO: observations of the same sample
    return bmg, made_progress, errors
beanmachine-main
src/beanmachine/ppl/compiler/fix_observations.py
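# The coercions in observations_fixer follow one pattern: if the observed
# sample has lattice type T and the observed value is not already the Python
# representation BMG expects for T, rewrite the value. A sketch of just that
# coercion table, with string stand-ins for the lattice types (toy names,
# not the real bmg_types objects):

def coerce_observed_value(sample_type: str, v):
    # Returns (new_value, made_progress). Assumes the impossible-observation
    # check (the supremum test) has already passed.
    if sample_type == "Boolean" and not isinstance(v, bool):
        return bool(v), True
    if sample_type == "Natural" and not isinstance(v, int):
        return int(v), True
    if (
        sample_type in {"Probability", "PositiveReal", "NegativeReal", "Real"}
        and not isinstance(v, float)
    ):
        return float(v), True
    return v, False


assert coerce_observed_value("Boolean", 1.0) == (True, True)
assert coerce_observed_value("Natural", 2.0) == (2, True)
assert coerce_observed_value("Real", 3) == (3.0, True)
assert coerce_observed_value("Real", 3.0) == (3.0, False)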
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# TODO: For reasons unknown, Pyre is unable to find type information about
# TODO: beanmachine.graph from beanmachine.ppl. I'll figure out why later;
# TODO: for now, we'll just turn off error checking in this module.
# pyre-ignore-all-errors

from typing import Any, Tuple

import beanmachine.ppl.compiler.bmg_nodes as bn
from beanmachine.graph import (
    AtomicType,
    DistributionType as dt,
    FactorType as ft,
    OperatorType,
    ValueType,
    VariableType,
)
from beanmachine.ppl.compiler.bmg_types import RealMatrix, SimplexMatrix
from beanmachine.ppl.compiler.lattice_typer import LatticeTyper

_factor_types = {
    bn.ExpProductFactorNode: ft.EXP_PRODUCT,
}


def factor_type(node: bn.FactorNode) -> ft:
    return _factor_types[type(node)]


_dist_types = {
    bn.BernoulliLogitNode: (dt.BERNOULLI_LOGIT, AtomicType.BOOLEAN),
    bn.BernoulliNode: (dt.BERNOULLI, AtomicType.BOOLEAN),
    bn.BetaNode: (dt.BETA, AtomicType.PROBABILITY),
    bn.BinomialNode: (dt.BINOMIAL, AtomicType.NATURAL),
    bn.CategoricalNode: (dt.CATEGORICAL, AtomicType.NATURAL),
    bn.DirichletNode: (dt.DIRICHLET, None),
    bn.FlatNode: (dt.FLAT, AtomicType.PROBABILITY),
    bn.GammaNode: (dt.GAMMA, AtomicType.POS_REAL),
    bn.HalfCauchyNode: (dt.HALF_CAUCHY, AtomicType.POS_REAL),
    bn.NormalNode: (dt.NORMAL, AtomicType.REAL),
    bn.HalfNormalNode: (dt.HALF_NORMAL, AtomicType.POS_REAL),
    bn.StudentTNode: (dt.STUDENT_T, AtomicType.REAL),
    bn.PoissonNode: (dt.POISSON, AtomicType.NATURAL),
    bn.LKJCholeskyNode: (dt.LKJ_CHOLESKY, None),
}


def dist_type(node: bn.DistributionNode) -> Tuple[dt, Any]:
    t = type(node)
    if t is bn.DirichletNode:
        simplex = LatticeTyper()[node]
        assert isinstance(simplex, SimplexMatrix)
        element_type = ValueType(
            VariableType.COL_SIMPLEX_MATRIX,
            AtomicType.PROBABILITY,
            simplex.rows,
            simplex.columns,
        )
        return dt.DIRICHLET, element_type
    elif t is bn.LKJCholeskyNode:
        matrix = LatticeTyper()[node]
        assert isinstance(matrix, RealMatrix)
        element_type = ValueType(
            VariableType.BROADCAST_MATRIX,
            AtomicType.REAL,
            matrix.rows,
            matrix.columns,
        )
        return dt.LKJ_CHOLESKY, element_type
    return _dist_types[t]


_operator_types = {
    bn.AdditionNode: OperatorType.ADD,
    bn.BroadcastNode: OperatorType.BROADCAST,
    bn.ChoiceNode: OperatorType.CHOICE,
    bn.CholeskyNode: OperatorType.CHOLESKY,
    bn.ColumnIndexNode: OperatorType.COLUMN_INDEX,
    bn.ComplementNode: OperatorType.COMPLEMENT,
    bn.ElementwiseMultiplyNode: OperatorType.ELEMENTWISE_MULTIPLY,
    bn.ExpM1Node: OperatorType.EXPM1,
    bn.ExpNode: OperatorType.EXP,
    bn.FillMatrixNode: OperatorType.FILL_MATRIX,
    bn.IfThenElseNode: OperatorType.IF_THEN_ELSE,
    bn.Log1mexpNode: OperatorType.LOG1MEXP,
    bn.LogNode: OperatorType.LOG,
    bn.LogProbNode: OperatorType.LOG_PROB,
    bn.LogisticNode: OperatorType.LOGISTIC,
    bn.LogSumExpNode: OperatorType.LOGSUMEXP,
    bn.LogSumExpVectorNode: OperatorType.LOGSUMEXP_VECTOR,
    bn.MatrixAddNode: OperatorType.MATRIX_ADD,
    bn.MatrixExpNode: OperatorType.MATRIX_EXP,
    bn.MatrixLogNode: OperatorType.MATRIX_LOG,
    bn.MatrixLog1mexpNode: OperatorType.MATRIX_LOG1MEXP,
    bn.MatrixComplementNode: OperatorType.MATRIX_COMPLEMENT,
    bn.MatrixMultiplicationNode: OperatorType.MATRIX_MULTIPLY,
    bn.MatrixNegateNode: OperatorType.MATRIX_NEGATE,
    bn.MatrixPhiNode: OperatorType.MATRIX_PHI,
    bn.MatrixScaleNode: OperatorType.MATRIX_SCALE,
    bn.MatrixSumNode: OperatorType.MATRIX_SUM,
    bn.MultiplicationNode: OperatorType.MULTIPLY,
    bn.NegateNode: OperatorType.NEGATE,
    bn.PhiNode: OperatorType.PHI,
    bn.PowerNode: OperatorType.POW,
    bn.SampleNode: OperatorType.SAMPLE,
    bn.ToIntNode: OperatorType.TO_INT,
    bn.ToMatrixNode: OperatorType.TO_MATRIX,
    bn.ToNegativeRealNode: OperatorType.TO_NEG_REAL,
    bn.ToNegativeRealMatrixNode: OperatorType.TO_NEG_REAL_MATRIX,
    bn.ToRealMatrixNode: OperatorType.TO_REAL_MATRIX,
    bn.ToRealNode: OperatorType.TO_REAL,
    bn.ToPositiveRealMatrixNode: OperatorType.TO_POS_REAL_MATRIX,
    bn.ToPositiveRealNode: OperatorType.TO_POS_REAL,
    bn.ToProbabilityNode: OperatorType.TO_PROBABILITY,
    bn.VectorIndexNode: OperatorType.INDEX,
    bn.TransposeNode: OperatorType.TRANSPOSE,
}

_constant_value_types = {
    bn.BooleanNode,
    bn.NaturalNode,
    bn.NegativeRealNode,
    bn.PositiveRealNode,
    bn.ProbabilityNode,
    bn.RealNode,
}

_constant_matrix_types = {
    bn.ConstantBooleanMatrixNode,
    bn.ConstantNaturalMatrixNode,
    bn.ConstantNegativeRealMatrixNode,
    bn.ConstantPositiveRealMatrixNode,
    bn.ConstantProbabilityMatrixNode,
    bn.ConstantRealMatrixNode,
    bn.ConstantSimplexMatrixNode,
}


def operator_type(node: bn.OperatorNode) -> OperatorType:
    return _operator_types[type(node)]


def is_supported_by_bmg(node: bn.BMGNode) -> bool:
    t = type(node)
    return (
        t is bn.Observation
        or t is bn.Query
        or t in _constant_matrix_types
        or t in _constant_value_types
        or t in _operator_types
        or t in _dist_types
        or t in _factor_types
    )
beanmachine-main
src/beanmachine/ppl/compiler/bmg_node_types.py
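# bmg_node_types.py is essentially a set of lookup tables keyed by node
# class, with dist_type special-casing the two distributions whose BMG
# element type depends on the node instance. The shape of that pattern,
# sketched with toy names (not the real beanmachine.graph enums):

TOY_DIST_TYPES = {
    "Bernoulli": ("BERNOULLI", "BOOLEAN"),
    "Normal": ("NORMAL", "REAL"),
}


def toy_dist_type(kind: str, instance_info=None):
    # Instance-dependent cases are handled before the table lookup, exactly
    # as dist_type handles DirichletNode and LKJCholeskyNode above.
    if kind == "Dirichlet":
        rows, cols = instance_info  # element type depends on the node's size
        return "DIRICHLET", ("COL_SIMPLEX_MATRIX", "PROBABILITY", rows, cols)
    return TOY_DIST_TYPES[kind]


assert toy_dist_type("Bernoulli") == ("BERNOULLI", "BOOLEAN")
assert toy_dist_type("Dirichlet", (3, 1))[0] == "DIRICHLET"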
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# In lattice_typer.py we assign a type to every node in a graph.
# If the node or any ancestor node is unsupported in BMG then the node
# is said to be "untypable"; otherwise, it computes (and caches) the smallest
# possible lattice type for that node.
#
# We can similarly put "requirements" on edges. The rules are as follows:
#
# * Every node in a graph has n >= 0 inputs. For every node we can compute
#   a list of n requirements, one corresponding to each edge.
#
# * If a node is untypable then every edge requirement is "Any" -- that is,
#   there is no requirement placed on the edge. The node cannot be represented
#   in BMG, so there is no reason to report a requirement violation on any of
#   its edges.
#
# * Requirements of input edges of typable nodes are computed for each node.
#   Since this computation is cheap and requires no traversal of the graph
#   once the lattice type is known, we do not attempt to cache this
#   information.

import typing
from typing import Callable, Dict, List

import beanmachine.ppl.compiler.bmg_nodes as bn
import beanmachine.ppl.compiler.bmg_types as bt
from beanmachine.ppl.compiler.lattice_typer import LatticeTyper

# These are the nodes which always have the same requirements no matter
# what their inputs are.
_known_requirements: Dict[type, List[bt.Requirement]] = {
    bn.Observation: [bt.any_requirement],
    bn.Query: [bt.any_requirement],
    # Distributions
    bn.BernoulliLogitNode: [bt.Real],
    bn.BernoulliNode: [bt.Probability],
    bn.BetaNode: [bt.PositiveReal, bt.PositiveReal],
    bn.BinomialNode: [bt.Natural, bt.Probability],
    bn.PoissonNode: [bt.PositiveReal],
    bn.LKJCholeskyNode: [bt.Natural, bt.PositiveReal],
    # TODO: We don't enforce squareness or positive-definite-ness for Cholesky
    bn.CholeskyNode: [bt.any_real_matrix],
    bn.GammaNode: [bt.PositiveReal, bt.PositiveReal],
    bn.HalfCauchyNode: [bt.PositiveReal],
    bn.NormalNode: [bt.Real, bt.PositiveReal],
    bn.HalfNormalNode: [bt.PositiveReal],
    bn.StudentTNode: [bt.PositiveReal, bt.Real, bt.PositiveReal],
    # Operators
    bn.BroadcastNode: [bt.any_requirement, bt.Natural, bt.Natural],
    # Broadcast requires that its first operand be a matrix, that
    # the other two operands be constants, and that the constants are
    # a valid resizing of the given value. But since we will not
    # generate a broadcast node unless those conditions are met,
    # we don't need to check them.
    bn.FillMatrixNode: [bt.any_requirement, bt.Natural, bt.Natural],
    # Similarly FillMatrix requires that its first operand NOT be
    # a matrix, that its others are constants. Again, since we will
    # not generate this node unless those conditions are met, we
    # don't check them.
    bn.LogisticNode: [bt.Real],
    bn.Log1mexpNode: [bt.NegativeReal],
    # TODO: Check the dimensions. Consider broadcasting if possible.
    bn.MatrixMultiplicationNode: [bt.any_real_matrix, bt.any_real_matrix],
    bn.MatrixExpNode: [bt.any_real_matrix],
    bn.MatrixLogNode: [bt.any_pos_real_matrix],
    bn.MatrixLog1mexpNode: [bt.any_real_matrix],
    bn.MatrixNegateNode: [bt.any_real_matrix],
    bn.MatrixSumNode: [bt.any_real_matrix],
    bn.PhiNode: [bt.Real],
    bn.ToIntNode: [bt.upper_bound(bt.Real)],
    bn.ToNegativeRealNode: [bt.Real],
    bn.ToRealNode: [bt.upper_bound(bt.Real)],
    bn.ToPositiveRealNode: [bt.upper_bound(bt.PositiveReal)],
    bn.ToProbabilityNode: [bt.upper_bound(bt.Real)],
}


def _requirements_valid(node: bn.BMGNode, reqs: List[bt.Requirement]) -> bool:
    return len(reqs) == len(node.inputs) and not any(
        r in bt._invalid_requirement_types for r in reqs
    )


class EdgeRequirements:
    _dispatch: Dict[type, Callable]

    typer: LatticeTyper

    def __init__(self, typer: LatticeTyper) -> None:
        self.typer = typer
        self._dispatch = {
            # Factors
            bn.ExpProductFactorNode: self._requirements_expproduct,
            # Distributions
            bn.CategoricalNode: self._requirements_categorical,
            bn.DirichletNode: self._requirements_dirichlet,
            # Operators
            bn.AdditionNode: self._requirements_addition,
            bn.ChoiceNode: self._requirements_choice,
            bn.ColumnIndexNode: self._requirements_column_index,
            bn.ComplementNode: self._same_as_output,
            bn.ElementwiseMultiplyNode: self._requirements_elementwise_mult,
            bn.ExpM1Node: self._same_as_output,
            bn.ExpNode: self._requirements_exp_neg,
            bn.IfThenElseNode: self._requirements_if,
            bn.LogNode: self._requirements_log,
            bn.LogProbNode: self._requirements_log_prob,
            bn.LogSumExpNode: self._requirements_logsumexp,
            bn.LogSumExpVectorNode: self._requirements_logsumexp_vector,
            # TODO: bn.MatrixMultiplyNode: self._requirements_matrix_multiply,
            # see comment above
            bn.MatrixComplementNode: self._requirements_matrix_complement,
            bn.MatrixAddNode: self._requirements_matrix_add,
            bn.MatrixPhiNode: self._requirements_matrix_phi,
            bn.MatrixScaleNode: self._requirements_matrix_scale,
            bn.MultiplicationNode: self._requirements_multiplication,
            bn.NegateNode: self._requirements_exp_neg,
            bn.PowerNode: self._requirements_power,
            bn.SampleNode: self._same_as_output,
            bn.ToMatrixNode: self._requirements_to_matrix,
            bn.VectorIndexNode: self._requirements_index,
        }

    def _requirements_expproduct(
        self, node: bn.ExpProductFactorNode
    ) -> List[bt.Requirement]:
        # Each input to an exp-product factor is required to be a
        # real, negative real, positive real or probability.
        return [
            self.typer[i]
            if self.typer[i]
            in {bt.Real, bt.NegativeReal, bt.PositiveReal, bt.Probability}
            else bt.Real
            for i in node.inputs
        ]

    def _requirements_dirichlet(self, node: bn.DirichletNode) -> List[bt.Requirement]:
        # BMG's Dirichlet node requires that the input be a column
        # vector of positive reals, and the row count of the vector is
        # the number of elements in the simplex we produce. We can
        # express that restriction as a positive real matrix with
        # column count equal to 1 and row count equal to the final
        # dimension of the size.
        #
        # A degenerate case here is Dirichlet(tensor([1.0])); we would
        # normally generate the input as a positive real constant, but
        # we require that it be a positive real constant 1x1 *matrix*,
        # which is a different node. The "always matrix" requirement
        # forces the problem fixer to ensure that the input node is
        # always considered by BMG to be a matrix.
        #
        # TODO: BMG requires it to be a *broadcast* matrix; what happens
        # if we feed one Dirichlet into another? That would be a simplex,
        # not a broadcast matrix. Do some research here; do we actually
        # need the semantics of "always a broadcast matrix" ?

        # To compute the requirement we get the matrix column size of the
        # input.
        input_type = self.typer[node]
        required_columns = 1
        required_rows = 1
        # TODO: This will produce a bad error message experience. What we
        # want to say in the case where the input is not a matrix at all is
        # "we require a one-column positive real matrix" and not "we require
        # a 1x1 positive real matrix".
        if isinstance(input_type, bt.BMGMatrixType):
            required_rows = input_type.rows
        return [
            bt.always_matrix(
                bt.PositiveReal.with_dimensions(required_rows, required_columns)
            )
        ]

    def _requirements_categorical(
        self, node: bn.CategoricalNode
    ) -> List[bt.Requirement]:
        # A categorical node requires that its input be a simplex with a
        # single column.
        it = self.typer[node.probability]
        # If we have a matrix input then its lattice type is one hot matrix,
        # simplex, or broadcast matrix.
        #
        # Let's list all the possible cases:
        #
        # * If we have an r x c one hot or simplex then require a r x 1
        #   simplex; this will succeed if c == 1 and fail with a good error
        #   message otherwise.
        #
        # * If we have an r x c other matrix then require a r x 1 simplex;
        #   this will always fail with a good error message.
        #
        # In all cases we require an r x 1 simplex:
        if isinstance(it, bt.BMGMatrixType):
            return [bt.SimplexMatrix(it.rows, 1)]
        # We do not have a matrix input. There are two possibilities:
        #
        # * The input is 1. We require a 1 x 1 simplex, and this will
        #   succeed.
        # * The input is any other atomic value, and we need to fail. But
        #   what unmet requirement should we put on this edge?
        #
        # TODO: The current code produces a bad error message experience
        # because at present we have no way to represent the stated
        # requirement: "a simplex with one column and ANY number of rows".
        # We only have a way to represent the requirement "a simplex with
        # one column and a SPECIFIC number of rows".
        # We should create a requirement object that represents the real
        # requirement, and then error reporting can give a sensible error
        # message.
        #
        # Note that once we have this requirement object we can simply
        # return it for *every* case and delete all this code. The
        # requirements fixer would then be responsible for determining if
        # the requirement can be met.
        return [bt.SimplexMatrix(1, 1)]

    def _requirements_addition(self, node: bn.BMGNode) -> List[bt.Requirement]:
        it = self.typer[node]
        assert it in {bt.Real, bt.NegativeReal, bt.PositiveReal}
        return [it] * len(node.inputs)  # pyre-ignore

    def _requirements_column_index(
        self, node: bn.ColumnIndexNode
    ) -> List[bt.Requirement]:
        # See notes in _requirements_index. If we have an index into
        # a one-hot matrix or an all-zero matrix, assume that we want
        # it to be a column of bools.
        lt = self.typer[node.left]
        assert isinstance(lt, bt.BMGMatrixType)
        result = lt
        if isinstance(lt, bt.OneHotMatrix) or isinstance(lt, bt.ZeroMatrix):
            result = bt.Boolean.with_dimensions(lt.rows, lt.columns)
        return [bt.always_matrix(result), bt.Natural]

    def _requirements_exp_neg(self, node: bn.UnaryOperatorNode) -> List[bt.Requirement]:
        # Same logic for both exp and negate operators
        ot = self.typer[node.operand]
        if bt.supremum(ot, bt.NegativeReal) == bt.NegativeReal:
            return [bt.NegativeReal]
        if bt.supremum(ot, bt.PositiveReal) == bt.PositiveReal:
            return [bt.PositiveReal]
        return [bt.Real]

    def _same_as_output(self, node: bn.BMGNode) -> List[bt.Requirement]:
        # Input type must be same as output type
        return [self.typer[node]]

    def _requirements_if(self, node: bn.IfThenElseNode) -> List[bt.Requirement]:
        # The condition has to be Boolean; the consequence and alternative
        # need to be the same.
        # TODO: Consider what to do if the node type is Tensor.
        it = self.typer[node]
        return [bt.Boolean, it, it]

    def _requirements_choice(self, node: bn.ChoiceNode) -> List[bt.Requirement]:
        # The condition has to be Natural; the values must all be of the
        # same type.
        # TODO: Consider what to do if the node type is Tensor.
        node_type = self.typer[node]
        c: List[bt.Requirement] = [bt.Natural]
        v: List[bt.Requirement] = [node_type]
        return c + v * (len(node.inputs) - 1)

    def _requirements_index(self, node: bn.VectorIndexNode) -> List[bt.Requirement]:
        # The index operator introduces an interesting wrinkle into the
        # "requirements" computation. Until now we have always had the
        # property that queries and observations are "sinks" of the graph,
        # and the transitive closure of the inputs to the sinks can have
        # their requirements checked in order going from the nodes farthest
        # from the sinks down to the sinks. That is, each node can meet its
        # input requirements *before* its output nodes meet their
        # requirements. We now have a case where doing so creates potential
        # inefficiencies.
        #
        # B is the constant 3x1 vector [[0], [1], [1]]
        # N is any node of type natural.
        # I is an index
        # F is Bernoulli.
        #
        #   B N
        #   | |
        #    I
        #    |
        #    F
        #
        # The requirement on edge I->F is Probability.
        # The requirement on edge N->I is Natural.
        # What is the requirement on the B->I edge?
        #
        # If we say that it is Boolean[3, 1], its inf type, then the graph
        # we end up generating is
        #
        # b = const_bool_matrix([[0], [1], [1]])  # 3x1 bool matrix
        # n = whatever                            # natural
        # i = index(b, i)                         # bool
        # z = const_prob(0)                       # prob
        # o = const_prob(1)                       # prob
        # c = if_then_else(i, o, z)               # prob
        # f = Bernoulli(c)                        # bool
        #
        # But it would be arguably better to produce
        #
        # b = const_prob_matrix([[0.], [1.], [1.]])  # 3x1 prob matrix
        # n = whatever                               # natural
        # i = index(b, i)                            # prob
        # f = Bernoulli(i)                           # bool
        #
        # TODO: We might consider an optimization pass which does so.
        #
        # However there is an even worse situation. Suppose we have
        # this unlikely-but-legal graph:
        #
        # Z is [[0], [0], [0]]
        # N is any natural
        # I is an index
        # C requires a Boolean input
        # L requires a NegativeReal input
        #
        #   Z N
        #   | |
        #    I
        #   | |
        #   C L
        #
        # The inf type of Z is Zero[3, 1].
        # The I->C edge requirement is Boolean.
        # The I->L edge requirement is NegativeReal.
        #
        # Now what requirement do we impose on the Z->I edge? We have our
        # choice of "matrix of negative reals" or "matrix of bools", and
        # whichever we pick will disappoint someone.
        #
        # Fortunately for us, this situation is unlikely; a model writer who
        # contrives a situation where they are making a stochastic choice
        # where all choices are all zero AND that zero needs to be used as
        # both false and a negative number is not writing realistic models.
        #
        # What we will do in this unlikely situation is decide that the
        # intended output type is Boolean and therefore the vector is a
        # vector of bools.
        #
        # -----
        #
        # We require:
        # * the vector must be one column
        # * the vector must be a matrix, not a single value
        # * the vector must be either a simplex, or a matrix where the
        #   element type is the output type of the indexing operation
        # * the index must be a natural
        #
        lt = self.typer[node.left]
        # If we have a tensor that has more than two dimensions, who can
        # say what the column count should be?
        # TODO: We need a better error message for that scenario.
        # It will be common for people to use tensors that are too high
        # dimension for BMG to handle and we should say that clearly.
        required_rows = lt.rows if isinstance(lt, bt.BMGMatrixType) else 1
        required_columns = 1
        if isinstance(lt, bt.SimplexMatrix):
            vector_req = lt.with_dimensions(required_rows, required_columns)
        else:
            it = self.typer[node]
            assert isinstance(it, bt.BMGMatrixType)
            vector_req = it.with_dimensions(required_rows, required_columns)
        return [bt.always_matrix(vector_req), bt.Natural]

    def _requirements_log(self, node: bn.LogNode) -> List[bt.Requirement]:
        # Input must be probability or positive real; choose the smaller.
        ot = bt.supremum(self.typer[node.operand], bt.Probability)
        if ot == bt.Probability:
            return [bt.Probability]
        return [bt.PositiveReal]

    def _requirements_log_prob(self, node: bn.LogProbNode) -> List[bt.Requirement]:
        # TODO: Left parent must be a distribution.
        # Right parent must be the same type as a sample
        # from the left.
        return [bt.any_requirement, self.typer[node.left]]

    def _requirements_logsumexp(self, node: bn.LogSumExpNode) -> List[bt.Requirement]:
        s = bt.supremum(*[self.typer[i] for i in node.inputs])
        if s not in {bt.Real, bt.NegativeReal, bt.PositiveReal}:
            s = bt.Real
        return [s] * len(node.inputs)  # pyre-ignore

    def _requirements_logsumexp_vector(
        self, node: bn.LogSumExpVectorNode
    ) -> List[bt.Requirement]:
        # LOGSUMEXP_VECTOR requires:
        # * a broadcast matrix with one column and any number of rows
        # * the matrix has real, positive real, or negative real elements.
        #
        # TODO: What about simplex matrices? It should be legal to do
        # a logsumexp of a sample from a Dirichlet, right? Can we convert
        # a simplex to a broadcast matrix of positive reals?
        t = self.typer[node.operand]
        required_rows = t.rows if isinstance(t, bt.BMGMatrixType) else 1
        required_columns = 1
        if isinstance(t, bt.PositiveRealMatrix) or isinstance(t, bt.NegativeRealMatrix):
            t = t.with_dimensions(required_rows, required_columns)
        else:
            t = bt.Real.with_dimensions(required_rows, required_columns)
        return [bt.always_matrix(t)]

    def _requirements_multiplication(
        self, node: bn.MultiplicationNode
    ) -> List[bt.Requirement]:
        it = self.typer[node]
        assert it in {bt.Probability, bt.PositiveReal, bt.Real}
        return [it] * len(node.inputs)  # pyre-ignore

    def _requirements_matrix_complement(
        self, node: bn.MatrixComplementNode
    ) -> List[bt.Requirement]:
        it = self.typer[node]
        assert (
            isinstance(it, bt.BooleanMatrix)
            or isinstance(it, bt.ProbabilityMatrix)
            or isinstance(it, bt.SimplexMatrix)
        )
        req = []
        if isinstance(it, bt.ProbabilityMatrix):
            req = [bt.ProbabilityMatrix]
        if isinstance(it, bt.BooleanMatrix):
            req = [bt.BooleanMatrix]
        if isinstance(it, bt.SimplexMatrix):
            req = [bt.SimplexMatrix]
        return req

    def _requirements_elementwise_mult(
        self, node: bn.ElementwiseMultiplyNode
    ) -> List[bt.Requirement]:
        # Elementwise multiply requires that both operands be the same as
        # the output type.
        it = self.typer[node]
        assert isinstance(it, bt.BMGMatrixType)
        return [it, it]

    def _requirements_matrix_add(self, node: bn.MatrixAddNode) -> List[bt.Requirement]:
        # Matrix add requires that both operands be the same as the output
        # type.
        it = self.typer[node]
        assert isinstance(it, bt.BMGMatrixType)
        return [it, it]

    def _requirements_matrix_phi(self, node: bn.MatrixPhiNode) -> List[bt.Requirement]:
        it = self.typer[node]
        assert isinstance(it, bt.BMGMatrixType)
        return [bt.RealMatrix(it.rows, it.columns)]

    def _requirements_matrix_scale(
        self, node: bn.MatrixScaleNode
    ) -> List[bt.Requirement]:
        it = self.typer[node]
        assert isinstance(it, bt.BMGMatrixType)
        it = typing.cast(bt.BroadcastMatrixType, it)
        it_scalar = it.with_dimensions(1, 1)
        # TODO[Walid]: Not entirely sure if we need this asserted requirement.
        # It was included only for analogy with multiplication
        assert it_scalar in {bt.Probability, bt.PositiveReal, bt.Real}
        return [it_scalar, it]

    def _requirements_power(self, node: bn.PowerNode) -> List[bt.Requirement]:
        # BMG supports a power node that has these possible combinations of
        # base and exponent type:
        #
        # P  ** R+ --> P
        # P  ** R  --> R+
        # R+ ** R+ --> R+
        # R+ ** R  --> R+
        # R  ** R+ --> R
        # R  ** R  --> R
        req_base = bt.supremum(self.typer[node.left], bt.Probability)
        if req_base not in {bt.Probability, bt.PositiveReal, bt.Real}:
            req_base = bt.Real
        req_exp = bt.supremum(self.typer[node.right], bt.PositiveReal)
        if req_exp not in {bt.PositiveReal, bt.Real}:
            req_exp = bt.Real
        return [req_base, req_exp]

    def _requirements_to_matrix(self, node: bn.ToMatrixNode) -> List[bt.Requirement]:
        node_type = self.typer[node]
        assert isinstance(node_type, bt.BMGMatrixType)
        item_type = node_type.with_dimensions(1, 1)
        rc: List[bt.Requirement] = [bt.Natural, bt.Natural]
        its: List[bt.Requirement] = [item_type]
        return rc + its * (len(node.inputs) - 2)

    def requirements(self, node: bn.BMGNode) -> List[bt.Requirement]:
        input_count = len(node.inputs)
        if input_count == 0:
            result = []
        elif self.typer[node] == bt.Untypable:
            result = [bt.any_requirement] * input_count
        else:
            t = type(node)
            if t in _known_requirements:
                result = _known_requirements[t]
            elif t in self._dispatch:
                result = self._dispatch[t](node)
            else:
                result = [bt.any_requirement] * input_count
        assert _requirements_valid(node, result)
        return result
beanmachine-main
src/beanmachine/ppl/compiler/bmg_requirements.py
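# Two of the dispatch cases above share one idea: the operator's own lattice
# type is pushed down onto its value inputs, while the selector input gets a
# fixed type. A sketch of the _requirements_if / _requirements_choice shape
# with string stand-ins for lattice types (toy names, not the real bt types):

def toy_requirements_if(node_type: str) -> list:
    # if-then-else: the condition must be Boolean; both branches must be
    # whatever type the node itself was judged to be.
    return ["Boolean", node_type, node_type]


def toy_requirements_choice(node_type: str, num_inputs: int) -> list:
    # choice: the selector must be Natural; every remaining value input
    # must match the node's own type.
    return ["Natural"] + [node_type] * (num_inputs - 1)


assert toy_requirements_if("PositiveReal") == [
    "Boolean", "PositiveReal", "PositiveReal"
]
assert toy_requirements_choice("Real", 4) == ["Natural", "Real", "Real", "Real"]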
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import beanmachine.ppl.compiler.bmg_nodes as bn
import beanmachine.ppl.compiler.bmg_types as bt
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.fix_problem import (
    Inapplicable,
    NodeFixer,
    NodeFixerResult,
)
from beanmachine.ppl.compiler.lattice_typer import LatticeTyper


def addition_fixer(bmg: BMGraphBuilder, typer: LatticeTyper) -> NodeFixer:
    """This fixer rewrites additions into complements:

    * add(1, negate(prob)) or add(negate(prob), 1) -> complement(prob)
    * add(1, negate(bool)) or add(negate(bool), 1) -> complement(bool)"""

    def fixer(node: bn.BMGNode) -> NodeFixerResult:
        if not isinstance(node, bn.AdditionNode) or len(node.inputs) != 2:
            return Inapplicable
        left = node.inputs[0]
        right = node.inputs[1]
        if (
            bn.is_one(left)
            and isinstance(right, bn.NegateNode)
            and typer.is_prob_or_bool(right.operand)
        ):
            return bmg.add_complement(right.operand)
        if (
            bn.is_one(right)
            and isinstance(left, bn.NegateNode)
            and typer.is_prob_or_bool(left.operand)
        ):
            return bmg.add_complement(left.operand)
        return Inapplicable

    return fixer


def sum_fixer(bmg: BMGraphBuilder, typer: LatticeTyper) -> NodeFixer:
    """This fixer rewrites vector sums into multiary additions."""

    def fixer(node: bn.BMGNode) -> NodeFixerResult:
        if not isinstance(node, bn.SumNode):
            return Inapplicable
        t = typer[node.operand]
        if not isinstance(t, bt.BMGMatrixType):
            return Inapplicable
        # TODO: Write code to handle a 2-d tensor element sum.
        if t.columns != 1:
            return Inapplicable
        indices = []
        for i in range(t.rows):
            c = bmg.add_constant(i)
            index = bmg.add_index(node.operand, c)
            indices.append(index)
        if len(indices) == 1:
            return indices[0]
        if len(indices) == 2:
            return bmg.add_addition(indices[0], indices[1])
        return bmg.add_multi_addition(*indices)

    return fixer
beanmachine-main
src/beanmachine/ppl/compiler/fix_additions.py
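# The addition_fixer pattern match can be exercised on a toy AST. A sketch
# with invented classes (not the real bn nodes, and with the
# is_prob_or_bool type check elided): a two-input addition of the constant
# one and a negated operand becomes a single complement node.

class One:
    pass


class Neg:
    def __init__(self, operand):
        self.operand = operand


class Add:
    def __init__(self, left, right):
        self.inputs = [left, right]


class Complement:
    def __init__(self, operand):
        self.operand = operand


def toy_addition_fixer(node):
    # Returns a replacement node, or None for "inapplicable".
    if not isinstance(node, Add) or len(node.inputs) != 2:
        return None
    left, right = node.inputs
    if isinstance(left, One) and isinstance(right, Neg):
        return Complement(right.operand)
    if isinstance(right, One) and isinstance(left, Neg):
        return Complement(left.operand)
    return None


rewritten = toy_addition_fixer(Add(One(), Neg("p")))
assert isinstance(rewritten, Complement) and rewritten.operand == "p"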
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""Visualize the contents of a builder in the DOT graph language."""

from typing import Set

from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.bmg_requirements import EdgeRequirements
from beanmachine.ppl.compiler.fix_problems import (
    default_skip_optimizations,
    fix_problems,
)
from beanmachine.ppl.compiler.graph_labels import get_edge_labels, get_node_label
from beanmachine.ppl.compiler.lattice_typer import LatticeTyper
from beanmachine.ppl.compiler.sizer import size_to_str, Sizer
from beanmachine.ppl.utils.dotbuilder import DotBuilder


def to_dot(
    bmg_raw: BMGraphBuilder,
    node_types: bool = False,
    node_sizes: bool = False,
    edge_requirements: bool = False,
    after_transform: bool = False,
    label_edges: bool = True,
    skip_optimizations: Set[str] = default_skip_optimizations,
) -> str:
    """This dumps the entire accumulated graph state, including orphans,
    as a DOT graph description; nodes are enumerated in the order they
    were created."""
    lt = LatticeTyper()
    sizer = Sizer()
    reqs = EdgeRequirements(lt)
    db = DotBuilder()

    if after_transform:
        # TODO: It is strange having a visualizer that edits the graph
        # as a side effect, and it is also strange that the only way
        # to visualize the ancestor nodes is to edit the graph.
        #
        # * Remove the after_transform flag; modify the tests so that
        #   tests which currently set after_transform to true instead
        #   call fix_problems first.
        #
        # * Add a whole_graph flag, default to true, which decides
        #   whether to graph the whole thing or not.
        bmg, error_report = fix_problems(bmg_raw, skip_optimizations)
        error_report.raise_errors()
        node_list = bmg.all_ancestor_nodes()
    else:
        node_list = bmg_raw.all_nodes()

    nodes = {}
    for index, node in enumerate(node_list):
        nodes[node] = index

    max_length = len(str(len(nodes) - 1))

    def to_id(index) -> str:
        return "N" + str(index).zfill(max_length)

    for node, index in nodes.items():
        n = to_id(index)
        node_label = get_node_label(node)
        if node_types:
            node_label += ":" + lt[node].short_name
        if node_sizes:
            node_label += ":" + size_to_str(sizer[node])
        db.with_node(n, node_label)
        for (i, edge_name, req) in zip(
            node.inputs, get_edge_labels(node), reqs.requirements(node)
        ):
            if label_edges:
                edge_label = edge_name
                if edge_requirements:
                    edge_label += ":" + req.short_name
            elif edge_requirements:
                edge_label = req.short_name
            else:
                edge_label = ""
            # Bayesian networks are typically drawn with the arrows
            # in the direction of data flow, not in the direction
            # of dependency.
            start_node = to_id(nodes[i])
            end_node = n
            db.with_edge(start_node, end_node, edge_label)
    return str(db)
beanmachine-main
src/beanmachine/ppl/compiler/gen_dot.py
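# to_dot above leans on DotBuilder for the actual serialization; the essence
# of emitting a graph in the DOT language is small enough to sketch directly.
# A toy emitter, not the real beanmachine DotBuilder API:

def toy_to_dot(nodes: dict, edges: list) -> str:
    # nodes: id -> label; edges: (start, end, label) triples, drawn in the
    # direction of data flow as in the real visualizer.
    lines = ["digraph G {"]
    for node_id, label in nodes.items():
        lines.append(f'  {node_id} [label="{label}"];')
    for start, end, label in edges:
        lines.append(f'  {start} -> {end} [label="{label}"];')
    lines.append("}")
    return "\n".join(lines)


print(toy_to_dot({"N0": "2.0", "N1": "Beta"}, [("N0", "N1", "alpha")]))
# digraph G {
#   N0 [label="2.0"];
#   N1 [label="Beta"];
#   N0 -> N1 [label="alpha"];
# }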
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math
from typing import List

import beanmachine.ppl.compiler.bmg_nodes as bn
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.fix_problem import (
    Inapplicable,
    NodeFixer,
    NodeFixerResult,
)


def _mu_is_normal_with_real_params(n: bn.SampleNode) -> bool:
    # TODO: For now we support conjugate prior transformation on
    # priors with constant parameter values. We need to modify this for
    # cascading normals e.g. normal(normal(normal...))
    normal_node = n.inputs[0]
    if not isinstance(normal_node, bn.NormalNode):
        return False
    mu = normal_node.inputs[0]
    sigma = normal_node.inputs[1]
    return isinstance(mu, bn.ConstantNode) and isinstance(sigma, bn.ConstantNode)


def _mu_is_queried(n: bn.SampleNode) -> bool:
    # TODO: This check can be removed if it is not a necessary condition.
    return any(isinstance(i, bn.Query) for i in n.outputs.items)


def _sample_contains_obs(n: bn.SampleNode) -> bool:
    return any(isinstance(o, bn.Observation) for o in n.outputs.items)


def _normal_is_observed(n: bn.BMGNode) -> bool:
    return any(_sample_contains_obs(i) for i in n.outputs.items)


def normal_normal_conjugate_fixer(bmg: BMGraphBuilder) -> NodeFixer:
    """This fixer transforms graphs with a Normal likelihood with fixed sigma
    and a Normal prior for mu. Since this is a conjugate pair, we analytically
    update the prior parameters Normal(mu, sigma) using observations to get
    the posterior parameters Normal(mu', sigma'). Once we update the
    parameters, we delete the observed samples from the graph. This greatly
    decreases the number of nodes and the number of edges in the graph, and
    the Bayesian update is reduced to a parameter update, which can lead to
    performance wins during inference."""

    def _transform_mu(
        mu: bn.ConstantNode,
        std: bn.ConstantNode,
        sigma: bn.ConstantNode,
        obs: List[bn.Observation],
    ) -> bn.BMGNode:
        precision_prior = pow(std.value, -2.0)
        precision_data = len(obs) * pow(sigma.value, -2.0)
        precision_inv = pow((precision_prior + precision_data), -1.0)
        data_sum = sum(o.value for o in obs)
        transformed_mu = precision_inv * (
            (mu.value * pow(std.value, -2.0)) + (data_sum * pow(sigma.value, -2.0))
        )
        return bmg.add_constant(transformed_mu)

    def _transform_std(
        std: bn.ConstantNode,
        sigma: bn.ConstantNode,
        obs: List[bn.Observation],
    ) -> bn.BMGNode:
        precision_prior = 1 / pow(std.value, 2)
        precision_data = len(obs) / pow(sigma.value, 2)
        transformed_std = math.sqrt(1 / (precision_prior + precision_data))
        return bmg.add_constant(transformed_std)

    def fixer(n: bn.BMGNode) -> NodeFixerResult:
        # A graph is normal-normal conjugate fixable if:
        #
        # There is a Normal node with mu that is sampled
        # from a Normal distribution. Further, the Normal prior
        # is queried and the Normal likelihood has n observations.
        #
        # That is, we are looking for stuff like:
        #
        #       mu  std
        #        \  /
        #       Normal
        #          |
        #  sigma Sample
        #      \  /  \
        #     Normal  Query
        #        |
        #      Sample
        #        |  \
        #  Observation 15.9 ...
        #
        # to turn it into
        #
        #      mu'  std'
        #        \  /
        #       Normal
        #         |
        #       Sample
        #         |
        #       Query
        if not isinstance(n, bn.NormalNode):
            return Inapplicable
        mu_normal_sample = n.inputs[0]
        if not (
            isinstance(mu_normal_sample, bn.SampleNode)
            and _mu_is_normal_with_real_params(mu_normal_sample)
            and _mu_is_queried(mu_normal_sample)
            and _normal_is_observed(n)
        ):
            return Inapplicable
        sigma = n.inputs[1]
        assert isinstance(sigma, bn.UntypedConstantNode)

        mu_normal_node = mu_normal_sample.inputs[0]
        assert isinstance(mu_normal_node, bn.NormalNode)

        obs = []
        samples_to_remove = []
        for o in n.outputs.items:
            if isinstance(o, bn.SampleNode) and _sample_contains_obs(o):
                obs.append(next(iter(o.outputs.items.keys())))
                samples_to_remove.append(o)

        mu = mu_normal_node.inputs[0]
        std = mu_normal_node.inputs[1]
        assert isinstance(mu, bn.ConstantNode)
        assert isinstance(std, bn.ConstantNode)

        transformed_mu = _transform_mu(mu, std, sigma, obs)
        transformed_std = _transform_std(std, sigma, obs)

        mu_normal_node.inputs[0] = transformed_mu
        mu_normal_node.inputs[1] = transformed_std

        # We need to remove both the sample and the observation node.
        for o in obs:
            bmg.remove_leaf(o)
        for s in samples_to_remove:
            if len(s.outputs.items) == 0:
                bmg.remove_node(s)

        return n

    return fixer
beanmachine-main
src/beanmachine/ppl/compiler/fix_normal_conjugate_prior.py
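# The parameter updates in _transform_mu / _transform_std above are the
# standard normal-normal conjugate posterior with known likelihood sigma.
# A standalone version of just the math, checkable by hand (toy function
# name; graph bookkeeping omitted):

import math
from typing import List, Tuple


def normal_normal_posterior(
    mu: float, std: float, sigma: float, observations: List[float]
) -> Tuple[float, float]:
    # Work in precisions: posterior precision is the sum of the prior
    # precision and one data precision per observation.
    precision_prior = std ** -2.0
    precision_data = len(observations) * sigma ** -2.0
    posterior_var = 1.0 / (precision_prior + precision_data)
    posterior_mu = posterior_var * (
        mu * std ** -2.0 + sum(observations) * sigma ** -2.0
    )
    return posterior_mu, math.sqrt(posterior_var)


# Prior N(0, 1), likelihood sigma = 1, one observation at 2.0:
# the posterior should be N(1.0, sqrt(0.5)).
mu_post, std_post = normal_normal_posterior(0.0, 1.0, 1.0, [2.0])
assert abs(mu_post - 1.0) < 1e-12
assert abs(std_post - math.sqrt(0.5)) < 1e-12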
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Optional

import beanmachine.ppl.compiler.bmg_nodes as bn
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.bmg_types import BMGMatrixType, RealMatrix
from beanmachine.ppl.compiler.error_report import BadMatrixMultiplication, BMGError
from beanmachine.ppl.compiler.sizer import is_scalar, Size, Sizer


class SizeAssessment:
    def __init__(self, sizer: Sizer):
        self.sizer = sizer

    def size_error(
        self, node: bn.BMGNode, context: BMGraphBuilder
    ) -> Optional[BMGError]:
        error = None
        if isinstance(node, bn.MatrixMultiplicationNode):
            lhs = node.inputs.inputs[0]
            rhs = node.inputs.inputs[1]

            lhs_size = self.sizer[lhs]
            rhs_size = self.sizer[rhs]

            if not (is_scalar(lhs_size) or is_scalar(rhs_size)):
                l_rhs = len(rhs_size)
                l_lhs = len(lhs_size)
                rhs_can_be_considered_column = (
                    l_rhs == 1 and l_lhs == 2 and lhs_size[1] == rhs_size[0]
                )
                lhs_can_be_considered_row = (
                    l_lhs == 1 and l_rhs == 2 and lhs_size[0] == rhs_size[0]
                )
                can_be_inner_product = (
                    l_rhs == 1 and l_lhs == 1 and rhs_size[0] == lhs_size[0]
                )

                are_not_matrices_or_not_compatible_matrices = (
                    not (len(lhs_size) == 2 and l_rhs == 2)
                ) or (lhs_size[1] != rhs_size[0])
                if are_not_matrices_or_not_compatible_matrices and not (
                    rhs_can_be_considered_column
                    or lhs_can_be_considered_row
                    or can_be_inner_product
                ):
                    # Do NOT use the Lattice typer. The BMGMatrix type
                    # constructor will translate the size into column major
                    # form. Since the user is writing in a row major api, we
                    # present the error message in row major form. We don't
                    # care about the element type in this case.
                    def get_matrix_type(sz: Size) -> BMGMatrixType:
                        rows = 1
                        cols = 1
                        length = len(sz)
                        if length >= 2:
                            rows = sz[length - 2]
                            cols = sz[length - 1]
                        elif length == 1:
                            rows = sz[0]
                        return RealMatrix(rows, cols)

                    error = BadMatrixMultiplication(
                        node,
                        get_matrix_type(lhs_size),
                        get_matrix_type(rhs_size),
                        context.execution_context.node_locations(node),
                    )
        return error
beanmachine-main
src/beanmachine/ppl/compiler/size_assessment.py
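# The compatibility test in size_error above boils down to: after allowing
# 1-d operands to act as row/column vectors, the inner dimensions must
# agree. A sketch over plain tuples standing in for torch.Size (toy function
# name, invented for illustration):

def toy_matmul_compatible(lhs: tuple, rhs: tuple) -> bool:
    if len(lhs) == 0 or len(rhs) == 0:
        return True  # scalar operands are screened out earlier by is_scalar
    if len(lhs) == 2 and len(rhs) == 2:
        return lhs[1] == rhs[0]  # ordinary matrix product
    if len(lhs) == 2 and len(rhs) == 1:
        return lhs[1] == rhs[0]  # rhs treated as a column vector
    if len(lhs) == 1 and len(rhs) == 2:
        return lhs[0] == rhs[0]  # lhs treated as a row vector
    if len(lhs) == 1 and len(rhs) == 1:
        return lhs[0] == rhs[0]  # inner product
    return False


assert toy_matmul_compatible((2, 3), (3, 4))
assert not toy_matmul_compatible((2, 3), (2, 4))
assert toy_matmul_compatible((3,), (3,))
assert toy_matmul_compatible((3,), (3, 4))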
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math
from typing import Optional

import beanmachine.ppl.compiler.bmg_nodes as bn
import beanmachine.ppl.compiler.bmg_types as bt
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.bmg_node_types import is_supported_by_bmg
from beanmachine.ppl.compiler.bmg_types import PositiveReal
from beanmachine.ppl.compiler.error_report import (
    BadMatrixMultiplication,
    BMGError,
    UnsupportedNode,
    UntypableNode,
)
from beanmachine.ppl.compiler.fix_problem import (
    edge_error_pass,
    GraphFixer,
    node_error_pass,
    node_fixer_first_match,
    NodeFixer,
    type_guard,
)
from beanmachine.ppl.compiler.graph_labels import get_edge_label
from beanmachine.ppl.compiler.lattice_typer import LatticeTyper


class UnsupportedNodeFixer:
    """This class takes a Bean Machine Graph builder and attempts to
    fix all uses of unsupported operators by replacing them with semantically
    equivalent nodes that are supported by BMG."""

    _bmg: BMGraphBuilder
    _typer: LatticeTyper

    _unsupported_nodes = [
        bn.Chi2Node,
        bn.DivisionNode,
        bn.Exp2Node,
        bn.IndexNode,
        bn.ItemNode,
        bn.Log10Node,
        bn.Log1pNode,
        bn.Log2Node,
        bn.LogSumExpTorchNode,
        bn.LogAddExpNode,
        bn.SquareRootNode,
        bn.SwitchNode,
        bn.TensorNode,
        bn.UniformNode,
    ]

    def __init__(self, bmg: BMGraphBuilder, typer: LatticeTyper) -> None:
        self._bmg = bmg
        self._typer = typer

    def _replace_division(self, node: bn.DivisionNode) -> Optional[bn.BMGNode]:
        # BMG has no division node. We replace division by a constant with
        # a multiplication:
        #
        # x / const --> x * (1 / const)
        #
        # And we replace division by a non-constant with a power:
        #
        # x / y --> x * (y ** (-1))
        r = node.right
        if isinstance(r, bn.ConstantNode):
            return self._bmg.add_multiplication(
                node.left, self._bmg.add_constant(1.0 / r.value)
            )
        neg1 = self._bmg.add_constant(-1.0)
        powr = self._bmg.add_power(r, neg1)
        return self._bmg.add_multiplication(node.left, powr)

    def _replace_exp2(self, node: bn.Exp2Node) -> bn.BMGNode:
        two = self._bmg.add_constant(2.0)
        return self._bmg.add_power(two, node.operand)

    def _replace_log10(self, node: bn.Log10Node) -> bn.BMGNode:
        # log10(x) = log(x) * (1/log(10))
        c = self._bmg.add_constant(1.0 / math.log(10))
        ln = self._bmg.add_log(node.operand)
        return self._bmg.add_multiplication(ln, c)

    def _replace_log1p(self, node: bn.Log1pNode) -> bn.BMGNode:
        # log1p(x) = log(1+x)
        # TODO: If we added a log1p node to BMG then it could get
        # more accurate results.
        one = self._bmg.add_constant(1.0)
        add = self._bmg.add_addition(one, node.operand)
        return self._bmg.add_log(add)

    def _replace_log2(self, node: bn.Log2Node) -> bn.BMGNode:
        # log2(x) = log(x) * (1/log(2))
        c = self._bmg.add_constant(1.0 / math.log(2))
        ln = self._bmg.add_log(node.operand)
        return self._bmg.add_multiplication(ln, c)

    def _replace_squareroot(self, node: bn.SquareRootNode) -> bn.BMGNode:
        half = self._bmg.add_constant(0.5)
        return self._bmg.add_power(node.operand, half)

    def _replace_uniform(self, node: bn.UniformNode) -> Optional[bn.BMGNode]:
        # TODO: Suppose we have something like Uniform(1.0, 2.0). Can we
        # replace that with (Flat() + 1.0)? The problem is that if there is
        # an observation on a sample of the original uniform, we need to
        # modify the observation to point to the sample, not the addition,
        # and then we need to modify the value of the observation. But this
        # is doable. Revisit this question later.
        #
        # For now, we can simply say that a uniform distribution over 0.0
        # to 1.0 can be replaced with a flat.
        low = node.low
        high = node.high
        if (
            isinstance(low, bn.ConstantNode)
            and float(low.value) == 0.0
            and isinstance(high, bn.ConstantNode)
            and float(high.value) == 1.0
        ):
            return self._bmg.add_flat()
        return None

    def _replace_chi2(self, node: bn.Chi2Node) -> bn.BMGNode:
        # Chi2(x), which BMG does not support, is exactly equivalent
        # to Gamma(x * 0.5, 0.5), which BMG does support.
        half = self._bmg.add_constant_of_type(0.5, PositiveReal)
        mult = self._bmg.add_multiplication(node.df, half)
        return self._bmg.add_gamma(mult, half)

    def _replace_index_one_column(self, node: bn.IndexNode) -> bn.BMGNode:
        left = node.left
        right = node.right
        typer = self._typer
        assert isinstance(typer, LatticeTyper)

        # It is possible during rewrites to end up with a constant for both
        # operands of the index; in that case, fold the index away entirely.
        # If we have a ToMatrix indexed by a constant, we can similarly
        # eliminate the indexing operation.
        # If we have Index(ColumnIndex(ToMatrix(elements), Const1), Const2)
        # then we can again eliminate the indexing altogether.
        if isinstance(right, bn.ConstantNode) and typer.is_natural(right):
            r = int(right.value)
            if isinstance(left, bn.ConstantNode):
                return self._bmg.add_constant(left.value[r])
            if isinstance(left, bn.ToMatrixNode):
                return left.inputs[r + 2]
            if isinstance(left, bn.ColumnIndexNode):
                collection = left.left
                if isinstance(collection, bn.ToMatrixNode):
                    column_index = left.right
                    if isinstance(column_index, bn.ConstantNode) and typer.is_natural(
                        column_index
                    ):
                        c = int(column_index.value)
                        rows = int(collection.rows.value)
                        return collection.inputs[rows * c + r + 2]
        # We cannot optimize it away; add a vector index operation.
        return self._bmg.add_vector_index(left, right)

    def _replace_index_multi_column(self, node: bn.IndexNode) -> bn.BMGNode:
        left = node.left
        right = node.right
        typer = self._typer
        assert isinstance(typer, LatticeTyper)

        # It is possible during rewrites to end up with a constant for both
        # operands of the index; in that case, fold the index away entirely.
        if isinstance(right, bn.ConstantNode) and typer.is_natural(right):
            r = int(right.value)
            if isinstance(left, bn.ConstantNode):
                return self._bmg.add_constant(left.value[r])
            # TODO: If left is a ToMatrixNode then we can construct a second
            # ToMatrixNode that has just the entries we need.
        # We cannot optimize it away.
        return self._bmg.add_column_index(left, right)

    def _replace_index(self, node: bn.IndexNode) -> Optional[bn.BMGNode]:
        # * If we have an index into a one-column matrix, replace it with
        #   a vector index.
        # * If we have an index into a multi-column matrix, replace it with
        #   a column index.
        left = node.left
        node_type = self._typer[left]
        if not isinstance(node_type, bt.BMGMatrixType):
            return None
        if node_type.columns == 1:
            return self._replace_index_one_column(node)
        return self._replace_index_multi_column(node)

    def _replace_item(self, node: bn.ItemNode) -> Optional[bn.BMGNode]:
        # "item()" is an identity for our purposes. We just remove it.
        return node.inputs[0]

    def _replace_lse(self, node: bn.LogSumExpTorchNode) -> Optional[bn.BMGNode]:
        # We only support compiling models where dim=0 and keepDims=False.
        if not bn.is_zero(node.inputs[1]) or not bn.is_zero(node.inputs[2]):
            return None

        # We require that the input to LSE be a single column.
        operand = node.inputs[0]
        operand_type = self._typer[operand]
        if not isinstance(operand_type, bt.BMGMatrixType) or operand_type.columns != 1:
            return None

        # If the input is a TO_MATRIX operation then we can just feed its
        # inputs directly into the n-ary LSE BMG node.
        if isinstance(operand, bn.ToMatrixNode):
            # The first two inputs are the size.
            assert len(operand.inputs) >= 3
            # If the matrix is a singleton then logsumexp is an identity.
            if len(operand.inputs) == 3:
                return operand.inputs[2]
            elements = operand.inputs.inputs[2:]
            assert isinstance(elements, list)
            return self._bmg.add_logsumexp(*elements)

        # Otherwise, just generate the vector LSE BMG node.
        return self._bmg.add_logsumexp_vector(operand)

    def _replace_lae(self, node: bn.LogAddExpNode) -> Optional[bn.BMGNode]:
        return self._bmg.add_logsumexp(*node.inputs)

    def _replace_tensor(self, node: bn.TensorNode) -> Optional[bn.BMGNode]:
        # Replace a 1-d or 2-d tensor with a TO_MATRIX node.
        size = node._size
        if len(size) > 2:
            return None
        # This is the row and column count of the torch tensor.
        # In BMG, matrices are column-major, so we'll swap them.
        r, c = bt._size_to_rc(size)
        # ToMatrixNode requires naturals.
        rows = self._bmg.add_natural(c)
        cols = self._bmg.add_natural(r)
        tm = self._bmg.add_to_matrix(rows, cols, *node.inputs.inputs)
        return tm

    def _replace_bool_switch(self, node: bn.SwitchNode) -> bn.BMGNode:
        # If the switched value is a Boolean then we can turn this into an
        # if-then-else.
        assert (len(node.inputs) - 1) / 2 == 2
        assert isinstance(node.inputs[1], bn.ConstantNode)
        assert isinstance(node.inputs[3], bn.ConstantNode)
        if bn.is_zero(node.inputs[1]):
            assert bn.is_one(node.inputs[3])
            return self._bmg.add_if_then_else(
                node.inputs[0], node.inputs[4], node.inputs[2]
            )
        else:
            assert bn.is_one(node.inputs[1])
            assert bn.is_zero(node.inputs[3])
            return self._bmg.add_if_then_else(
                node.inputs[0], node.inputs[2], node.inputs[4]
            )

    def _replace_natural_switch(self, node: bn.SwitchNode) -> Optional[bn.BMGNode]:
        # If:
        #
        # * the switched value is natural, and
        # * the cases are 0, 1, 2, ... n
        #
        # then we can generate Choice(choice, [stochastic_values]).
        #
        # TODO: If we have a contiguous set of cases, say {2, 3, 4}, then
        # we could generate the choice from the elements and the index could
        # be the choice node minus two.
        #
        # TODO: If we have a slightly noncontiguous set of cases, say
        # {0, 1, 3}, then we can generate a choice with a dummy value of the
        # appropriate type in the missing place.
        #
        # TODO: If we have arbitrary natural cases, say 1, 10, 101, then we
        # could add an integer equality operation to BMG and generate a
        # nested IfThenElse.

        # Do we have contiguous cases 0, ..., n?
        num_cases = (len(node.inputs) - 1) // 2
        cases = set()
        for i in range(num_cases):
            c = node.inputs[i * 2 + 1]
            assert isinstance(c, bn.ConstantNode)
            cases.add(int(c.value))
        if min(cases) != 0 or max(cases) != num_cases - 1 or len(cases) != num_cases:
            return None

        # We're all set; generate a choice.
        values = [None] * num_cases
        for i in range(num_cases):
            c = node.inputs[i * 2 + 1]
            assert isinstance(c, bn.ConstantNode)
            v = node.inputs[i * 2 + 2]
            values[int(c.value)] = v  # pyre-ignore
        assert None not in values
        return self._bmg.add_choice(node.inputs[0], *values)

    def _replace_switch(self, node: bn.SwitchNode) -> Optional[bn.BMGNode]:
        # inputs[0] is the value used to perform the switch; there are
        # then pairs of constants and values. It should be impossible
        # to have an even number of inputs.
        assert len(node.inputs) % 2 == 1
        choice = node.inputs[0]
        num_cases = (len(node.inputs) - 1) // 2
        # It should be impossible to have a switch with no cases.
        assert num_cases > 0

        # It is possible but weird to have a switch with exactly one case.
        # In this scenario we can eliminate the switch entirely by simply
        # replacing it with its lone case value.
        # TODO: Consider producing a warning for this situation, because
        # the user's model is probably wrong if they think they are
        # stochastically choosing a random variable but always get the
        # same one.
        if num_cases == 1:
            assert isinstance(node.inputs[1], bn.ConstantNode)
            return node.inputs[2]

        # There are at least two cases. We should never have two cases to
        # choose from but a constant choice!
        assert not isinstance(choice, bn.ConstantNode)

        tc = self._typer[choice]
        if tc == bt.Boolean:
            return self._replace_bool_switch(node)
        if tc == bt.Natural:
            return self._replace_natural_switch(node)
        # TODO: Generate a better error message for switches that we cannot
        # yet turn into BMG nodes.
        return None

    def _replace_binomial_logit(
        self, node: bn.BinomialLogitNode
    ) -> Optional[bn.BinomialNode]:
        logistic = self._bmg.add_logistic(node.inputs[1])
        return self._bmg.add_binomial(node.inputs[0], logistic)


def unsupported_node_fixer(bmg: BMGraphBuilder, typer: LatticeTyper) -> NodeFixer:
    usnf = UnsupportedNodeFixer(bmg, typer)
    return node_fixer_first_match(
        [
            type_guard(bn.Chi2Node, usnf._replace_chi2),
            type_guard(bn.DivisionNode, usnf._replace_division),
            type_guard(bn.Exp2Node, usnf._replace_exp2),
            type_guard(bn.IndexNode, usnf._replace_index),
            type_guard(bn.ItemNode, usnf._replace_item),
            type_guard(bn.Log10Node, usnf._replace_log10),
            type_guard(bn.Log1pNode, usnf._replace_log1p),
            type_guard(bn.Log2Node, usnf._replace_log2),
            type_guard(bn.LogSumExpTorchNode, usnf._replace_lse),
            type_guard(bn.LogAddExpNode, usnf._replace_lae),
            type_guard(bn.SquareRootNode, usnf._replace_squareroot),
            type_guard(bn.SwitchNode, usnf._replace_switch),
            type_guard(bn.TensorNode, usnf._replace_tensor),
            type_guard(bn.UniformNode, usnf._replace_uniform),
            type_guard(bn.BinomialLogitNode, usnf._replace_binomial_logit),
        ]
    )


# TODO: We should make a rewriter that detects stochastic index
# into list. We will need to detect if the list is (1) all
# numbers, in which case we can make a constant matrix out of it,
# (2) mix of numbers and stochastic elements, in which case we can
# make it into a TO_MATRIX node, or (3) wrong shape or contents,
# in which case we must give an error. We will likely want to
# move this check for unsupported constant value to AFTER that rewrite.


def unsupported_node_reporter() -> GraphFixer:
    def _error_for_unsupported_node(
        bmg: BMGraphBuilder, n: bn.BMGNode, index: int
    ) -> Optional[BMGError]:
        # TODO: The edge labels used to visualize the graph in DOT
        # are not necessarily the best ones for displaying errors.
        # Consider fixing this.

        # Constants that can be converted to constant nodes of the
        # appropriate type will be converted in the requirements checking
        # pass. Here we just detect constants that cannot possibly be
        # supported because they are the wrong dimensionality.
parent = n.inputs[index] if isinstance(parent, bn.ConstantNode): t = bt.type_of_value(parent.value) if t != bt.Tensor and t != bt.Untypable: return None if is_supported_by_bmg(parent): return None return UnsupportedNode( parent, n, get_edge_label(n, index), bmg.execution_context.node_locations(parent), ) return edge_error_pass(_error_for_unsupported_node) def bad_matmul_reporter() -> GraphFixer: typer = LatticeTyper() def get_error(bmg: BMGraphBuilder, node: bn.BMGNode) -> Optional[BMGError]: if not isinstance(node, bn.MatrixMultiplicationNode): return None # If an operand is bad then we'll produce an error later. This # pass just looks for row/column mismatches. lt = typer[node.left] if not isinstance(lt, bt.BMGMatrixType): return None rt = typer[node.right] if not isinstance(rt, bt.BMGMatrixType): return None if lt.columns == rt.rows: return None return BadMatrixMultiplication( node, lt, rt, bmg.execution_context.node_locations(node) ) return node_error_pass(get_error) def untypable_node_reporter() -> GraphFixer: # By the time this pass runs every node in the graph should # be a valid BMG node (except constants, which are rewritten later) # and therefore should have a valid type. Later passes rely on this # invariant, so if it is violated, let's find out now and stop # the compiler rather than producing some strange error later on. typer = LatticeTyper() def get_error(bmg: BMGraphBuilder, node: bn.BMGNode) -> Optional[BMGError]: # If a node is untypable then all its descendants will be too. # We do not want to produce a cascading error here so let's just # report the nodes that are untypable who have no untypable parents. if isinstance(node, bn.ConstantNode): return None t = typer[node] if t != bt.Untypable and t != bt.Tensor: return None for i in node.inputs: t = typer[i] if t == bt.Untypable or t == bt.Tensor: return None return UntypableNode(node, bmg.execution_context.node_locations(node)) return node_error_pass(get_error)
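# A minimal standalone sketch (not part of the compiler) that numerically
# checks the algebraic identities the fixers above rely on. Everything here
# is local to this example.
import math

import torch
import torch.distributions as tdist


def _check_rewrite_identities() -> None:
    x = torch.tensor(2.5)
    y = torch.tensor(1.7)
    # x / y  -->  x * (y ** -1)
    assert torch.isclose(x / y, x * y ** -1.0)
    # exp2(x)  -->  2 ** x
    assert torch.isclose(torch.exp2(x), 2.0 ** x)
    # log10(x)  -->  log(x) * (1 / log(10)); log2 by the same change of base
    assert torch.isclose(torch.log10(x), torch.log(x) * (1.0 / math.log(10)))
    assert torch.isclose(torch.log2(x), torch.log(x) * (1.0 / math.log(2)))
    # sqrt(x)  -->  x ** 0.5
    assert torch.isclose(torch.sqrt(x), x ** 0.5)
    # Chi2(df)  -->  Gamma(df * 0.5, 0.5): identical densities everywhere.
    df, v = torch.tensor(3.0), torch.tensor(0.8)
    assert torch.isclose(
        tdist.Chi2(df).log_prob(v),
        tdist.Gamma(df * 0.5, torch.tensor(0.5)).log_prob(v),
    )


_check_rewrite_identities()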
beanmachine-main
src/beanmachine/ppl/compiler/fix_unsupported.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json from typing import Any, Dict, List, Optional import beanmachine.ppl.compiler.bmg_nodes as bn from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder _node_type_to_distribution = { bn.BernoulliNode: "DISTRIBUTION_BERNOULLI", bn.BetaNode: "DISTRIBUTION_BETA", bn.NormalNode: "DISTRIBUTION_NORMAL", } _node_type_to_operator = { bn.MultiplicationNode: "MULTIPLY", bn.SampleNode: "SAMPLE", } MiniNode = Dict[str, Any] class ToMini: _node_to_mini_node: Dict[bn.BMGNode, MiniNode] _observed_constants: int _queries: int _mini_nodes: List[MiniNode] def __init__(self) -> None: self._node_to_mini_node = {} self._observed_constants = 0 self._queries = 0 self._mini_nodes = [] def to_json(self, bmg: BMGraphBuilder, indent=None) -> str: self._observed_constants = 0 self._queries = 0 self._mini_nodes = [] self._node_to_mini_node = {} # all_ancestor_nodes enumerates all samples, queries, # observations, and all of their ancestors, in topo-sorted # order, parents before children. for node in bmg.all_ancestor_nodes(): self._add_node_to_mini_nodes(node) mini = { "comment": "Mini BMG", "nodes": self._mini_nodes, } return json.dumps(mini, indent=indent) def _add_mini_node(self, mini: MiniNode) -> None: mini["sequence"] = len(self._mini_nodes) self._mini_nodes.append(mini) def _node_to_mini_seq(self, node: bn.BMGNode) -> int: return self._node_to_mini_node[node]["sequence"] def _add_inputs(self, mini: MiniNode, node: bn.BMGNode) -> None: in_nodes = [self._node_to_mini_seq(i) for i in node.inputs] if len(in_nodes) > 0: mini["in_nodes"] = in_nodes def _make_query(self, node: bn.Query) -> MiniNode: mini: MiniNode = { "operator": "QUERY", "type": "NONE", "query_index": self._queries, } self._queries += 1 self._add_inputs(mini, node) return mini def _make_constant(self, value: Any) -> MiniNode: return { "operator": "CONSTANT", "type": "REAL", "value": float(value), # TODO: Deal with tensors } def _make_distribution(self, node: bn.DistributionNode) -> MiniNode: op = _node_type_to_distribution[type(node)] # pyre-ignore mini: MiniNode = { "operator": op, "type": "DISTRIBUTION", } self._add_inputs(mini, node) return mini def _make_operator(self, node: bn.OperatorNode) -> MiniNode: op = _node_type_to_operator[type(node)] # pyre-ignore mini: Dict[str, Any] = { "operator": op, "type": "REAL", } self._add_inputs(mini, node) return mini def _is_observed_sample(self, node: bn.BMGNode) -> bool: return isinstance(node, bn.SampleNode) and any( isinstance(o, bn.Observation) for o in node.outputs.items ) def _get_sample_observation(self, node: bn.SampleNode) -> Any: for o in node.outputs.items: if isinstance(o, bn.Observation): return o.value return None def _make_observed_sample(self, node: bn.SampleNode) -> None: # Various parts of our system handle observations slightly # differently, which can be somewhat confusing. Here is how # it works: # # * In the graph accumulator, an observation is a node whose # parent is a sample node, and which contains a constant. # # * In BMG, an observation is not a node. Rather, it is an # annotation associating a value with a sample node. # # * In MiniBMG, both the observation and the observed value # are nodes, and observations are parented by a distribution, # not by a sample. In this system, observations are essentially # a special kind of sample that has two parents: a distribution # and a value. 
# # How then will we transform the accumulated graph into MiniBMG? # # Suppose we have accumulated this graph, for arbitrary subgraphs # X and Y: # # 2 # / \ # BETA # | \ # SAMPLE QUERY # | # BERNOULLI # / \ # SAMPLE SAMPLE # / \ | # OBSERVE(0) X Y # # then we will emit this MiniBMG: # # 2 # / \ # BETA # | \ # SAMPLE QUERY # | # 0 BERNOULLI # / \ / \ # X OBSERVE SAMPLE # | # Y # # Note that subgraph X has been re-parented to the constant, # and the OBSERVE is also a child of the constant. # # We return None here because this code already takes care # of ensuring that the new nodes are added to the list, # that a sequence id is generated, and that the observed # sample is mapped to the mini const. ob = self._get_sample_observation(node) mini_const = self._make_constant(ob) self._add_mini_node(mini_const) const_seq = mini_const["sequence"] dist_seq = self._node_to_mini_seq(node.operand) in_nodes = [dist_seq, const_seq] mini_obs = { "operator": "OBSERVE", "type": "NONE", "in_nodes": in_nodes, } self._add_mini_node(mini_obs) self._node_to_mini_node[node] = mini_const def _add_node_to_mini_nodes(self, node: bn.BMGNode) -> None: mini: Optional[MiniNode] = None if self._is_observed_sample(node): # We have special handling for observed queries. # mini stays None because the special handler ensures # that the maps are set up correctly. assert isinstance(node, bn.SampleNode) self._make_observed_sample(node) elif isinstance(node, bn.Observation): # We do nothing when we encounter an observation node. # Rather, the observation is added to MiniBMG state # when the observed sample is handled. pass elif isinstance(node, bn.Query): mini = self._make_query(node) elif isinstance(node, bn.ConstantNode): mini = self._make_constant(node.value) elif isinstance(node, bn.DistributionNode): mini = self._make_distribution(node) elif isinstance(node, bn.OperatorNode): mini = self._make_operator(node) else: raise ValueError(f"{type(node)} is not supported by miniBMG") if mini is not None: self._add_mini_node(mini) self._node_to_mini_node[node] = mini def to_mini(bmg: BMGraphBuilder, indent=None) -> str: # TODO: Run an error checking pass that rejects nodes # we cannot map to Mini BMG return ToMini().to_json(bmg, indent=indent)
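# Illustrative sketch, not output captured from a real run: the shape of the
# MiniBMG JSON emitted above for a Beta(2, 2) sample with a query, as in the
# comments earlier in this file. The exact sequence numbering is an
# assumption based on topological order.
_example_mini_json = {
    "comment": "Mini BMG",
    "nodes": [
        {"operator": "CONSTANT", "type": "REAL", "value": 2.0, "sequence": 0},
        {
            "operator": "DISTRIBUTION_BETA",
            "type": "DISTRIBUTION",
            "in_nodes": [0, 0],
            "sequence": 1,
        },
        {"operator": "SAMPLE", "type": "REAL", "in_nodes": [1], "sequence": 2},
        {
            "operator": "QUERY",
            "type": "NONE",
            "query_index": 0,
            "in_nodes": [2],
            "sequence": 3,
        },
    ],
}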
beanmachine-main
src/beanmachine/ppl/compiler/gen_mini.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Convert a builder into a program that produces a graph builder. """ from typing import Dict import beanmachine.ppl.compiler.bmg_nodes as bn from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder class GenerateBuilder: bmg: BMGraphBuilder _nodes: Dict[bn.BMGNode, int] def __init__(self, bmg: BMGraphBuilder) -> None: self.bmg = bmg self._nodes = {} def _factory_name(self, node: bn.BMGNode) -> str: return type(node).__name__ def _to_id(self, index: int) -> str: return "n" + str(index) def _input_to_arg(self, node: bn.BMGNode) -> str: index = self._nodes[node] return self._to_id(index) def _inputs_to_args(self, node: bn.BMGNode) -> str: if isinstance(node, bn.ConstantNode): return str(node.value) arglist = ", ".join(self._input_to_arg(i) for i in node.inputs) if isinstance(node, bn.Observation): return f"{arglist}, {str(node.value)}" if isinstance(node, bn.LogSumExpNode): return f"[{arglist}]" if isinstance(node, bn.TensorNode): return f"[{arglist}], torch.Size([{str(len(node.inputs))}])" return arglist def _generate_builder(self) -> str: lines = [ "import beanmachine.ppl.compiler.bmg_nodes as bn", "import torch", "from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder", "from torch import tensor", "", "bmg = BMGraphBuilder()", ] # Nodes should be sorted so that ancestors always come # before descendents. self._nodes = {} for index, node in enumerate(self.bmg.all_nodes()): self._nodes[node] = index for node, index in self._nodes.items(): n = self._to_id(index) f = self._factory_name(node) a = self._inputs_to_args(node) lines.append(f"{n} = bmg.add_node(bn.{f}({a}))") return "\n".join(lines) def generate_builder(bmg: BMGraphBuilder) -> str: return GenerateBuilder(bmg)._generate_builder()
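# Sketch of the kind of program _generate_builder emits. The node class
# names and numbering below are assumptions for illustration (a graph
# accumulated from a single Bernoulli(0.5) sample), not captured output.
_example_generated = """\
import beanmachine.ppl.compiler.bmg_nodes as bn
import torch
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from torch import tensor

bmg = BMGraphBuilder()
n0 = bmg.add_node(bn.ProbabilityNode(0.5))
n1 = bmg.add_node(bn.BernoulliNode(n0))
n2 = bmg.add_node(bn.SampleNode(n1))"""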
beanmachine-main
src/beanmachine/ppl/compiler/gen_builder.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import beanmachine.ppl.compiler.bmg_nodes as bn import beanmachine.ppl.compiler.bmg_types as bt from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.fix_problem import ( Inapplicable, NodeFixer, NodeFixerResult, ) from beanmachine.ppl.compiler.lattice_typer import LatticeTyper def _is_real_matrix(t: bt.BMGLatticeType) -> bool: return any( isinstance(t, m) for m in { bt.RealMatrix, bt.PositiveRealMatrix, bt.NegativeRealMatrix, bt.ProbabilityMatrix, } ) def identity_transpose_fixer(bmg: BMGraphBuilder, typer: LatticeTyper) -> NodeFixer: """This fixer eliminates transpose nodes whose operand is a scalar (or single-entry matrix), since such a transpose is an identity.""" def fixer(node: bn.BMGNode) -> NodeFixerResult: if not isinstance(node, bn.TransposeNode): return Inapplicable in_node = node.inputs[0] if not typer.is_matrix(in_node): return in_node return Inapplicable return fixer
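# Hypothetical companion fixer, shown only to illustrate the NodeFixer
# protocol used above: return Inapplicable when a node does not match the
# pattern, otherwise return the replacement node. Transpose-of-transpose
# elimination is just a plausible example; no such fixer exists in the
# compiler.
def double_transpose_fixer(bmg: BMGraphBuilder, typer: LatticeTyper) -> NodeFixer:
    def fixer(node: bn.BMGNode) -> NodeFixerResult:
        if not isinstance(node, bn.TransposeNode):
            return Inapplicable
        inner = node.inputs[0]
        if isinstance(inner, bn.TransposeNode):
            # (M^T)^T is M, so consumers can use the original matrix.
            return inner.inputs[0]
        return Inapplicable

    return fixer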
beanmachine-main
src/beanmachine/ppl/compiler/fix_transpose.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import inspect import math import operator from types import MethodType from typing import Any, Callable, Dict, List, NoReturn, Optional, Set, Tuple import beanmachine.ppl.compiler.bmg_nodes as bn import torch import torch.distributions as dist from beanmachine.ppl.compiler.beanstalk_common import allowed_functions from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.bmg_nodes import BMGNode, ConstantNode _in_place_operator_names = { operator.iadd: "__iadd__", operator.iand: "__iand__", operator.ifloordiv: "__ifloordiv__", operator.ilshift: "__ilshift__", operator.imatmul: "__imatmul__", operator.imod: "__imod__", operator.imul: "__imul__", operator.ior: "__ior__", operator.ipow: "__ipow__", operator.irshift: "__irshift__", operator.isub: "__isub__", operator.itruediv: "__itruediv__", operator.ixor: "__ixor__", } _in_place_to_regular = { operator.iadd: operator.add, operator.iand: operator.and_, operator.ifloordiv: operator.floordiv, operator.ilshift: operator.lshift, operator.imatmul: operator.matmul, operator.imod: operator.mod, operator.imul: operator.mul, operator.ior: operator.or_, operator.ipow: operator.pow, operator.irshift: operator.rshift, operator.isub: operator.sub, operator.itruediv: operator.truediv, operator.ixor: operator.xor, } def _raise_unsupported(func: Any) -> NoReturn: if hasattr(func, "__name__"): func = func.__name__ raise ValueError(f"Function {func} is not supported by Bean Machine Graph.") def _is_in_place_operator(func: Callable) -> bool: return func in _in_place_to_regular def _ordinary_arg_or_const(arg: Any) -> bool: return isinstance(arg, bn.ConstantNode) or not isinstance(arg, BMGNode) def only_ordinary_arguments(args, kwargs) -> bool: if any(isinstance(arg, BMGNode) for arg in args): return False if any(isinstance(arg, BMGNode) for arg in kwargs.values()): return False return True def _only_ordinary_arguments_or_constants( args: List[Any], kwargs: Dict[str, Any] ) -> bool: return all(_ordinary_arg_or_const(arg) for arg in args) and all( _ordinary_arg_or_const(arg) for arg in kwargs.values() ) def _get_ordinary_value(x: Any) -> Any: return x.value if isinstance(x, bn.ConstantNode) else x def _is_standard_normal(x: Any) -> bool: return isinstance(x, dist.Normal) and x.mean == 0.0 and x.stddev == 1.0 def _is_phi_bound(f: Any, arguments: List[Any], kwargs: Dict[str, Any]) -> bool: # Is this Normal(0.0, 1.0).cdf(x) ? # TODO: Support kwargs return ( isinstance(f, MethodType) and f.__func__ is dist.Normal.cdf and len(arguments) == 1 and _is_standard_normal(f.__self__) ) def _is_phi_unbound(f: Any, arguments: List[Any], kwargs: Dict[str, Any]) -> bool: # Is this Normal.cdf(Normal(0.0, 1.0), x)?
# TODO: Support kwargs return ( f is dist.Normal.cdf and len(arguments) == 2 and _is_standard_normal(arguments[0]) ) def _is_phi(f: Any, arguments: List[Any], kwargs: Dict[str, Any]) -> bool: return _is_phi_unbound(f, arguments, kwargs) or _is_phi_bound(f, arguments, kwargs) def _flatten_all_lists(xs): """Takes a list-of-lists, with arbitrary nesting level; returns an iteration of all elements.""" if isinstance(xs, list): for x in xs: yield from _flatten_all_lists(x) else: yield xs def _list_to_zeros(xs): """Takes a list-of-lists, with arbitrary nesting level; returns a list-of-lists of the same shape but with every non-list element replaced with zero.""" if isinstance(xs, list): return [_list_to_zeros(x) for x in xs] return 0 def _hashable(x: Any) -> bool: # Oddly enough, Python does not allow you to test for set inclusion # if the object is not hashable. Since it is impossible for an unhashable # object to be in a set, Python could simply say no when asked if a set # contains any unhashable object. It does not, so we are forced to do so. # All hashable objects have a callable __hash__ attribute. if not hasattr(x, "__hash__"): return False if not isinstance(x.__hash__, Callable): return False # It is possible that callable __hash__ exists but throws, which makes it # unhashable. Eliminate that possibility as well. try: hash(x) except Exception: return False return True _empty_args = [] _empty_kwargs = {} # Oddly enough there does not appear to be an easy way to obtain the type # of builtin methods. _builtin_function_or_method = type(abs) def _is_any_distribution_ctor(f: Callable) -> bool: # We need to handle calls to constructors for distributions # that are NOT in the torch.distributions module such as # Unit. if not isinstance(f, type): return False mro = getattr(f, "__mro__", None) if mro is None: return False return dist.Distribution in mro def _is_any_torch_function(f: Callable) -> bool: # Torch functions we either know about or we reject them immediately; # we do not attempt to extract a graph of a model which contains # a call to an unknown torch function with stochastic arguments. # # Given a reference to a function, how can we know if it is # a torch function? Torch does not make it very easy on us to figure # out what module a function is from. Let's choose some typical # methods as examples, like arccos or erf: # # * torch.Tensor.arccos has no __module__ attribute. # * torch.arccos.__module__ is None but .__objclass__ has a module string. # * torch.special.erf.__module__ is the string "torch.special.erf.__module__" # * torch.tensor(1).arccos.__module__ is None and has no .__objclass__, but # does have a __self__ with a module. # # Our first step then is to see if we have a module. m = getattr(f, "__module__", None) if m is None: # We don't have a module. Do we have an __objclass__ with a module? oc = getattr(f, "__objclass__", None) if oc is not None: m = getattr(oc, "__module__", None) if m is None: # We still don't have a module. Maybe __self__ has a module. s = getattr(f, "__self__", None) if s is not None: m = getattr(s, "__module__", None) if m is not None: return isinstance(m, str) and (m == "torch" or m.startswith("torch.")) # We don't have a module or an objclass. # # If we have something like torch.arccos then we can simply # check the torch module to see if we can find this exact reference. 
return any(item is f for _, item in torch.__dict__.items()) def _is_tensor_unbound_instance_method(f: Callable) -> bool: # This identifies if a function object is a method *descriptor* # such as torch.Tensor.add; that is, the method before it is bound # to a particular self. This function does NOT identify if a function # is a bound instance method, such as torch.tensor(1.0).add. See below. if not inspect.ismethoddescriptor(f): return False objc = getattr(f, "__objclass__", None) return objc is torch.Tensor or objc in torch.Tensor.__bases__ def _is_tensor_bound_instance_method(f: Callable) -> bool: # This identifies if a function object is an instance method of # a tensor already bound to a particular self. All such functions # in torch are marked as builtin. return isinstance(f, _builtin_function_or_method) and isinstance( getattr(f, "__self__", None), torch.Tensor ) def _get_unbound_tensor_method(f: Callable) -> Callable: # Given a bound-to-self tensor instance method, obtain its corresponding # unbound descriptor. In normal Python, the protocol is that the bound # method has attribute __func__ pointing back to the descriptor but # torch does not follow this protocol. Rather, we'll look it up by name. assert _is_tensor_bound_instance_method(f) unbound = getattr(torch.Tensor, f.__name__, None) assert _is_tensor_unbound_instance_method(unbound) return unbound def canonicalize_function( function: Any, arguments: List[Any] ) -> Tuple[Callable, List[Any]]: # In Python a function that is a member of a class can be in either a "bound" # or "unbound" form. Suppose c is of type C and we are calling foo with argument # x. We could have: # # bound: c.foo(x) # unbound: C.foo(c, x) # # The bound version calls the unbound version. How? In the bound case the fetch # of c.foo returns a method object with attribute __self__ set to c and attribute # __func__ set to C.foo. The call on the method object then invokes # __func__(__self__, x). # # Unfortunately, calls to torch tensor methods do not follow this convention; # instead of returning a method object with __func__ and __self__, it returns # a builtin method object with __self__ but no __func__, so we call special helpers # for those. # # It is useful when analyzing calls to have them in a consistent form. This function # turns bound function calls into the equivalent unbound function call. if isinstance(function, MethodType): f = function.__func__ args = [function.__self__] + arguments assert isinstance(f, Callable) elif _is_tensor_bound_instance_method(function): f = _get_unbound_tensor_method(function) args = [function.__self__] + arguments elif isinstance(function, Callable): f = function args = arguments else: _raise_unsupported(function) assert isinstance(f, Callable), ( # pyre-ignore "_canonicalize_function should return callable " + f"but got {type(f)} {str(f)}" # pyre-ignore ) return (f, args) # pyre-ignore # This helper class is to solve a problem in the simulated # execution of the model during graph accumulation. Consider # a model fragment like: # # n = normal() # y = n.exp() # # During graph construction, n will be a SampleNode whose # operand is a NormalNode, but SampleNode does not have a # method "exp". 
# # The lifted program we execute will be something like: # # n = bmg.handle_function(normal, []) # func = bmg.handle_dot(n, "exp") # y = bmg.handle_function(func, []) # # The "func" that is returned is one of these KnownFunction # objects, which captures the notion "I am an invocation # of known function Tensor.exp on a receiver that is a BMG # node". We then turn that into a exp node in handle_function. class KnownFunction: receiver: BMGNode function: Callable def __init__(self, receiver: BMGNode, function: Callable) -> None: if not isinstance(receiver, BMGNode): raise TypeError( f"KnownFunction receiver must be BMGNode but is {type(receiver)}" ) if not isinstance(function, Callable): raise TypeError( f"KnownFunction function must be Callable but is {type(function)}" ) self.receiver = receiver self.function = function class SpecialFunctionCaller: # As we execute the lifted program, we accumulate graph nodes in the # graph builder,and the program passes around graph nodes instead of # regular values. What happens when a graph node is passed to a # function, or used as the receiver of a function? That function will be # expecting a regular value as its argument or receiver. # # Certain function calls are special because they call graph nodes to # be created; we have a dictionary here that maps Python function objects # to the graph builder method that knows how to create the appropriate # node type. # # There are also some functions which we know can be passed a graph node # and will treat it correctly even though it is a graph node and not # a value. For example, the function which constructs a dictionary # or the function which constructs a list. When we encounter one of # these functions in the lifted program, we do not create a graph node # or call a special helper function; we simply allow it to be called normally. _bmg: BMGraphBuilder _function_map: Dict[Callable, Callable] _special_tensor_instance_function_names: Set[str] def __init__(self, bmg: BMGraphBuilder) -> None: self._bmg = bmg self._function_map = { # # Built-in functions # float: self._builtin_float, # # Math functions # math.exp: self._math_exp, math.log: self._math_log, # # Operators as functions # operator.add: self._operator_add, operator.and_: self._operator_and, operator.contains: self._operator_contains, operator.eq: self._operator_eq, operator.floordiv: self._operator_floordiv, operator.ge: self._operator_ge, operator.gt: self._operator_gt, operator.inv: self._operator_inv, operator.is_: self._operator_is, operator.is_not: self._operator_is_not, operator.le: self._operator_le, operator.lshift: self._operator_lshift, operator.lt: self._operator_lt, operator.matmul: self._operator_matmul, operator.mod: self._operator_mod, operator.mul: self._operator_mul, operator.ne: self._operator_ne, operator.neg: self._operator_neg, operator.not_: self._operator_not, operator.or_: self._operator_or, operator.pos: self._operator_pos, operator.pow: self._operator_pow, operator.rshift: self._operator_rshift, operator.sub: self._operator_sub, operator.truediv: self._operator_truediv, operator.xor: self._operator_xor, # # # Torch distributions # # (Remember to add a case to distribution_to_node.) 
# dist.Bernoulli: self._dist_bernoulli, dist.Beta: self._dist_beta, dist.Binomial: self._dist_binomial, dist.Categorical: self._dist_categorical, # TODO: Cauchy dist.Chi2: self._dist_chi2, # TODO: ContinuousBernoulli dist.Dirichlet: self._dist_dirichlet, # TODO: Exponential # TODO: FisherSnedecor dist.Gamma: self._dist_gamma, # TODO: Geometric # TODO: Gumbel dist.HalfCauchy: self._dist_halfcauchy, dist.HalfNormal: self._dist_halfnormal, # TODO: Independent # TODO: Kumaraswamy dist.LKJCholesky: self._dist_lkj_cholesky, # TODO: Laplace # TODO: LogNormal # TODO: LowRankMultivariateNormal # TODO: MixtureSameFamily # TODO: Multinomial # TODO: MultivariateNormal # TODO: NegativeBinomial dist.Normal: self._dist_normal, # TODO: OneHotCategorical # TODO: Pareto # TODO: Poisson dist.Poisson: self._dist_poisson, # TODO: RelaxedBernoulli # TODO: LogitRelaxedBernoulli # TODO: RelaxedOneHotCategorical dist.StudentT: self._dist_studentt, # TODO: TransformedDistribution dist.Uniform: self._dist_uniform, # TODO: VonMises # TODO: Weibull # # Torch functions # torch.Tensor.add: self._torch_add, torch.add: self._torch_add, torch.Tensor.bitwise_and: self._torch_bitwise_and, torch.bitwise_and: self._torch_bitwise_and, torch.Tensor.bitwise_not: self._torch_bitwise_not, torch.bitwise_not: self._torch_bitwise_not, torch.Tensor.bitwise_or: self._torch_bitwise_or, torch.bitwise_or: self._torch_bitwise_or, torch.Tensor.bitwise_xor: self._torch_bitwise_xor, torch.bitwise_xor: self._torch_bitwise_xor, torch.Tensor.bitwise_left_shift: self._torch_bitwise_left_shift, torch.bitwise_left_shift: self._torch_bitwise_left_shift, torch.Tensor.bitwise_right_shift: self._torch_bitwise_right_shift, torch.bitwise_right_shift: self._torch_bitwise_right_shift, torch.Tensor.cholesky: self._torch_cholesky, torch.linalg.cholesky: self._torch_cholesky, torch.linalg.cholesky_ex: self._torch_cholesky_ex, torch.Tensor.div: self._torch_div, torch.div: self._torch_div, torch.Tensor.divide: self._torch_div, torch.divide: self._torch_div, torch.Tensor.eq: self._torch_eq, torch.eq: self._torch_eq, torch.Tensor.equal: self._torch_eq, torch.equal: self._torch_eq, torch.Tensor.exp: self._torch_exp, torch.exp: self._torch_exp, torch.Tensor.exp2: self._torch_exp2, torch.exp2: self._torch_exp2, torch.special.exp2: self._torch_exp2, torch.Tensor.expm1: self._torch_expm1, torch.expm1: self._torch_expm1, torch.special.expm1: self._torch_expm1, torch.Tensor.float: self._torch_float, # TODO: float_power torch.Tensor.floor_divide: self._torch_floor_divide, torch.floor_divide: self._torch_floor_divide, torch.Tensor.fmod: self._torch_fmod, torch.fmod: self._torch_fmod, torch.Tensor.ge: self._torch_ge, torch.ge: self._torch_ge, torch.Tensor.greater: self._torch_gt, torch.greater: self._torch_gt, torch.Tensor.greater_equal: self._torch_ge, torch.greater_equal: self._torch_ge, torch.Tensor.gt: self._torch_gt, torch.gt: self._torch_gt, torch.Tensor.int: self._torch_int, torch.Tensor.item: self._torch_item, torch.Tensor.le: self._torch_le, torch.le: self._torch_le, torch.Tensor.less: self._torch_lt, torch.less: self._torch_lt, torch.Tensor.less_equal: self._torch_le, torch.less_equal: self._torch_le, torch.Tensor.log: self._torch_log, torch.log: self._torch_log, torch.Tensor.log10: self._torch_log10, torch.log10: self._torch_log10, torch.Tensor.log1p: self._torch_log1p, torch.log1p: self._torch_log1p, torch.special.log1p: self._torch_log1p, torch.Tensor.log2: self._torch_log2, torch.log2: self._torch_log2, # TODO: logical_and # TODO: special.logit 
torch.Tensor.logical_not: self._torch_logical_not, torch.logical_not: self._torch_logical_not, # TODO: logical_or # TODO: logical_xor torch.Tensor.logsumexp: self._torch_logsumexp, torch.logsumexp: self._torch_logsumexp, torch.special.logsumexp: self._torch_logsumexp, torch.Tensor.logaddexp: self._torch_logaddexp, torch.logaddexp: self._torch_logaddexp, torch.Tensor.lt: self._torch_lt, torch.lt: self._torch_lt, torch.Tensor.matmul: self._torch_matmul, torch.matmul: self._torch_matmul, torch.Tensor.mm: self._torch_mm, torch.mm: self._torch_mm, torch.Tensor.mul: self._torch_mul, torch.mul: self._torch_mul, torch.Tensor.multiply: self._torch_mul, torch.multiply: self._torch_mul, torch.Tensor.ne: self._torch_ne, torch.ne: self._torch_ne, torch.Tensor.not_equal: self._torch_ne, torch.not_equal: self._torch_ne, torch.Tensor.neg: self._torch_neg, torch.neg: self._torch_neg, torch.Tensor.negative: self._torch_neg, torch.negative: self._torch_neg, torch.Tensor.pow: self._torch_pow, torch.pow: self._torch_pow, torch.Tensor.remainder: self._torch_fmod, torch.remainder: self._torch_fmod, torch.sigmoid: self._torch_sigmoid, torch.Tensor.sigmoid: self._torch_sigmoid, torch.special.expit: self._torch_sigmoid, torch.Tensor.sqrt: self._torch_sqrt, torch.sqrt: self._torch_sqrt, torch.Tensor.sub: self._torch_sub, torch.sub: self._torch_sub, torch.Tensor.subtract: self._torch_sub, torch.subtract: self._torch_sub, torch.Tensor.sum: self._torch_sum, torch.sum: self._torch_sum, torch.Tensor.true_divide: self._torch_div, torch.true_divide: self._torch_div, torch.transpose: self._torch_transpose, torch.Tensor.transpose: self._torch_transpose, # Torch functions on distributions dist.Distribution.log_prob: self._dist_log_prob, dist.Normal.log_prob: self._dist_log_prob, } self._special_tensor_instance_function_names = { f.__name__ for f in self._function_map if _is_tensor_unbound_instance_method(f) } def _is_special_tensor_bound_instance_method_name(self, name: str) -> bool: return name in self._special_tensor_instance_function_names def bind_torch_instance_function( self, receiver: BMGNode, name: str ) -> KnownFunction: # If we have a stochastic receiver of a dot operator, we need # to know if it might be a function on either a stochastic tensor # or distribution. if hasattr(torch.Tensor, name): return KnownFunction(receiver, getattr(torch.Tensor, name)) if hasattr(dist.Distribution, name): return KnownFunction(receiver, getattr(dist.Distribution, name)) _raise_unsupported(name) def is_special_tensor_bound_instance_method(self, f: Callable) -> bool: return self._is_special_tensor_bound_instance_method_name( f.__name__ ) and _is_tensor_bound_instance_method(f) def _value_to_node(self, arg: Any) -> BMGNode: if isinstance(arg, BMGNode): return arg if isinstance(arg, dist.Distribution): return self.distribution_to_node(arg) return self._bmg.add_constant(arg) def is_special_function( self, func: Callable, args: List[Any] = _empty_args, # TODO: Unused kwargs: Dict[str, Any] = _empty_kwargs, # TODO: Unused ) -> bool: if isinstance(func, KnownFunction): return True if _is_any_torch_function(func): return True if _is_any_distribution_ctor(func): return True # TODO: What if its a member function of a distribution that's # not in torch.distributions? if not _hashable(func): return False if func in allowed_functions: return True if func in self._function_map: return True # All in-place operators are special functions. 
if _is_in_place_operator(func): return True return False def _canonicalize_function( self, func: Callable, args: List[Any] ) -> Tuple[Callable, List[Any]]: if isinstance(func, KnownFunction): args = [func.receiver] + args func = func.function else: func, args = canonicalize_function(func, args) return func, args def do_special_call_maybe_stochastic( self, func: Any, args: List[Any], kwargs: Dict[str, Any] = _empty_kwargs, ) -> Any: # If we possibly can, just call the original function with ordinary arguments. # Otherwise, convert everything to a graph node and call our helper which # does node construction. assert self.is_special_function(func, args, kwargs) func, args = self._canonicalize_function(func, args) if func is torch.tensor: return self._tensor_constructor(*args, **kwargs) if ( _only_ordinary_arguments_or_constants(args, kwargs) or func in allowed_functions ): new_args = (_get_ordinary_value(arg) for arg in args) new_kwargs = {key: _get_ordinary_value(arg) for key, arg in kwargs.items()} return func(*new_args, **new_kwargs) if _is_in_place_operator(func): return self._in_place_operator(func, *args) return self.do_special_call_always_stochastic(func, args, kwargs) def do_special_call_always_stochastic( self, func: Callable, args: List[Any], kwargs: Dict[str, Any] = _empty_kwargs, ) -> BMGNode: # Never call the original function with ordinary arguments. Convert everything # to a graph node and call our helper which does node construction. assert self.is_special_function(func, args, kwargs) # We should never call do_special_call_always_stochastic on (1) a tensor # constructor, or (2) a function known to be allowed to take any values. assert func not in allowed_functions assert func is not torch.tensor func, args = self._canonicalize_function(func, args) if _is_phi_unbound(func, args, kwargs): args = args[1:] node_constructor = self._phi elif _hashable(func) and func in self._function_map: node_constructor = self._function_map[func] else: # We are trying to do an always-stochastic call on a function that # we do not yet know how to handle. _raise_unsupported(func) new_args = (self._value_to_node(arg) for arg in args) new_kwargs = {key: self._value_to_node(arg) for key, arg in kwargs.items()} return node_constructor(*new_args, **new_kwargs) # pyre-ignore # # Builtins; these must have the same signature as their corresponding # builtin functions. # def _builtin_float(self, input: BMGNode) -> BMGNode: # TODO: Do we want to do this at all? Why should float(t) insert a # TO_REAL node into the graph? We can simply insert TO_REAL where required # by the BMG type system. return self._bmg.add_to_real(input) # # Math functions # def _math_exp(self, input: BMGNode) -> BMGNode: # TODO: Right signature? return self._bmg.add_exp(input) def _math_log(self, input: BMGNode) -> BMGNode: return self._bmg.add_log(input) # # Distributions; these must have the same signature as the corresponding # constructor. 
# def distribution_to_node( # noqa self, distribution: dist.Distribution ) -> bn.DistributionNode: t = type(distribution) if isinstance(distribution, dist.Bernoulli): args = [distribution.probs] elif isinstance(distribution, dist.Beta): args = [distribution.concentration1, distribution.concentration0] elif isinstance(distribution, dist.Binomial): args = [distribution.total_count, distribution.probs] elif isinstance(distribution, dist.Categorical): args = [distribution.probs] elif isinstance(distribution, dist.Chi2): args = [distribution.df] elif isinstance(distribution, dist.Dirichlet): args = [distribution.concentration] elif isinstance(distribution, dist.Gamma): args = [distribution.concentration, distribution.rate] elif isinstance(distribution, dist.HalfCauchy): args = [distribution.scale] elif isinstance(distribution, dist.HalfNormal): args = [distribution.scale] elif isinstance(distribution, dist.LKJCholesky): args = [distribution.dim, distribution.concentration] elif isinstance(distribution, dist.Normal): args = [distribution.mean, distribution.stddev] elif isinstance(distribution, dist.Poisson): args = [distribution.rate] elif isinstance(distribution, dist.StudentT): args = [distribution.df, distribution.loc, distribution.scale] elif isinstance(distribution, dist.Uniform): args = [distribution.low, distribution.high] else: # TODO: Better error raise TypeError( f"Distribution '{t.__name__}' is not supported by Bean Machine Graph." ) d = self.do_special_call_always_stochastic(t, args, {}) assert isinstance(d, bn.DistributionNode) return d def _dist_bernoulli( self, probs: Optional[BMGNode] = None, logits: Optional[BMGNode] = None, validate_args: Any = None, ) -> BMGNode: if (probs is None and logits is None) or ( probs is not None and logits is not None ): raise ValueError("Bernoulli requires exactly one of probs or logits") if logits is not None: return self._bmg.add_bernoulli_logit(logits) return self._bmg.add_bernoulli(probs) def _dist_beta( self, concentration1: BMGNode, concentration0: BMGNode, validate_args: Any = None, ) -> BMGNode: return self._bmg.add_beta(concentration1, concentration0) def _dist_binomial( self, total_count: Optional[BMGNode] = None, probs: Optional[BMGNode] = None, logits: Optional[BMGNode] = None, validate_args: Any = None, ) -> BMGNode: if (probs is None and logits is None) or ( probs is not None and logits is not None ): raise ValueError("Binomial requires exactly one of probs or logits") # TODO: Create a test case for Binomial(probs=0.5) where total_count # is omitted. 
if total_count is None: total_count = self._value_to_node(1) if logits is not None: return self._bmg.add_binomial_logit(total_count, logits) return self._bmg.add_binomial(total_count, probs) def _dist_categorical( self, probs: Optional[BMGNode] = None, logits: Optional[BMGNode] = None, validate_args: Any = None, ) -> BMGNode: if (probs is None and logits is None) or ( probs is not None and logits is not None ): raise ValueError("Categorical requires exactly one of probs or logits") if logits is not None: return self._bmg.add_categorical_logit(logits) return self._bmg.add_categorical(probs) def _dist_chi2(self, df: BMGNode, validate_args: Any = None) -> BMGNode: return self._bmg.add_chi2(df) def _dist_dirichlet(self, concentration: BMGNode, validate_args=None) -> BMGNode: return self._bmg.add_dirichlet(concentration) def _dist_gamma( self, concentration: BMGNode, rate: BMGNode, validate_args=None ) -> BMGNode: return self._bmg.add_gamma(concentration, rate) def _dist_halfcauchy(self, scale: BMGNode, validate_args=None) -> BMGNode: return self._bmg.add_halfcauchy(scale) def _dist_halfnormal(self, scale: Any, validate_args=None) -> BMGNode: return self._bmg.add_halfnormal(scale) def _dist_normal(self, loc: BMGNode, scale: BMGNode, validate_args=None) -> BMGNode: return self._bmg.add_normal(loc, scale) def _dist_poisson(self, rate: BMGNode) -> BMGNode: return self._bmg.add_poisson(rate) def _dist_studentt( self, df: BMGNode, loc: Optional[BMGNode] = None, scale: Optional[BMGNode] = None, validate_args=None, ) -> BMGNode: if loc is None: loc = self._value_to_node(0) if scale is None: scale = self._value_to_node(1) return self._bmg.add_studentt(df, loc, scale) def _dist_uniform(self, low: BMGNode, high: BMGNode, validate_args=None) -> BMGNode: return self._bmg.add_uniform(low, high) def _dist_lkj_cholesky(self, dim: BMGNode, eta: BMGNode) -> BMGNode: if ( isinstance(dim, bn.ConstantNode) and isinstance(dim.value, int) and dim.value >= 2 ): return self._bmg.add_lkj_cholesky(dim, eta) else: raise ValueError("LKJ Cholesky distribution must have dimension >= 2") # # Methods on distributions # def _dist_log_prob(self, d: BMGNode, value: BMGNode) -> BMGNode: return self._bmg.add_log_prob(d, value) # # Tensor constructor # def _tensor_constructor(self, data: Any) -> Any: # The tensor constructor is a bit tricky because it takes a single # argument that is either a value or a list of values. We need: # (1) a flattened list of all the arguments, and # (2) the size of the original tensor. flattened_args = list(_flatten_all_lists(data)) if not any(isinstance(arg, BMGNode) for arg in flattened_args): # None of the arguments are graph nodes. We can just # construct the tensor normally. return torch.tensor(data) # At least one of the arguments is a graph node. # # If we're constructing a singleton tensor and the single value # is a graph node, we can just keep it as that graph node. if len(flattened_args) == 1: return flattened_args[0] # We have two or more arguments and at least one is a graph node. # Convert them all to graph nodes. for index, arg in enumerate(flattened_args): if not isinstance(arg, BMGNode): flattened_args[index] = self._bmg.add_constant(arg) # What shape is this tensor? Rather than duplicating the logic in the # tensor class, let's just construct the same shape made of entirely # zeros and then ask what shape it is. 
size = torch.tensor(_list_to_zeros(data)).size() return self._bmg.add_tensor(size, *flattened_args) # # Tensor functions; these must have the same signature as the # corresponding torch function. # # TODO: We do not support mutation of stochastic tensors; we should produce an # error if there are any "out" values. def _phi(self, value: BMGNode) -> BMGNode: return self._bmg.add_phi(value) def _torch_add( self, input: BMGNode, other: BMGNode, alpha: Optional[BMGNode] = None, out: Any = None, ) -> BMGNode: # TODO: tensor add has the semantics input + alpha * other; if alpha is present # then we need to generate a multiply and an addition. return self._bmg.add_addition(input, other) def _torch_bitwise_and( self, input: BMGNode, other: BMGNode, out: Any = None ) -> BMGNode: return self._bmg.add_bitand(input, other) def _torch_bitwise_left_shift( self, input: BMGNode, other: BMGNode, out: Any = None ) -> BMGNode: # TODO: In torch, a << b is not bitwise at all. Rather it is simply an # an alias for a * (2 ** b). Make a rewriter that turns shifts into # this operation. return self._bmg.add_lshift(input, other) def _torch_bitwise_not(self, input: BMGNode, out: Any = None) -> BMGNode: return self._bmg.add_invert(input) def _torch_bitwise_or( self, input: BMGNode, other: BMGNode, out: Any = None ) -> BMGNode: return self._bmg.add_bitor(input, other) def _torch_bitwise_right_shift( self, input: BMGNode, other: BMGNode, out: Any = None ) -> BMGNode: # TODO: In torch, a >> b is not bitwise at all. Rather it is simply an # an alias for a * (2 ** -b). Make a rewriter that turns shifts into # this operation. return self._bmg.add_rshift(input, other) def _torch_bitwise_xor( self, input: BMGNode, other: BMGNode, out: Any = None ) -> BMGNode: return self._bmg.add_bitxor(input, other) def _torch_cholesky( self, input: BMGNode, upper: Optional[BMGNode] = None, out: Any = None, ) -> BMGNode: # TODO: What to do with upper? return self._bmg.add_cholesky(input) def _torch_cholesky_ex( self, input: BMGNode, upper: Optional[BMGNode] = None, check_errors: Optional[BMGNode] = None, out: Any = None, ) -> BMGNode: # TODO: What to do with upper and check_errors? # cholesky_ex returns a named tuple (L, info) where # L is the result matrix and info is a tensor containing # an index saying which input element was not # positive-definite. We pretend that this operation always # succeeds and return a graph node and a zero error index. return torch.return_types.linalg_cholesky_ex( # pyre-ignore (self._bmg.add_cholesky(input), torch.tensor(0)) ) def _torch_transpose( self, input: BMGNode, dim0: BMGNode, dim1: BMGNode, upper: Optional[BMGNode] = None, out: Any = None, ) -> BMGNode: constD1 = dim0.value if isinstance(dim0, ConstantNode) else None constD2 = dim1.value if isinstance(dim1, ConstantNode) else None def valid_dim_or_none(c): return c is None or isinstance(c, int) and 0 <= c <= 1 valid_dims = valid_dim_or_none(constD1) and valid_dim_or_none(constD2) matched_dims = constD1 is not None and constD1 == constD2 if not valid_dims or matched_dims: raise ValueError( f"Unsupported dimension arguments for transpose: {constD1} and {constD2}" ) else: return self._bmg.add_transpose(input) def _torch_div( self, input: BMGNode, other: BMGNode, rounding_mode: Optional[BMGNode] = None, out: Any = None, ) -> BMGNode: # TODO: Should we give an error if there is a rounding mode? 
return self._bmg.add_division(input, other) def _torch_eq(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode: return self._bmg.add_equal(input, other) def _torch_exp(self, input: BMGNode, out: Any = None) -> BMGNode: return self._bmg.add_exp(input) def _torch_exp2(self, input: BMGNode, out: Any = None) -> BMGNode: return self._bmg.add_exp2(input) def _torch_expm1(self, input: BMGNode, out: Any = None) -> BMGNode: return self._bmg.add_expm1(input) def _torch_float( self, input: BMGNode, memory_format: Optional[BMGNode] = None ) -> BMGNode: # TODO: Do we want to do this at all? Why should t.float() insert a # TO_REAL node into the graph? We can simply insert TO_REAL where required # by the BMG type system. # TODO: If we do keep this, what should we do with memory_format? return self._bmg.add_to_real(input) def _torch_floor_divide( self, input: BMGNode, other: BMGNode, out: Any = None, ) -> BMGNode: return self._bmg.add_floordiv(input, other) def _torch_fmod( self, input: BMGNode, other: BMGNode, out: Any = None, ) -> BMGNode: return self._bmg.add_mod(input, other) def _torch_ge(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode: return self._bmg.add_greater_than_equal(input, other) def _torch_gt(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode: return self._bmg.add_greater_than(input, other) def _torch_int( self, input: BMGNode, memory_format: Optional[BMGNode] = None ) -> BMGNode: # TODO: What should we do with memory_format? return self._bmg.add_to_int(input) def _torch_item(self, input: BMGNode) -> Any: return self._bmg.add_item(input) def _torch_le(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode: return self._bmg.add_less_than_equal(input, other) def _torch_log(self, input: BMGNode, out: Any = None) -> Any: return self._bmg.add_log(input) def _torch_log10(self, input: BMGNode, out: Any = None) -> Any: return self._bmg.add_log10(input) def _torch_log1p(self, input: BMGNode, out: Any = None) -> Any: return self._bmg.add_log1p(input) def _torch_log2(self, input: BMGNode, out: Any = None) -> Any: return self._bmg.add_log2(input) def _torch_logical_not(self, input: BMGNode, out: Any = None) -> Any: return self._bmg.add_not(input) def _torch_logsumexp( self, input: BMGNode, dim: BMGNode, keepdim: Optional[BMGNode] = None, out: Any = None, ) -> Any: if keepdim is None: keepdim = self._value_to_node(False) return self._bmg.add_logsumexp_torch(input, dim, keepdim) def _torch_logaddexp( self, input: BMGNode, other: BMGNode, out: Any = None, ) -> Any: return self._bmg.add_logaddexp(input, other) def _torch_lt(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode: return self._bmg.add_less_than(input, other) def _torch_matmul(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode: # TODO: mm and matmul have different behavior; we probably need to make # a distinction here. return self._bmg.add_matrix_multiplication(input, other) def _torch_mm(self, input: BMGNode, mat2: BMGNode, out: Any = None) -> BMGNode: # TODO: mm and matmul have different behavior; we probably need to make # a distinction here. 
        return self._bmg.add_matrix_multiplication(input, mat2)

    def _torch_mul(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
        return self._bmg.add_multiplication(input, other)

    def _torch_ne(self, input: BMGNode, other: BMGNode, out: Any = None) -> BMGNode:
        return self._bmg.add_not_equal(input, other)

    def _torch_neg(self, input: BMGNode, out: Any = None) -> BMGNode:
        return self._bmg.add_negate(input)

    def _torch_pow(self, input: BMGNode, exponent: BMGNode, out: Any = None) -> BMGNode:
        return self._bmg.add_power(input, exponent)

    def _torch_sigmoid(self, input: BMGNode, out: Any = None) -> BMGNode:
        return self._bmg.add_logistic(input)

    def _torch_sqrt(self, input: BMGNode, out: Any = None) -> Any:
        return self._bmg.add_squareroot(input)

    def _torch_sub(
        self,
        input: BMGNode,
        other: BMGNode,
        alpha: Optional[BMGNode] = None,
        out: Any = None,
    ) -> BMGNode:
        # TODO: tensor sub has the semantics input - alpha * other; if alpha is present
        # then we need to generate a multiply and a subtraction.
        return self._bmg.add_subtraction(input, other)

    def _torch_sum(
        self,
        input: BMGNode,
        dtype: Any = None,
    ) -> Any:
        return self._bmg.add_sum(input)

    #
    # Operators as functions
    #

    def _operator_add(self, a: BMGNode, b: BMGNode) -> BMGNode:
        return self._bmg.add_addition(a, b)

    def _operator_and(self, a: BMGNode, b: BMGNode) -> BMGNode:
        return self._bmg.add_bitand(a, b)

    def _operator_contains(self, a: BMGNode, b: BMGNode) -> BMGNode:
        # Note that "a" is the container and "b" is the query. That is,
        # this means "b in a", NOT "a in b".
        return self._bmg.add_in(b, a)

    def _operator_eq(self, a: Any, b: Any) -> Any:
        return self._bmg.add_equal(a, b)

    def _operator_floordiv(self, a: BMGNode, b: BMGNode) -> BMGNode:
        return self._bmg.add_floordiv(a, b)

    def _operator_ge(self, a: Any, b: Any) -> Any:
        return self._bmg.add_greater_than_equal(a, b)

    def _operator_gt(self, a: Any, b: Any) -> Any:
        return self._bmg.add_greater_than(a, b)

    def _operator_inv(self, obj: BMGNode) -> BMGNode:
        return self._bmg.add_invert(obj)

    def _operator_is(self, a: Any, b: Any) -> Any:
        return self._bmg.add_is(a, b)

    def _operator_is_not(self, a: Any, b: Any) -> Any:
        return self._bmg.add_is_not(a, b)

    def _operator_le(self, a: Any, b: Any) -> Any:
        return self._bmg.add_less_than_equal(a, b)

    def _operator_lshift(self, a: BMGNode, b: BMGNode) -> BMGNode:
        # TODO: In torch, a << b is not bitwise at all. Rather it is simply
        # an alias for a * (2 ** b). Make a rewriter that turns shifts into
        # this operation.
        return self._bmg.add_lshift(a, b)

    def _operator_lt(self, a: Any, b: Any) -> Any:
        return self._bmg.add_less_than(a, b)

    def _operator_matmul(self, a: BMGNode, b: BMGNode) -> BMGNode:
        return self._bmg.add_matrix_multiplication(a, b)

    def _operator_mod(self, a: BMGNode, b: BMGNode) -> BMGNode:
        return self._bmg.add_mod(a, b)

    def _operator_mul(self, a: BMGNode, b: BMGNode) -> BMGNode:
        return self._bmg.add_multiplication(a, b)

    def _operator_ne(self, a: Any, b: Any) -> Any:
        return self._bmg.add_not_equal(a, b)

    def _operator_neg(self, obj: BMGNode) -> BMGNode:
        return self._bmg.add_negate(obj)

    def _operator_not(self, obj: BMGNode) -> BMGNode:
        return self._bmg.add_not(obj)

    def _operator_or(self, a: BMGNode, b: BMGNode) -> BMGNode:
        return self._bmg.add_bitor(a, b)

    def _operator_pos(self, obj: BMGNode) -> BMGNode:
        # Unary + is an identity on graph nodes.
        return obj

    def _operator_pow(self, a: BMGNode, b: BMGNode) -> BMGNode:
        return self._bmg.add_power(a, b)

    def _operator_rshift(self, a: BMGNode, b: BMGNode) -> BMGNode:
        # TODO: In torch, a >> b is not bitwise at all. Rather it is simply
        # an alias for a * (2 ** -b). Make a rewriter that turns shifts into
        # this operation.
        return self._bmg.add_rshift(a, b)

    def _operator_sub(self, a: BMGNode, b: BMGNode) -> BMGNode:
        return self._bmg.add_subtraction(a, b)

    def _operator_truediv(self, a: BMGNode, b: BMGNode) -> BMGNode:
        return self._bmg.add_division(a, b)

    def _operator_xor(self, a: BMGNode, b: BMGNode) -> BMGNode:
        return self._bmg.add_bitxor(a, b)

    #
    # Augmented assignment operators
    #

    def _in_place_operator(
        self,
        native_in_place: Callable,  # operator.iadd, for example
        left: Any,
        right: Any,
    ) -> Any:
        # Handling augmented assignments (+=, -=, *=, and so on) has a lot of cases;
        # to cut down on code duplication we call this higher-level method. Throughout
        # the comments below we assume that we're handling a +=; the logic is the same
        # for all the operators.

        # TODO: We have a problem that we need to resolve regarding compilation of models
        # which have mutations of aliased tensors. Compare the action of these two
        # similar models in the original Bean Machine implementation:
        #
        # @functional def foo():
        #   x = flip() # 0 or 1
        #   y = x      # y is an alias for x
        #   y += 1     # y is mutated in place and continues to alias x
        #   return x   # returns 1 or 2
        #
        # vs
        #
        # @functional def foo():
        #   x = flip() # 0 or 1
        #   y = x      # y is an alias for x
        #   y = y + 1  # y no longer aliases x; y is 1 or 2
        #   return x   # returns 0 or 1
        #
        # Suppose we are asked to compile the first model; how should we execute
        # the rewritten form of it so as to accumulate the correct graph? Unlike
        # tensors, graph nodes are not mutable!
        #
        # Here's what we're going to do for now:
        #
        # If neither operand is a graph node then do exactly what the model would
        # normally do:
        if not isinstance(left, BMGNode) and not isinstance(right, BMGNode):
            return native_in_place(left, right)

        assert native_in_place in _in_place_to_regular
        native_regular = _in_place_to_regular[native_in_place]

        # At least one operand is a graph node. If we have tensor += graph_node
        # or graph_node += anything then optimistically assume that there
        # is NOT any alias of the mutated left side, and treat the += as though
        # it is a normal addition.
        #
        # TODO: Should we produce some sort of warning here telling the user that
        # the compiled model semantics might be different than the original model?
        # Or is that too noisy? There are going to be a lot of models with += where
        # one of the operands is an ordinary tensor and one is a graph node, but which
        # do not have any aliasing problem.
        if isinstance(left, torch.Tensor) or isinstance(left, BMGNode):
            return self.do_special_call_always_stochastic(
                native_regular, [left, right], {}
            )

        # If we've made it here then we have x += graph_node, where x is not a
        # tensor. There are two possibilities: either x is some type which implements
        # mutating in-place +=, or it is not. If it is, then just call the mutator
        # and hope for the best.
        #
        # TODO: This scenario is another opportunity for a warning or error, since
        # the model is probably not one that can be compiled if it is depending on
        # in-place mutation of an object which has a stochastic quantity added to it.
        assert isinstance(right, BMGNode)
        assert native_in_place in _in_place_operator_names
        if hasattr(left, _in_place_operator_names[native_in_place]):
            # It is possible that the operator exists but either returns
            # NotImplemented or raises NotImplementedError. In either case,
            # assume that we can fall back to non-mutating addition.
            try:
                result = native_in_place(left, right)
                if result is not NotImplemented:
                    return result
            except NotImplementedError:
                pass

        # We have x += graph_node, and x is not mutating in place, so just
        # do x + graph_node:
        return self.do_special_call_maybe_stochastic(native_regular, [left, right], {})
beanmachine-main
src/beanmachine/ppl/compiler/special_function_caller.py
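# Illustrative sketch, not part of the original file: the aliasing hazard that
# _in_place_operator works around, shown with ordinary tensors. Graph nodes are
# immutable, so the compiler effectively rewrites "y += 1" as "y = y + 1", which
# changes the behavior of any model that relies on tensor aliasing:

import torch

x = torch.tensor([0.0])
y = x          # y aliases x
y += 1.0       # in-place: mutates the storage shared with x
assert x.item() == 1.0

x = torch.tensor([0.0])
y = x          # y aliases x
y = y + 1.0    # non-mutating: y becomes a fresh tensor, x is unchanged
assert x.item() == 0.0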
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Any, Callable, Dict, List, Optional, Set

from beanmachine.ppl.compiler.bmg_nodes import BMGNode
from beanmachine.ppl.utils.multidictionary import MultiDictionary

# We wish to associate the program state at the time of node creation
# with that node, so that we can produce better diagnostics, error messages,
# and so on.


class FunctionCall:
    # A record of a particular function call.
    func: Callable
    args: Any
    kwargs: Dict[str, Any]

    def __init__(self, func: Callable, args: Any, kwargs: Dict[str, Any]) -> None:
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def __str__(self) -> str:
        func = self.func.__name__
        args = ",".join(str(arg) for arg in self.args)
        kwargs = ",".join(
            sorted(
                (str(kwarg) + "=" + str(self.kwargs[kwarg])) for kwarg in self.kwargs
            )
        )
        comma = "," if len(args) > 0 and len(kwargs) > 0 else ""
        return f"{func}({args}{comma}{kwargs})"


class CallStack:
    _stack: List[FunctionCall]

    def __init__(self) -> None:
        self._stack = []

    def push(self, call: FunctionCall) -> None:
        self._stack.append(call)

    def pop(self) -> FunctionCall:
        return self._stack.pop()

    def peek(self) -> Optional[FunctionCall]:
        return self._stack[-1] if len(self._stack) > 0 else None


_empty_kwargs = {}


class ExecutionContext:
    # An execution context does two jobs right now:
    # * Tracks the current call stack
    # * Maintains a map from nodes to the function call that
    #   created them.
    #
    # NOTE: Because most nodes are deduplicated, it is possible that
    # one node is associated with multiple calls. We therefore have
    # a multidictionary that maps from nodes to a set of function calls.

    _stack: CallStack
    _node_locations: MultiDictionary  # BMGNode -> {FunctionCall}

    def __init__(self) -> None:
        self._stack = CallStack()
        self._node_locations = MultiDictionary()

    def current_site(self) -> Optional[FunctionCall]:
        return self._stack.peek()

    def record_node_call(
        self, node: BMGNode, site: Optional[FunctionCall] = None
    ) -> None:
        if site is None:
            site = self.current_site()
        if site is not None:
            self._node_locations.add(node, site)

    def node_locations(self, node: BMGNode) -> Set[FunctionCall]:
        return self._node_locations[node]

    def call(
        self, func: Callable, args: Any, kwargs: Dict[str, Any] = _empty_kwargs
    ) -> Any:
        self._stack.push(FunctionCall(func, args, kwargs))
        try:
            return func(*args, **kwargs)
        finally:
            self._stack.pop()
beanmachine-main
src/beanmachine/ppl/compiler/execution_context.py
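# Illustrative usage sketch, not part of the original file. While a function is
# executed via ExecutionContext.call, current_site() reports the FunctionCall
# record on top of the stack; FunctionCall.__str__ renders a readable call site.

def _traced(ctx: ExecutionContext) -> None:
    site = ctx.current_site()
    assert site is not None and site.func is _traced

ctx = ExecutionContext()
ctx.call(_traced, [ctx])
assert ctx.current_site() is None                  # stack is popped on exit
print(FunctionCall(_traced, [1, 2], {"a": 3}))     # prints "_traced(1,2,a=3)"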
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Pattern matching for ASTs""" import ast from platform import python_version from typing import Any, Dict from beanmachine.ppl.compiler.patterns import ( anyPattern as _any, match_any, match_every, negate, Pattern, PredicatePattern, type_and_attributes, ) from beanmachine.ppl.compiler.rules import RuleDomain # To support different Python versions correctly, in particular changes from 3.8 to 3.9, # some functionality defined in this module needs to be version dependent. _python_version = [int(i) for i in python_version().split(".")[:2]] _python_3_9_or_later = _python_version >= [3, 9] # Assertions about changes across versions that we address in this module if _python_3_9_or_later: dummy_value = ast.Constant(1) assert ast.Index(dummy_value) == dummy_value else: dummy_value = ast.Constant(1) assert ast.Index(dummy_value) != dummy_value def _get_children(node: Any) -> Dict[str, Any]: if isinstance(node, ast.AST): return dict(ast.iter_fields(node)) return {} def _construct(typ: type, children: Dict[str, ast.AST]) -> ast.AST: return typ(**children) ast_domain = RuleDomain(_get_children, _construct) ast_and: Pattern = ast.And add: Pattern = ast.Add bit_and: Pattern = ast.BitAnd bit_or: Pattern = ast.BitOr bit_xor: Pattern = ast.BitXor div: Pattern = ast.Div eq: Pattern = ast.Eq gt: Pattern = ast.Gt gte: Pattern = ast.GtE invert: Pattern = ast.Invert ast_is: Pattern = ast.Is ast_is_not: Pattern = ast.IsNot load: Pattern = ast.Load lshift: Pattern = ast.LShift lt: Pattern = ast.Lt lte: Pattern = ast.LtE mod: Pattern = ast.Mod mult: Pattern = ast.Mult not_eq: Pattern = ast.NotEq ast_or: Pattern = ast.Or ast_pass: Pattern = ast.Pass ast_pow: Pattern = ast.Pow rshift: Pattern = ast.RShift sub: Pattern = ast.Sub uadd: Pattern = ast.UAdd usub: Pattern = ast.USub def arguments( args: Pattern = _any, vararg: Pattern = _any, kwonlyargs: Pattern = _any, kw_defaults: Pattern = _any, kwarg: Pattern = _any, defaults: Pattern = _any, ) -> Pattern: return type_and_attributes( ast.arguments, { "args": args, "vararg": vararg, "kwonlyargs": kwonlyargs, "kw_defaults": kw_defaults, "kwarg": kwarg, "defaults": defaults, }, ) def ast_assert(expr: Pattern = _any, msg: Pattern = _any) -> Pattern: return type_and_attributes(ast.Assert, {"expr": expr, "msg": msg}) def assign(targets: Pattern = _any, value: Pattern = _any) -> Pattern: return type_and_attributes(ast.Assign, {"targets": targets, "value": value}) def aug_assign( target: Pattern = _any, op: Pattern = _any, value: Pattern = _any ) -> Pattern: return type_and_attributes( ast.AugAssign, {"target": target, "op": op, "value": value} ) def ann_assign( target: Pattern = _any, op: Pattern = _any, value: Pattern = _any ) -> Pattern: return type_and_attributes( ast.AnnAssign, {"target": target, "op": op, "value": value} ) # TODO: what should we do about AnnAssign? 
def starred(value: Pattern = _any, ctx: Pattern = _any) -> Pattern: return type_and_attributes(ast.Starred, {"value": value, "ctx": ctx}) def attribute( value: Pattern = _any, attr: Pattern = _any, ctx: Pattern = _any ) -> Pattern: return type_and_attributes( ast.Attribute, {"value": value, "attr": attr, "ctx": ctx} ) def binop(op: Pattern = _any, left: Pattern = _any, right: Pattern = _any) -> Pattern: return type_and_attributes(ast.BinOp, {"op": op, "left": left, "right": right}) def boolop(op: Pattern = _any, values: Pattern = _any) -> Pattern: return type_and_attributes(ast.BoolOp, {"op": op, "values": values}) def call( func: Pattern = _any, args: Pattern = _any, keywords: Pattern = _any ) -> Pattern: return type_and_attributes( ast.Call, {"func": func, "args": args, "keywords": keywords} ) def call_to(id: Pattern = _any, args: Pattern = _any) -> Pattern: return call(func=match_any(attribute(attr=id), name(id=id)), args=args) def id_from_call(c: ast.Call) -> str: f = c.func if isinstance(f, ast.Name): return f.id if isinstance(f, ast.Attribute): return f.attr raise ValueError("Unexpected argument to id_from_call") def compare(left: Pattern = _any, ops: Pattern = _any, comparators: Pattern = _any): return type_and_attributes( ast.Compare, {"left": left, "ops": ops, "comparators": comparators} ) def binary_compare(op: Pattern = _any): return type_and_attributes( ast.Compare, {"left": _any, "ops": [op], "comparators": [_any]} ) def equal(left: Pattern = _any, right: Pattern = _any): return type_and_attributes( ast.Compare, {"left": left, "ops": [ast.Eq], "comparators": [right]} ) def not_equal(left: Pattern = _any, right: Pattern = _any): return type_and_attributes( ast.Compare, {"left": left, "ops": [ast.NotEq], "comparators": [right]} ) def greater_than(left: Pattern = _any, right: Pattern = _any): return type_and_attributes( ast.Compare, {"left": left, "ops": [ast.Gt], "comparators": [right]} ) def greater_than_equal(left: Pattern = _any, right: Pattern = _any): return type_and_attributes( ast.Compare, {"left": left, "ops": [ast.GtE], "comparators": [right]} ) def less_than(left: Pattern = _any, right: Pattern = _any): return type_and_attributes( ast.Compare, {"left": left, "ops": [ast.Lt], "comparators": [right]} ) def less_than_equal(left: Pattern = _any, right: Pattern = _any): return type_and_attributes( ast.Compare, {"left": left, "ops": [ast.LtE], "comparators": [right]} ) def expr(value: Pattern = _any) -> Pattern: return type_and_attributes(ast.Expr, {"value": value}) def ast_while( test: Pattern = _any, body: Pattern = _any, orelse: Pattern = _any ) -> Pattern: return type_and_attributes( ast.While, {"test": test, "body": body, "orelse": orelse} ) def ast_generator(elt: Pattern = _any, generators: Pattern = _any) -> Pattern: return type_and_attributes(ast.GeneratorExp, {"elt": elt, "generators": generators}) def ast_listComp(elt: Pattern = _any, generators: Pattern = _any) -> Pattern: return type_and_attributes(ast.ListComp, {"elt": elt, "generators": generators}) def ast_setComp(elt: Pattern = _any, generators: Pattern = _any) -> Pattern: return type_and_attributes(ast.SetComp, {"elt": elt, "generators": generators}) def ast_dictComp( key: Pattern = _any, value: Pattern = _any, generators: Pattern = _any ) -> Pattern: return type_and_attributes( ast.DictComp, {"key": key, "value": value, "generators": generators} ) def ast_boolop(op: Pattern = _any, values: Pattern = _any) -> Pattern: return type_and_attributes(ast.BoolOp, {"op": op, "values": values}) def ast_compare( left: 
Pattern = _any, ops: Pattern = _any, comparators: Pattern = _any ) -> Pattern: return type_and_attributes( ast.Compare, {"left": left, "ops": ops, "comparators": comparators} ) def ast_for( target: Pattern = _any, iter: Pattern = _any, body: Pattern = _any, orelse: Pattern = _any, ) -> Pattern: return type_and_attributes( ast.For, {"target": target, "iter": iter, "body": body, "orelse": orelse} ) def function_def( name: Pattern = _any, args: Pattern = _any, body: Pattern = _any, decorator_list: Pattern = _any, returns: Pattern = _any, ) -> Pattern: return type_and_attributes( ast.FunctionDef, { "name": name, "args": args, "body": body, "decorator_list": decorator_list, "returns": returns, }, ) def if_exp( test: Pattern = _any, body: Pattern = _any, orelse: Pattern = _any ) -> Pattern: return type_and_attributes( ast.IfExp, {"test": test, "body": body, "orelse": orelse} ) def if_statement( test: Pattern = _any, body: Pattern = _any, orelse: Pattern = _any ) -> Pattern: return type_and_attributes(ast.If, {"test": test, "body": body, "orelse": orelse}) # Note: The following pattern definition is valid only for Python # versions less than 3.9. As a result, it is followed by a # version-dependent redefinition def _index(value: Pattern = _any) -> Pattern: return type_and_attributes(ast.Index, {"value": value}) def index(value: Pattern = _any): if _python_3_9_or_later: return match_every(value, negate(slice_pattern())) else: return _index(value=value) # The following definition should not be necessary in 3.9 # since ast.Index should be identity in this version. It is # nevertheless included for clarity. def ast_index(value, **other): if _python_3_9_or_later: return value else: return ast.Index(value=value, **other) def get_value(slice_field): if _python_3_9_or_later: return slice_field else: return slice_field.value def slice_pattern( lower: Pattern = _any, upper: Pattern = _any, step: Pattern = _any ) -> Pattern: return type_and_attributes( ast.Slice, {"lower": lower, "upper": upper, "step": step} ) def keyword(arg: Pattern = _any, value: Pattern = _any) -> Pattern: return type_and_attributes(ast.keyword, {"arg": arg, "value": value}) def ast_list(elts: Pattern = _any, ctx: Pattern = _any, ast_op=ast.List) -> Pattern: return type_and_attributes(ast_op, {"elts": elts, "ctx": ctx}) def ast_luple(elts: Pattern = _any, ctx: Pattern = _any) -> Pattern: return match_any( type_and_attributes(ast.List, {"elts": elts, "ctx": ctx}), type_and_attributes(ast.Tuple, {"elts": elts, "ctx": ctx}), ) def ast_dict(keys: Pattern = _any, values: Pattern = _any) -> Pattern: return type_and_attributes(ast.Dict, {"keys": keys, "values": values}) def module(body: Pattern = _any) -> Pattern: return type_and_attributes(ast.Module, {"body": body}) def name(id: Pattern = _any, ctx: Pattern = _any) -> Pattern: return type_and_attributes(ast.Name, {"id": id, "ctx": ctx}) def name_constant(value: Pattern = _any) -> Pattern: return type_and_attributes(ast.NameConstant, {"value": value}) def num(n: Pattern = _any) -> Pattern: return type_and_attributes(ast.Num, {"n": n}) def ast_str(s: Pattern = _any) -> Pattern: return type_and_attributes(ast.Str, {"s": s}) def subscript( value: Pattern = _any, slice: Pattern = _any, ctx: Pattern = _any ) -> Pattern: return type_and_attributes( ast.Subscript, {"value": value, "slice": slice, "ctx": ctx} ) def ast_return(value: Pattern = _any) -> Pattern: return type_and_attributes(ast.Return, {"value": value}) def ast_if( test: Pattern = _any, body: Pattern = _any, orelse: Pattern = _any ) -> 
Pattern: return type_and_attributes(ast.If, {"test": test, "body": body, "orelse": orelse}) def unaryop(op: Pattern = _any, operand: Pattern = _any) -> Pattern: return type_and_attributes(ast.UnaryOp, {"op": op, "operand": operand}) def unarysub(operand: Pattern = _any) -> Pattern: return unaryop(op=ast.USub, operand=operand) zero: Pattern = match_any(num(0), num(0.0)) number_constant: Pattern = ast.Num non_zero_num: Pattern = match_every(number_constant, negate(zero)) negative_num: Pattern = match_every( number_constant, PredicatePattern(lambda n: n.n < 0) ) ast_true: Pattern = name_constant(True) ast_false: Pattern = name_constant(False) constant_bool: Pattern = match_any(ast_true, ast_false) constant_literal: Pattern = match_any(number_constant, constant_bool) any_list: Pattern = ast.List
beanmachine-main
src/beanmachine/ppl/compiler/ast_patterns.py
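# Illustrative sketch, not part of the original file: the get_value shim above
# hides the 3.8 -> 3.9 change in how subscript slices are represented, so
# pattern code can stay version independent.

import ast

sub = ast.parse("a[1]", mode="eval").body    # an ast.Subscript node
inner = get_value(sub.slice)                 # Constant on 3.9+; unwraps ast.Index on 3.8
assert isinstance(inner, ast.Constant) and inner.value == 1

is_less_than = binary_compare(lt)            # a Pattern matching forms like "x < y"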
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Callable, List import beanmachine.ppl.compiler.bmg_nodes as bn from torch import Tensor def _tensor_to_label(t: Tensor) -> str: length = len(t.shape) if length == 0: return str(t.item()) comma = "," if length == 1 else ",\\n" return "[" + comma.join(_tensor_to_label(c) for c in t) + "]" def _tensor_val(node: bn.ConstantTensorNode) -> str: return _tensor_to_label(node.value) def _val(node: bn.ConstantNode) -> str: if isinstance(node.value, Tensor): return _tensor_to_label(node.value) return str(node.value) # These are the labels used when rendering a graph as a DOT. _node_labels = { bn.AdditionNode: "+", bn.BernoulliLogitNode: "Bernoulli(logits)", bn.BernoulliNode: "Bernoulli", bn.BetaNode: "Beta", bn.BinomialNode: "Binomial", bn.BinomialLogitNode: "Binomial(logits)", bn.BitAndNode: "&", bn.BitOrNode: "|", bn.BitXorNode: "^", bn.BooleanNode: _val, bn.BroadcastNode: "Broadcast", bn.CategoricalLogitNode: "Categorical(logits)", bn.CategoricalNode: "Categorical", bn.Chi2Node: "Chi2", bn.ChoiceNode: "Choice", bn.CholeskyNode: "Cholesky", bn.ColumnIndexNode: "ColumnIndex", bn.ComplementNode: "complement", bn.ConstantBooleanMatrixNode: _tensor_val, bn.ConstantNaturalMatrixNode: _tensor_val, bn.ConstantNegativeRealMatrixNode: _tensor_val, bn.ConstantPositiveRealMatrixNode: _tensor_val, bn.ConstantProbabilityMatrixNode: _tensor_val, bn.ConstantRealMatrixNode: _tensor_val, bn.ConstantSimplexMatrixNode: _tensor_val, bn.ConstantTensorNode: _tensor_val, bn.DirichletNode: "Dirichlet", bn.DivisionNode: "/", bn.ElementwiseMultiplyNode: "ElementwiseMult", bn.EqualNode: "==", bn.ExpM1Node: "ExpM1", bn.ExpNode: "Exp", bn.Exp2Node: "Exp2", bn.ExpProductFactorNode: "ExpProduct", bn.FlatNode: "Flat", bn.FloorDivNode: "//", bn.FillMatrixNode: "FillMatrix", bn.GammaNode: "Gamma", bn.GreaterThanEqualNode: ">=", bn.GreaterThanNode: ">", bn.HalfCauchyNode: "HalfCauchy", bn.IfThenElseNode: "if", bn.InNode: "In", bn.IndexNode: "index", bn.InvertNode: "Invert", bn.IsNode: "Is", bn.IsNotNode: "IsNot", bn.ItemNode: "Item", bn.LessThanEqualNode: "<=", bn.LessThanNode: "<", bn.LKJCholeskyNode: "LKJCholesky", bn.Log1mexpNode: "Log1mexp", bn.LogisticNode: "Logistic", bn.LogNode: "Log", bn.Log10Node: "Log10", bn.Log1pNode: "Log1p", bn.Log2Node: "Log2", bn.LogProbNode: "LogProb", bn.LogSumExpNode: "LogSumExp", bn.LogSumExpTorchNode: "LogSumExp", bn.LogSumExpVectorNode: "LogSumExp", bn.LogAddExpNode: "LogAddExp", bn.LShiftNode: "<<", bn.MatrixAddNode: "MatrixAdd", bn.MatrixComplementNode: "MatrixComplement", bn.MatrixExpNode: "MatrixExp", bn.MatrixLogNode: "MatrixLog", bn.MatrixLog1mexpNode: "MatrixLog1mexp", bn.MatrixMultiplicationNode: "@", bn.MatrixNegateNode: "MatrixNegate", bn.MatrixPhiNode: "MatrixPhi", bn.MatrixScaleNode: "MatrixScale", bn.MatrixSumNode: "MatrixSum", bn.ModNode: "%", bn.MultiplicationNode: "*", bn.NaturalNode: _val, bn.NegateNode: "-", bn.NegativeRealNode: _val, bn.NormalNode: "Normal", bn.HalfNormalNode: "HalfNormal", bn.PoissonNode: "Poisson", bn.NotEqualNode: "!=", bn.NotInNode: "NotIn", bn.NotNode: "not", bn.Observation: lambda n: f"Observation {str(n.value)}", bn.PhiNode: "Phi", bn.PositiveRealNode: _val, bn.PowerNode: "**", bn.ProbabilityNode: _val, bn.Query: "Query", bn.RealNode: _val, bn.RShiftNode: ">>", bn.SampleNode: "Sample", bn.SquareRootNode: "Sqrt", bn.StudentTNode: "StudentT", bn.SumNode: "Sum", 
bn.SwitchNode: "Switch", bn.TensorNode: "Tensor", bn.ToIntNode: "ToInt", bn.ToMatrixNode: "ToMatrix", bn.ToNegativeRealNode: "ToNegReal", bn.ToNegativeRealMatrixNode: "ToNegRealMatrix", bn.ToPositiveRealMatrixNode: "ToPosRealMatrix", bn.ToPositiveRealNode: "ToPosReal", bn.ToProbabilityNode: "ToProb", bn.ToRealMatrixNode: "ToRealMatrix", bn.ToRealNode: "ToReal", bn.UniformNode: "Uniform", bn.UntypedConstantNode: _val, bn.VectorIndexNode: "index", bn.TransposeNode: "Transpose", } # These are the labels used when describing a node in an error message. _node_error_labels = { bn.AdditionNode: "addition (+)", bn.BernoulliLogitNode: "Bernoulli", bn.BernoulliNode: "Bernoulli", bn.BetaNode: "beta", bn.BinomialNode: "binomial", bn.BinomialLogitNode: "binomial", bn.BitAndNode: "'bitwise and' (&)", bn.BitOrNode: "'bitwise or' (|)", bn.BitXorNode: "'bitwise xor' (^)", bn.BooleanNode: "Boolean value", bn.BroadcastNode: "broadcast", bn.CategoricalLogitNode: "categorical", bn.CategoricalNode: "categorical", bn.Chi2Node: "chi-squared", bn.ChoiceNode: "choice", bn.CholeskyNode: "Cholesky", bn.ColumnIndexNode: "column index", bn.ComplementNode: "complement", bn.ConstantBooleanMatrixNode: "Boolean matrix", bn.ConstantNaturalMatrixNode: "natural matrix", bn.ConstantNegativeRealMatrixNode: "negative real matrix", bn.ConstantPositiveRealMatrixNode: "positive real matrix", bn.ConstantProbabilityMatrixNode: "probability matrix", bn.ConstantRealMatrixNode: "real matrix", bn.ConstantSimplexMatrixNode: "simplex", bn.ConstantTensorNode: "tensor", bn.DirichletNode: "Dirichlet", bn.DivisionNode: "division (/)", bn.EqualNode: "equality (==)", bn.ExpM1Node: "expm1", bn.ExpNode: "exp", bn.Exp2Node: "exp2", bn.ExpProductFactorNode: "exp product factor", bn.FillMatrixNode: "fill matrix", bn.FlatNode: "flat", bn.FloorDivNode: "floor division (//)", bn.GammaNode: "gamma", bn.GreaterThanEqualNode: "'greater than or equal' (>=)", bn.GreaterThanNode: "'greater than' (>)", bn.HalfCauchyNode: "half Cauchy", bn.IfThenElseNode: "'if'", bn.InNode: "'in'", bn.IndexNode: "index", bn.InvertNode: "'bitwise invert' (~)", bn.IsNode: "'is'", bn.IsNotNode: "'is not'", bn.ItemNode: "item", bn.LessThanEqualNode: "'less than or equal' (<=)", bn.LessThanNode: "'less than' (<)", bn.LKJCholeskyNode: "LKJ Cholesky", bn.Log1mexpNode: "log1mexp", bn.LogisticNode: "logistic", bn.LogProbNode: "log_prob", bn.LogNode: "log", bn.Log10Node: "log10", bn.Log1pNode: "log1p", bn.Log2Node: "log2", bn.LogSumExpNode: "logsumexp", bn.LogSumExpTorchNode: "logsumexp", bn.LogSumExpVectorNode: "logsumexp", bn.LogAddExpNode: "logaddexp", bn.LShiftNode: "'left shift' (<<)", bn.MatrixAddNode: "matrix add", bn.MatrixMultiplicationNode: "matrix multiplication (@)", bn.MatrixScaleNode: "matrix scale", bn.ModNode: "modulus (%)", bn.MultiplicationNode: "multiplication (*)", bn.NaturalNode: "natural value", bn.NegateNode: "negation (-)", bn.NegativeRealNode: "negative real value", bn.NormalNode: "normal", bn.HalfNormalNode: "half normal", bn.PoissonNode: "Poisson", bn.NotEqualNode: "inequality (!=)", bn.NotInNode: "'not in'", bn.NotNode: "'not'", bn.Observation: "observation", bn.PhiNode: "phi", bn.PositiveRealNode: "positive real value", bn.PowerNode: "power (**)", bn.ProbabilityNode: "probability value", bn.Query: "query", bn.RealNode: "real value", bn.RShiftNode: "'right shift' (>>)", bn.SampleNode: "sample", bn.SquareRootNode: "square root", bn.StudentTNode: "student T", bn.SumNode: "sum", bn.SwitchNode: "switch", bn.TensorNode: "tensor", bn.ToIntNode: "'to int'", 
bn.ToMatrixNode: "'to matrix'", bn.ToNegativeRealNode: "'to negative real'", bn.ToNegativeRealMatrixNode: "'to negative real matrix'", bn.ToPositiveRealMatrixNode: "'to positive real matrix'", bn.ToPositiveRealNode: "'to positive real'", bn.ToProbabilityNode: "'to probability'", bn.ToRealMatrixNode: "'to real matrix'", bn.ToRealNode: "'to real'", bn.UniformNode: "uniform", bn.UntypedConstantNode: "constant value", bn.VectorIndexNode: "index", bn.TransposeNode: "transpose", } _none = [] _left_right = ["left", "right"] _operand = ["operand"] _probability = ["probability"] def _numbers(n: int) -> List[str]: return [str(x) for x in range(n)] def _numbered_or_left_right(node: bn.BMGNode) -> List[str]: if len(node.inputs) == 2: return _left_right return _numbers(len(node.inputs)) def _prefix_numbered(prefix: List[str]) -> Callable: return lambda node: prefix + _numbers(len(node.inputs) - len(prefix)) _edge_labels = { bn.AdditionNode: _numbered_or_left_right, bn.BernoulliLogitNode: _probability, bn.BernoulliNode: _probability, bn.BetaNode: ["alpha", "beta"], bn.BinomialNode: ["count", "probability"], bn.BinomialLogitNode: ["count", "probability"], bn.BooleanNode: _none, bn.BroadcastNode: ["value", "rows", "columns"], bn.CategoricalNode: _probability, bn.Chi2Node: ["df"], bn.ChoiceNode: _prefix_numbered(["condition"]), bn.CholeskyNode: _operand, bn.ColumnIndexNode: _left_right, bn.ComplementNode: _operand, bn.ConstantBooleanMatrixNode: _none, bn.ConstantNaturalMatrixNode: _none, bn.ConstantNegativeRealMatrixNode: _none, bn.ConstantPositiveRealMatrixNode: _none, bn.ConstantProbabilityMatrixNode: _none, bn.ConstantRealMatrixNode: _none, bn.ConstantSimplexMatrixNode: _none, bn.ConstantTensorNode: _none, bn.DirichletNode: ["concentration"], bn.DivisionNode: _left_right, bn.ElementwiseMultiplyNode: _left_right, bn.EqualNode: _left_right, bn.ExpM1Node: _operand, bn.ExpNode: _operand, bn.Exp2Node: _operand, bn.ExpProductFactorNode: _numbered_or_left_right, bn.FlatNode: _none, bn.FillMatrixNode: ["value", "rows", "columns"], bn.GammaNode: ["concentration", "rate"], bn.GreaterThanEqualNode: _left_right, bn.GreaterThanNode: _left_right, bn.HalfCauchyNode: ["scale"], bn.IfThenElseNode: ["condition", "consequence", "alternative"], bn.IndexNode: _left_right, bn.LessThanEqualNode: _left_right, bn.LessThanNode: _left_right, bn.Log1mexpNode: _operand, bn.LogisticNode: _operand, bn.LogNode: _operand, bn.Log10Node: _operand, bn.Log1pNode: _operand, bn.Log2Node: _operand, bn.LogProbNode: ["distribution", "value"], bn.LogSumExpNode: _numbered_or_left_right, bn.LogSumExpTorchNode: ["operand", "dim", "keepdim"], bn.LogSumExpVectorNode: _operand, bn.LogAddExpNode: _left_right, bn.SwitchNode: _numbered_or_left_right, bn.MatrixAddNode: _left_right, bn.MatrixComplementNode: _operand, bn.MatrixExpNode: _operand, bn.MatrixMultiplicationNode: _left_right, bn.MatrixLogNode: _operand, bn.MatrixLog1mexpNode: _operand, bn.MatrixNegateNode: _operand, bn.MatrixPhiNode: _operand, bn.MatrixScaleNode: _numbered_or_left_right, bn.MatrixSumNode: _operand, bn.MultiplicationNode: _numbered_or_left_right, bn.NaturalNode: _none, bn.NegateNode: _operand, bn.NegativeRealNode: _none, bn.NormalNode: ["mu", "sigma"], bn.HalfNormalNode: ["sigma"], bn.PoissonNode: ["rate"], bn.NotEqualNode: _left_right, bn.NotNode: _operand, bn.Observation: _operand, bn.PhiNode: _operand, bn.PositiveRealNode: _none, bn.PowerNode: _left_right, bn.ProbabilityNode: _none, bn.Query: ["operator"], bn.RealNode: _none, bn.SampleNode: _operand, bn.SquareRootNode: 
_operand, bn.StudentTNode: ["df", "loc", "scale"], bn.SumNode: _operand, bn.TensorNode: _numbered_or_left_right, bn.ToIntNode: _operand, bn.ToMatrixNode: _prefix_numbered(["rows", "columns"]), bn.ToNegativeRealNode: _operand, bn.ToPositiveRealMatrixNode: _operand, bn.ToPositiveRealNode: _operand, bn.ToProbabilityNode: _operand, bn.ToRealMatrixNode: _operand, bn.ToRealNode: _operand, bn.UniformNode: ["low", "high"], bn.VectorIndexNode: _left_right, bn.TransposeNode: _operand, } def get_node_label(node: bn.BMGNode) -> str: label = _node_labels.get(type(node), "UNKNOWN") # pyre-ignore if isinstance(label, str): return label assert isinstance(label, Callable) return label(node) def get_node_error_label(node: bn.BMGNode) -> str: return _node_error_labels.get(type(node), "UNKNOWN") # pyre-ignore def get_edge_labels(node: bn.BMGNode) -> List[str]: t = type(node) if t not in _edge_labels: return ["UNKNOWN"] * len(node.inputs) labels = _edge_labels[t] if isinstance(labels, list): result = labels else: assert isinstance(labels, Callable) result = labels(node) assert isinstance(result, list) and len(result) == len(node.inputs) return result def get_edge_label(node: bn.BMGNode, i: int) -> str: t = type(node) if t not in _edge_labels: return "UNKNOWN" labels = _edge_labels[t] if isinstance(labels, list): return labels[i] assert isinstance(labels, Callable) return labels(node)[i]
beanmachine-main
src/beanmachine/ppl/compiler/graph_labels.py
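# Illustrative usage sketch, not part of the original file; it assumes the
# BetaNode and PositiveRealNode constructors take the arguments that
# bm_graph_builder passes them elsewhere in this repository.

alpha = bn.PositiveRealNode(2.0)
beta = bn.PositiveRealNode(3.0)
node = bn.BetaNode(alpha, beta)
assert get_node_label(node) == "Beta"          # label used in DOT rendering
assert get_node_error_label(node) == "beta"    # label used in error messages
assert get_edge_labels(node) == ["alpha", "beta"]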
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import List, Tuple

import beanmachine.ppl.compiler.bmg_nodes as bn
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.fix_problem import (
    Inapplicable,
    NodeFixer,
    NodeFixerResult,
)


def _theta_is_beta_with_real_params(n: bn.SampleNode) -> bool:
    # TODO: For now we support conjugate prior transformation on
    # priors with constant parameter values.
    beta_node = n.inputs[0]
    if not isinstance(beta_node, bn.BetaNode):
        return False
    alpha = beta_node.inputs[0]
    beta = beta_node.inputs[1]
    return isinstance(alpha, bn.ConstantNode) and isinstance(beta, bn.ConstantNode)


def _theta_is_queried(n: bn.SampleNode) -> bool:
    # TODO: This check can be removed if it is not a necessary condition.
    return any(isinstance(i, bn.Query) for i in n.outputs.items)


def _sample_contains_obs(n: bn.SampleNode) -> bool:
    return any(isinstance(o, bn.Observation) for o in n.outputs.items)


def _get_likelihood_obs_samples(
    n: bn.BMGNode,
) -> Tuple[List[bn.Observation], List[bn.SampleNode]]:
    obs = []
    samples = []
    for o in n.outputs.items:
        if isinstance(o, bn.SampleNode) and _sample_contains_obs(o):
            obs.append(next(iter(o.outputs.items.keys())))
            samples.append(o)
    return obs, samples


def _likelihood_is_observed(n: bn.BMGNode) -> bool:
    return any(_sample_contains_obs(i) for i in n.outputs.items)


def beta_bernoulli_conjugate_fixer(bmg: BMGraphBuilder) -> NodeFixer:
    """This fixer transforms graphs with a Bernoulli likelihood and a Beta prior.
    Since this is a conjugate pair, we analytically update the prior
    parameters Beta(alpha, beta) using observations to get the posterior
    parameters Beta(alpha', beta'). Once we update the parameters, we delete
    the observed samples from the graph. This greatly decreases the number of
    nodes and edges in the graph, and the Bayesian update is reduced to a
    parameter update, which can lead to performance wins during inference."""

    def fixer(n: bn.BMGNode) -> NodeFixerResult:
        # A graph is beta-bernoulli conjugate fixable if:
        #
        # There is a bernoulli node with theta that is sampled
        # from a beta distribution. Further, the beta is queried and
        # the bernoulli node has n observations.
        #
        # That is, we are looking for stuff like:
        #
        #       alpha     beta
        #          \       /
        #            Beta
        #              |
        #            Sample
        #           /      \
        #   Bernoulli      Query
        #       |
        #     Sample
        #       |       \
        #  Observation True  ...
        #
        # to turn it into
        #
        #       alpha'    beta'
        #          \       /
        #            Beta
        #              |
        #            Sample
        #              |
        #            Query

        if not isinstance(n, bn.BernoulliNode):
            return Inapplicable
        beta_sample = n.inputs[0]
        if not (
            isinstance(beta_sample, bn.SampleNode)
            and _theta_is_beta_with_real_params(beta_sample)
            and _theta_is_queried(beta_sample)
            and _likelihood_is_observed(n)
        ):
            return Inapplicable
        beta_node = beta_sample.inputs[0]
        assert isinstance(beta_node, bn.BetaNode)

        obs, samples_to_remove = _get_likelihood_obs_samples(n)

        alpha = beta_node.inputs[0]
        assert isinstance(alpha, bn.ConstantNode)
        obs_sum = sum(o.value for o in obs)
        transformed_alpha = bmg.add_pos_real(alpha.value + obs_sum)

        beta = beta_node.inputs[1]
        assert isinstance(beta, bn.ConstantNode)
        # Update: beta' = beta + n - obs_sum
        transformed_beta = bmg.add_pos_real(beta.value + len(obs) - obs_sum)

        beta_node.inputs[0] = transformed_alpha
        beta_node.inputs[1] = transformed_beta

        # We need to remove both the sample and the observation node.
        for o in obs:
            bmg.remove_leaf(o)
        for s in samples_to_remove:
            if len(s.outputs.items) == 0:
                bmg.remove_node(s)

        return n

    return fixer


def beta_binomial_conjugate_fixer(bmg: BMGraphBuilder) -> NodeFixer:
    """This fixer transforms graphs with a Binomial likelihood and a Beta prior.
    Since this is a conjugate pair, we analytically update the prior
    parameters Beta(alpha, beta) using observations to get the posterior
    parameters Beta(alpha', beta'). Once we update the parameters, we delete
    the observed samples from the graph. This greatly decreases the number of
    nodes and edges in the graph, and the Bayesian update is reduced to a
    parameter update, which can lead to performance wins during inference."""

    def fixer(n: bn.BMGNode) -> NodeFixerResult:
        # A graph is beta-binomial conjugate fixable if:
        #
        # There is a binomial node with theta that is sampled
        # from a beta distribution. Further, the beta is queried and
        # the binomial node has n observations.
        #
        # That is, we are looking for stuff like:
        #
        #       alpha     beta
        #          \       /
        #            Beta
        #              |
        #  Count     Sample
        #      \    /      \
        #     Binomial     Query
        #        |
        #      Sample
        #        |       \
        #   Observation 3.0  ...
        #
        # to turn it into
        #
        #       alpha'    beta'
        #          \       /
        #            Beta
        #              |
        #            Sample
        #              |
        #            Query

        if not isinstance(n, bn.BinomialNode):
            return Inapplicable
        beta_sample = n.inputs[1]
        if not (
            isinstance(beta_sample, bn.SampleNode)
            and _theta_is_beta_with_real_params(beta_sample)
            and _theta_is_queried(beta_sample)
            and _likelihood_is_observed(n)
        ):
            return Inapplicable
        count = n.inputs[0]
        assert isinstance(count, bn.UntypedConstantNode)
        beta_node = beta_sample.inputs[0]
        assert isinstance(beta_node, bn.BetaNode)

        obs, samples_to_remove = _get_likelihood_obs_samples(n)

        alpha = beta_node.inputs[0]
        assert isinstance(alpha, bn.ConstantNode)
        obs_sum = sum(o.value for o in obs)
        transformed_alpha = bmg.add_pos_real(alpha.value + obs_sum)

        # Update: beta' = beta + sum count - obs_sum
        beta = beta_node.inputs[1]
        assert isinstance(beta, bn.ConstantNode)
        updated_count = len(obs) * count.value
        transformed_beta = bmg.add_pos_real(beta.value + updated_count - obs_sum)

        beta_node.inputs[0] = transformed_alpha
        beta_node.inputs[1] = transformed_beta

        # We need to remove both the sample and the observation node.
        for o in obs:
            bmg.remove_leaf(o)
        for s in samples_to_remove:
            if len(s.outputs.items) == 0:
                bmg.remove_node(s)

        return n

    return fixer
beanmachine-main
src/beanmachine/ppl/compiler/fix_beta_conjugate_prior.py
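# Worked example, not part of the original file, of the conjugate update the
# fixers above perform. With a Beta(2, 3) prior and five observed Bernoulli
# samples of which four are 1, the posterior is Beta(6, 4):

alpha, beta = 2.0, 3.0
obs = [1.0, 1.0, 0.0, 1.0, 1.0]
obs_sum = sum(obs)                      # 4.0
alpha_post = alpha + obs_sum            # alpha' = alpha + obs_sum    = 6.0
beta_post = beta + len(obs) - obs_sum   # beta'  = beta + n - obs_sum = 4.0
assert (alpha_post, beta_post) == (6.0, 4.0)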
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# Once we have accumulated a graph we often need to associate type information
# with some or all of the nodes. This presents a number of practical difficulties:
#
# * The information we wish to associate with a node varies depending on what we
#   are doing with the graph; knowing the BMG type associated with the node might
#   not be relevant for all possible target languages. We therefore wish the type
#   computation logic to admit a variety of different kinds of type information,
#   without excessively duplicating the traversal and update logic described below.
#
# * The type of a given node might depend on the types of all ancestors of that
#   node. However, the set of ancestors of a node might be large, and we might
#   wish to obtain the types of a large number of nodes. We therefore wish to
#   cache as much information as possible to avoid needless recomputation.
#
# * The shortest path of edges between a node and a given ancestor might be very
#   long; longer than the default recursion limit of Python. We therefore require
#   that every algorithm which traverses the graph to compute types be iterative
#   rather than recursive.
#
# * A graph is mutable; we may be using type information to motivate mutations. But
#   since a graph node's type may depend on the type of all its ancestors, when we
#   mutate a node we might have to recompute the types of all its descendants.
#   We wish to do this efficiently, and again, without any recursion.
#
# This abstract base class implements three public operations:
#
# * __getitem__(node)->T allows you to use the [] accessor to obtain type information
#   about a node. If the type is already cached then it is returned; if not, then the
#   types of all ancestors are computed (if not cached), and the node type is computed,
#   cached, and returned.
#
# * __contains__(node)->bool allows you to use the "in" operator to determine if a node's
#   type information has already been cached. (TODO: Is this useful? Maybe remove it.)
#
# * update_type(node)->None informs the type cache that a node has been updated. It
#   recomputes the node's type and efficiently propagates the change to the descendants.
#
# All a typer needs to do is:
#
# * Derive from TyperBase
# * Implement _compute_type_inputs_known to compute the type of a single node.
#
# _compute_type_inputs_known will be called when the type of a node needs to be recomputed.
# It will only be called if the types of all input nodes are known; since typing the inputs
# required typing *their* inputs, we know that all ancestor nodes are typed.

from abc import ABC, abstractmethod
from queue import Queue
from typing import Dict, Generic, TypeVar

import beanmachine.ppl.compiler.bmg_nodes as bn

T = TypeVar("T")


class TyperBase(ABC, Generic[T]):

    _nodes: Dict[bn.BMGNode, T]

    def __init__(self) -> None:
        self._nodes = {}

    def __getitem__(self, node: bn.BMGNode) -> T:
        # If node is already typed, give its type.
        # If not, type it and its inputs if necessary.
        if node not in self._nodes:
            self._update_node_inputs_not_known(node)
            self._propagate_update_to_outputs(node)
        assert node in self._nodes
        return self._nodes[node]

    def __contains__(self, node: bn.BMGNode) -> bool:
        return node in self._nodes

    def _inputs_known(self, node: bn.BMGNode) -> bool:
        return all(i in self._nodes for i in node.inputs)

    def update_type(self, node: bn.BMGNode) -> None:
        # Preconditions:
        #
        # * The node's type might already be known, but might now be wrong.
        # * The types of the inputs of the node might be missing.
        # * All node output types are either not known, because they
        #   are not relevant, or are known but might now be incorrect.
        #
        # Postconditions:
        #
        # * If the node's type was not previously known, it still is not.
        #   Otherwise:
        # * All node input types are known
        # * Node type is correct
        # * Any changes caused by node type being updated have been
        #   propagated to its relevant outputs.

        # If no one previously wanted the type of this node, then there's
        # no need to compute it now, and there are no typed descendants
        # that need updating. Just ignore it, and when someone wants the
        # type, we can compute it then.
        if node not in self._nodes:
            return

        # We have been asked to update the type of a node, presumably
        # because its inputs have been edited. Those inputs might not
        # have been typed in the initial traversal of the graph because
        # they might be new nodes. Therefore our first task is to
        # determine the types of those inputs and the new type of this
        # node.
        current_type = self._nodes[node]
        self._update_node_inputs_not_known(node)
        new_type = self._nodes[node]

        # We have now computed types for all the previously unknown
        # input types, if there were any, and we have the previous
        # and current type of this node. If the type of this node
        # changed then the type analysis might be wrong for some
        # of its outputs. Propagate the change to outputs, and
        # then to their outputs, and so on.
        if current_type != new_type:
            self._propagate_update_to_outputs(node)

    def _propagate_update_to_outputs(self, node: bn.BMGNode) -> None:
        # We've either just typed node for the first time, or its type
        # has just changed. That means that the types of its outputs
        # might have also changed.
        #
        # This propagation should be breadth-first. That is, we should
        # propagate the change to all the outputs, and then all their
        # outputs, and so on.
        #
        # Note that it is possible that a node has an output which is
        # not typed; there might be a branch of the graph which is not
        # an ancestor of any query, observation, sample or factor.
        # We can skip propagating types to such nodes since they are
        # irrelevant for generating the graph.
        #
        # We require that this algorithm, like all algorithms that traverse the
        # graph, be non-recursive.
        work = Queue()
        for o in node.outputs.items:
            if o in self._nodes:
                work.put(o)
        while work.qsize() > 0:
            cur = work.get()
            assert cur in self._nodes
            current_type = self[cur]
            assert self._inputs_known(cur)
            new_type = self._compute_type_inputs_known(cur)
            self._nodes[cur] = new_type
            if current_type == new_type:
                continue
            for o in cur.outputs.items:
                if o in self._nodes:
                    work.put(o)

    def _update_node_inputs_not_known(self, node: bn.BMGNode) -> None:
        # Preconditions:
        #
        # * The node is not necessarily already added.
        # * Inputs are not necessarily already added, and similarly with
        #   their inputs and so on.
        #
        # Postconditions:
        #
        # * The transitive closure of untyped inputs is added.
        # * The node is added.
        #
        # We require that this algorithm, like all algorithms that traverse the
        # graph, be non-recursive.
        if node in self._nodes:
            del self._nodes[node]
        work = [node]
        while len(work) > 0:
            cur = work.pop()
            # It is possible that we got the same input in the work
            # stack twice, so this one might already be typed. Just
            # skip it.
            if cur in self._nodes:
                continue
            # We must ensure that inputs are all known. If there are any
            # that are not known, then put the current node back on the
            # work stack and we will come back to it after the inputs are
            # all processed.
            if self._inputs_known(cur):
                self._nodes[cur] = self._compute_type_inputs_known(cur)
            else:
                work.append(cur)
                for i in cur.inputs:
                    if i not in self._nodes:
                        work.append(i)
        assert self._inputs_known(node)
        assert node in self._nodes

    @abstractmethod
    def _compute_type_inputs_known(self, node: bn.BMGNode) -> T:
        pass
beanmachine-main
src/beanmachine/ppl/compiler/typer_base.py
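# Minimal illustrative subclass, not part of the original file: a toy typer
# whose "type" for a node is its height above the graph's roots. This shows the
# single extension point TyperBase requires; caching, iterative traversal and
# update propagation are all inherited.

class HeightTyper(TyperBase[int]):
    def _compute_type_inputs_known(self, node: bn.BMGNode) -> int:
        # Every input is guaranteed to be typed already, so self[i] is a
        # cache hit and does not recurse.
        return 1 + max((self[i] for i in node.inputs), default=0)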
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import time
from typing import Dict, List, Optional

accumulate = "accumulate"
infer = "infer"
fix_problems = "fix_problems"
graph_infer = "graph_infer"
build_bmg_graph = "build_bmg_graph"
transpose_samples = "transpose_samples"
build_mcsamples = "build_mcsamples"
deserialize_perf_report = "deserialize_perf_report"


class Event:
    begin: bool
    kind: str
    timestamp: int

    def __init__(self, begin: bool, kind: str, timestamp: int) -> None:
        self.begin = begin
        self.kind = kind
        self.timestamp = timestamp

    def __str__(self) -> str:
        s = "begin" if self.begin else "finish"
        return f"{s} {self.kind} {self.timestamp}"


class ProfileReport:
    calls: int
    total_time: int
    children: Dict[str, "ProfileReport"]
    parent: Optional["ProfileReport"]

    def __init__(self) -> None:
        self.calls = 0
        self.total_time = 0
        self.children = {}
        self.parent = None

    def _to_string(self, indent: str) -> str:
        s = ""
        attributed = 0
        # TODO: Sort by total time of children - WARNING: Important not to sort by
        # runtime, as this leaks timing non-determinism into report structure.
        # TODO: compute unattributed via property
        for key, value in self.children.items():
            s += f"{indent}{key}:({value.calls}) {value.total_time // 1000000} ms\n"
            s += value._to_string(indent + " ")
            attributed += value.total_time
        if self.parent is None:
            s += "Total time: " + str(attributed // 1000000) + " ms\n"
        elif len(self.children) > 0:  # and self.total_time > 0:
            unattributed = self.total_time - attributed
            s += f"{indent}unattributed: {abs(unattributed // 1000000)} ms\n"
        return s

    def __str__(self) -> str:
        return self._to_string("")


class ProfilerData:
    events: List[Event]
    in_flight: List[Event]

    def __init__(self) -> None:
        self.events = []
        self.in_flight = []

    def begin(self, kind: str, timestamp: Optional[int] = None) -> None:
        t = time.time_ns() if timestamp is None else timestamp
        e = Event(True, kind, t)
        self.events.append(e)
        self.in_flight.append(e)

    def finish(self, kind: str, timestamp: Optional[int] = None) -> None:
        t = time.time_ns() if timestamp is None else timestamp
        while len(self.in_flight) > 0:
            top = self.in_flight.pop()
            e = Event(False, top.kind, t)
            self.events.append(e)
            if top.kind == kind:
                break

    def __str__(self) -> str:
        return "\n".join(str(e) for e in self.events)

    def time_in(self, kind: str) -> int:
        total_time = 0
        nesting = 0
        outermost_begin = None
        for e in self.events:
            if e.kind != kind:
                continue
            if nesting == 0 and e.begin:
                # We've found an outermost begin event.
                outermost_begin = e
                nesting = 1
            elif nesting == 1 and not e.begin:
                # We've found an outermost finish event.
                nesting = 0
                assert isinstance(outermost_begin, Event)
                total_time += e.timestamp - outermost_begin.timestamp
                outermost_begin = None
            elif nesting > 0:
                # We've found a nested begin or finish.
                if e.begin:
                    nesting += 1
                else:
                    nesting -= 1
        return total_time

    def to_report(self) -> ProfileReport:
        return event_list_to_report(self.events)


def event_list_to_report(events) -> ProfileReport:
    root = ProfileReport()
    current = root
    begins = []
    for e in events:
        if e.begin:
            if e.kind in current.children:
                p = current.children[e.kind]
            else:
                p = ProfileReport()
                p.parent = current
                current.children[e.kind] = p
                setattr(current, e.kind, p)
            p.calls += 1
            current = p
            begins.append(e)
        else:
            assert len(begins) > 0
            b = begins[-1]
            assert e.kind == b.kind
            current.total_time += e.timestamp - b.timestamp
            begins.pop()
            current = current.parent
    assert len(begins) == 0
    assert current == root
    return root
beanmachine-main
src/beanmachine/ppl/compiler/profiler.py
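# Illustrative usage sketch, not part of the original file: timing nested
# compiler phases with ProfilerData and rendering the report.

pd = ProfilerData()
pd.begin(accumulate)
pd.begin(fix_problems)           # a nested phase
pd.finish(fix_problems)
pd.finish(accumulate)
print(pd.time_in(accumulate))    # nanoseconds spent in "accumulate"
print(pd.to_report())            # nested per-phase report with unattributed time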
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Any, Callable, Dict, List, Optional import beanmachine.ppl.compiler.bmg_nodes as bn import beanmachine.ppl.compiler.bmg_types as bt import beanmachine.ppl.compiler.profiler as prof import numpy as np import torch import torch.distributions as dist from beanmachine.ppl.compiler.bmg_nodes import BMGNode, ConstantNode from beanmachine.ppl.compiler.execution_context import ExecutionContext from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.utils.memoize import memoize _standard_normal = dist.Normal(0.0, 1.0) def phi(x: Any) -> Any: return _standard_normal.cdf(x) supported_bool_types = {bool, np.bool_} supported_float_types = {np.longdouble, np.float16, np.float32, np.float64, float} supported_int_types = {np.int16, np.int32, np.int64, np.int8, np.longlong} supported_int_types |= {np.uint16, np.uint32, np.uint64, np.uint8, np.ulonglong, int} supported_tensor_types = {torch.Tensor, np.ndarray} _empty_context = ExecutionContext() class BMGraphBuilder: # #### # #### State and initialization # #### # We keep a list of all the nodes in the graph and associate a unique # integer with each. # TODO: The original idea was to use these integers when generating code # that constructs the graph, or DOT files that display the graph. # However, the integer generated is ordered according to when the node # was created, which is not necessarily the order in which we would # like to enumerate them for code generation purposes. # # We have therefore changed the code generation process to do a deterministic # topological sort of the nodes, and then number them in topological sort # order when emitting code; that way the code is generated so that each node # is numbered in the order it appears in the code. This is more pleasant # to read and understand, but the real benefit is that it makes the test # cases more stable and easier to verify. # # We can replace this dictionary with an unordered set; consider doing so. _nodes: Dict[BMGNode, int] _node_counter: int # This allows us to turn on a special problem-fixing pass to help # work around problems under investigation. _fix_observe_true: bool = False _pd: Optional[prof.ProfilerData] execution_context: ExecutionContext def __init__(self, execution_context: ExecutionContext = _empty_context) -> None: self._nodes = {} self._node_counter = 0 self._pd = None self.execution_context = execution_context def _begin(self, s: str) -> None: pd = self._pd if pd is not None: pd.begin(s) def _finish(self, s: str) -> None: pd = self._pd if pd is not None: pd.finish(s) # #### # #### Node creation and accumulation # #### # This code is called while the lifted program executes. # # The "add" methods unconditionally create a new graph node # and add to the builder *if it does not already exist*. # By memoizing almost all the "add" methods we ensure that # the graph is deduplicated automatically. # # TODO: This code does constant folding as well as deduplication, # but that could be moved to a later optimization pass. 
def add_node(self, node: BMGNode) -> BMGNode: # TODO: This should be private """This adds a node we've recently created to the node set; it maintains the invariant that all the input nodes are also added.""" assert node not in self._nodes for i in node.inputs: assert i in self._nodes self._nodes[node] = self._node_counter self._node_counter += 1 return node def remove_leaf(self, node: BMGNode) -> None: # TODO: This is only used to remove an observation; restrict it # accordingly. Particularly because this code is not correct with # respect to the memoizers. We could add a node, memoize it, # remove, it, add it again, and the memoizer would hand the node # back without adding it to the graph. """This removes a leaf node from the builder, and ensures that the output edges of all its input nodes are removed as well.""" if not node.is_leaf: raise ValueError("remove_leaf requires a leaf node") if node not in self._nodes: raise ValueError("remove_leaf called with node from wrong builder") for i in node.inputs.inputs: i.outputs.remove_item(node) del self._nodes[node] def remove_node(self, node: BMGNode) -> None: # TODO: This is only used to remove observation sample node that # are factored into posterior computation; restrict it # accordingly. Particularly because this code is not correct with # respect to the memoizers. We could add a node, memoize it, # remove, it, add it again, and the memoizer would hand the node # back without adding it to the graph. """This removes a node from the builder, and ensures that the output edges of all its input nodes are removed as well.""" if node not in self._nodes: raise ValueError("remove_node called with node from wrong builder") for i in node.inputs.inputs: i.outputs.remove_item(node) del self._nodes[node] # #### # #### Graph accumulation for constant values # #### # This code handles creating nodes for ordinary values such as # floating point values and tensors created during the execution # of the lifted program. We only create graph nodes for an ordinary # value when that value is somehow involved in a stochastic # operation. # During graph accumulation we accumulate untyped constant nodes for # all non-stochastic values involved in a stochastic operation regardless # of whether or not they can be represented in BMG. During a later # pass we give error messages if we are unable to replace the unsupported # constant values with valid BMG nodes. @memoize def _add_constant(self, value: Any, t: type) -> bn.UntypedConstantNode: # Note that we memoize *after* we've canonicalized the value, # and we ensure that the type is part of the memoization key. # We do not want to get into a situation where some unexpected Python # rule says that the constant 1 and the constant 1.0 are the same. node = bn.UntypedConstantNode(value) self.add_node(node) return node def add_constant(self, value: Any) -> bn.UntypedConstantNode: """This takes any constant value of a supported type, creates a constant graph node for it, and adds it to the builder""" t = type(value) # TODO: This checks whether the types are *exactly* a supported # type, but we might have a value which is of a type derived # from ndarray or tensor. Consider fixing these checks to see # if the value is an instance of one of the supported types # rather than an exact type check. 
if t in supported_bool_types: value = bool(value) t = bool elif t in supported_int_types: value = int(value) t = int elif t in supported_float_types: value = float(value) t = float elif t in supported_tensor_types: # Note that this makes a *copy* of the tensor if # the operand is a tensor. We want to ensure that # the value we've captured in the graph accumulation # is NOT mutated in the unfortunate event that the # original tensor is mutated. value = torch.Tensor(value) t = torch.Tensor else: raise ValueError( "A constant value used as an operand of a stochastic " + "operation is required to be bool, int, float or tensor. " + f"This model uses a value of type {t.__name__}." ) return self._add_constant(value, t) def add_constant_of_matrix_type( self, value: Any, node_type: bt.BMGMatrixType ) -> ConstantNode: # If we need a simplex, add a simplex. Otherwise, # choose which kind of matrix node to create based on # the matrix element type. if isinstance(node_type, bt.SimplexMatrix): return self.add_simplex(value) if node_type.element_type == bt.real_element: return self.add_real_matrix(value) if node_type.element_type == bt.positive_real_element: return self.add_pos_real_matrix(value) if node_type.element_type == bt.negative_real_element: return self.add_neg_real_matrix(value) if node_type.element_type == bt.probability_element: return self.add_probability_matrix(value) if node_type.element_type == bt.natural_element: return self.add_natural_matrix(value) if node_type.element_type == bt.bool_element: return self.add_boolean_matrix(value) raise NotImplementedError( "add_constant_of_matrix_type not yet " + f"implemented for {node_type.long_name}" ) def add_constant_of_type( self, value: Any, node_type: bt.BMGLatticeType ) -> ConstantNode: """This takes any constant value of a supported type and creates a constant graph node of the stated type for it, and adds it to the builder""" if node_type == bt.Boolean: return self.add_boolean(bool(value)) if node_type == bt.Probability: return self.add_probability(float(value)) if node_type == bt.Natural: return self.add_natural(int(value)) if node_type == bt.PositiveReal: return self.add_pos_real(float(value)) if node_type == bt.NegativeReal: return self.add_neg_real(float(value)) if node_type == bt.Real: return self.add_real(float(value)) if node_type == bt.Tensor: if isinstance(value, torch.Tensor): return self.add_constant_tensor(value) return self.add_constant_tensor(torch.tensor(value)) if isinstance(node_type, bt.BMGMatrixType): return self.add_constant_of_matrix_type(value, node_type) raise NotImplementedError( "add_constant_of_type not yet " + f"implemented for {node_type.long_name}" ) @memoize def add_real(self, value: float) -> bn.RealNode: node = bn.RealNode(value) self.add_node(node) return node @memoize def add_probability(self, value: float) -> bn.ProbabilityNode: node = bn.ProbabilityNode(value) self.add_node(node) return node @memoize def add_pos_real(self, value: float) -> bn.PositiveRealNode: node = bn.PositiveRealNode(value) self.add_node(node) return node @memoize def add_boolean_matrix(self, value: torch.Tensor) -> bn.ConstantBooleanMatrixNode: assert len(value.size()) <= 2 node = bn.ConstantBooleanMatrixNode(value) self.add_node(node) return node @memoize def add_natural_matrix(self, value: torch.Tensor) -> bn.ConstantNaturalMatrixNode: assert len(value.size()) <= 2 node = bn.ConstantNaturalMatrixNode(value) self.add_node(node) return node @memoize def add_probability_matrix( self, value: torch.Tensor ) -> 
bn.ConstantProbabilityMatrixNode: assert len(value.size()) <= 2 node = bn.ConstantProbabilityMatrixNode(value) self.add_node(node) return node @memoize def add_simplex(self, value: torch.Tensor) -> bn.ConstantSimplexMatrixNode: assert len(value.size()) <= 2 node = bn.ConstantSimplexMatrixNode(value) self.add_node(node) return node @memoize def add_pos_real_matrix( self, value: torch.Tensor ) -> bn.ConstantPositiveRealMatrixNode: assert len(value.size()) <= 2 node = bn.ConstantPositiveRealMatrixNode(value) self.add_node(node) return node @memoize def add_neg_real_matrix( self, value: torch.Tensor ) -> bn.ConstantNegativeRealMatrixNode: assert len(value.size()) <= 2 node = bn.ConstantNegativeRealMatrixNode(value) self.add_node(node) return node @memoize def add_real_matrix(self, value: torch.Tensor) -> bn.ConstantRealMatrixNode: assert len(value.size()) <= 2 node = bn.ConstantRealMatrixNode(value) self.add_node(node) return node @memoize def add_neg_real(self, value: float) -> bn.NegativeRealNode: node = bn.NegativeRealNode(value) self.add_node(node) return node @memoize def add_natural(self, value: int) -> bn.NaturalNode: node = bn.NaturalNode(value) self.add_node(node) return node @memoize def add_boolean(self, value: bool) -> bn.BooleanNode: node = bn.BooleanNode(value) self.add_node(node) return node @memoize def add_constant_tensor(self, value: torch.Tensor) -> bn.ConstantTensorNode: node = bn.ConstantTensorNode(value) self.add_node(node) return node # #### # #### Graph accumulation for distributions # #### # TODO: This code is mostly but not entirely in alpha order # by distribution type; we might reorganize it to make it # slightly easier to follow. @memoize def add_bernoulli(self, probability: BMGNode) -> bn.BernoulliNode: node = bn.BernoulliNode(probability) self.add_node(node) return node @memoize def add_bernoulli_logit(self, probability: BMGNode) -> bn.BernoulliLogitNode: node = bn.BernoulliLogitNode(probability) self.add_node(node) return node @memoize def add_binomial(self, count: BMGNode, probability: BMGNode) -> bn.BinomialNode: node = bn.BinomialNode(count, probability) self.add_node(node) return node @memoize def add_binomial_logit( self, count: BMGNode, probability: BMGNode ) -> bn.BinomialLogitNode: node = bn.BinomialLogitNode(count, probability) self.add_node(node) return node @memoize def add_categorical(self, probability: BMGNode) -> bn.CategoricalNode: node = bn.CategoricalNode(probability) self.add_node(node) return node @memoize def add_categorical_logit(self, probability: BMGNode) -> bn.CategoricalLogitNode: node = bn.CategoricalLogitNode(probability) self.add_node(node) return node @memoize def add_chi2(self, df: BMGNode) -> bn.Chi2Node: node = bn.Chi2Node(df) self.add_node(node) return node @memoize def add_gamma(self, concentration: BMGNode, rate: BMGNode) -> bn.GammaNode: node = bn.GammaNode(concentration, rate) self.add_node(node) return node @memoize def add_halfcauchy(self, scale: BMGNode) -> bn.HalfCauchyNode: node = bn.HalfCauchyNode(scale) self.add_node(node) return node @memoize def add_normal(self, mu: BMGNode, sigma: BMGNode) -> bn.NormalNode: node = bn.NormalNode(mu, sigma) self.add_node(node) return node @memoize def add_halfnormal(self, sigma: BMGNode) -> bn.HalfNormalNode: node = bn.HalfNormalNode(sigma) self.add_node(node) return node @memoize def add_dirichlet(self, concentration: BMGNode) -> bn.DirichletNode: node = bn.DirichletNode(concentration) self.add_node(node) return node @memoize def add_studentt( self, df: BMGNode, loc: BMGNode, scale: 
BMGNode
    ) -> bn.StudentTNode:
        node = bn.StudentTNode(df, loc, scale)
        self.add_node(node)
        return node

    @memoize
    def add_uniform(self, low: BMGNode, high: BMGNode) -> bn.UniformNode:
        node = bn.UniformNode(low, high)
        self.add_node(node)
        return node

    @memoize
    def add_beta(self, alpha: BMGNode, beta: BMGNode) -> bn.BetaNode:
        node = bn.BetaNode(alpha, beta)
        self.add_node(node)
        return node

    @memoize
    def add_poisson(self, rate: BMGNode) -> bn.PoissonNode:
        node = bn.PoissonNode(rate)
        self.add_node(node)
        return node

    @memoize
    def add_flat(self) -> bn.FlatNode:
        node = bn.FlatNode()
        self.add_node(node)
        return node

    @memoize
    def add_lkj_cholesky(self, dim: BMGNode, eta: BMGNode) -> bn.LKJCholeskyNode:
        if isinstance(dim, bn.ConstantNode) and isinstance(dim.value, int):
            assert dim.value >= 2
        node = bn.LKJCholeskyNode(dim, eta)
        self.add_node(node)
        return node

    # ####
    # #### Graph accumulation for operators
    # ####

    # The handler methods here are both invoked directly, when, say,
    # there was an explicit addition in the original model, and
    # indirectly as the result of processing a function call such
    # as tensor.add.

    # TODO: This code is not very well organized; consider sorting it
    # into alpha order by operation.

    @memoize
    def add_greater_than(self, left: BMGNode, right: BMGNode) -> BMGNode:
        if isinstance(left, ConstantNode):
            if isinstance(right, ConstantNode):
                return self.add_constant(left.value > right.value)
        node = bn.GreaterThanNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_greater_than_equal(self, left: BMGNode, right: BMGNode) -> BMGNode:
        if isinstance(left, ConstantNode):
            if isinstance(right, ConstantNode):
                return self.add_constant(left.value >= right.value)
        node = bn.GreaterThanEqualNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_less_than(self, left: BMGNode, right: BMGNode) -> BMGNode:
        if isinstance(left, ConstantNode):
            if isinstance(right, ConstantNode):
                return self.add_constant(left.value < right.value)
        node = bn.LessThanNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_less_than_equal(self, left: BMGNode, right: BMGNode) -> BMGNode:
        if isinstance(left, ConstantNode):
            if isinstance(right, ConstantNode):
                return self.add_constant(left.value <= right.value)
        node = bn.LessThanEqualNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_equal(self, left: BMGNode, right: BMGNode) -> BMGNode:
        if isinstance(left, ConstantNode):
            if isinstance(right, ConstantNode):
                return self.add_constant(left.value == right.value)
        node = bn.EqualNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_not_equal(self, left: BMGNode, right: BMGNode) -> BMGNode:
        if isinstance(left, ConstantNode):
            if isinstance(right, ConstantNode):
                return self.add_constant(left.value != right.value)
        node = bn.NotEqualNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_is(self, left: BMGNode, right: BMGNode) -> BMGNode:
        node = bn.IsNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_is_not(self, left: BMGNode, right: BMGNode) -> BMGNode:
        node = bn.IsNotNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_in(self, left: BMGNode, right: BMGNode) -> BMGNode:
        node = bn.InNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_not_in(self, left: BMGNode, right: BMGNode) -> BMGNode:
        node = bn.NotInNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_addition(self, left: BMGNode, right: BMGNode) -> BMGNode:
        if isinstance(left, ConstantNode) and isinstance(right, ConstantNode):
            return self.add_constant(left.value + right.value)
        inps = [left, right]
        node = bn.AdditionNode(inps)
        self.add_node(node)
        return node

    @memoize
    def add_bitand(self, left: BMGNode, right: BMGNode) -> BMGNode:
        node = bn.BitAndNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_bitor(self, left: BMGNode, right: BMGNode) -> BMGNode:
        node = bn.BitOrNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_bitxor(self, left: BMGNode, right: BMGNode) -> BMGNode:
        node = bn.BitXorNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_floordiv(self, left: BMGNode, right: BMGNode) -> BMGNode:
        node = bn.FloorDivNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_lshift(self, left: BMGNode, right: BMGNode) -> BMGNode:
        node = bn.LShiftNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_mod(self, left: BMGNode, right: BMGNode) -> BMGNode:
        node = bn.ModNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_rshift(self, left: BMGNode, right: BMGNode) -> BMGNode:
        node = bn.RShiftNode(left, right)
        self.add_node(node)
        return node

    # No need to memoize this since the addition will be memoized.
    def add_subtraction(self, left: BMGNode, right: BMGNode) -> BMGNode:
        # TODO: We don't have a subtraction node; we render this as
        # left + (-right), which we do have. Should we have a subtraction
        # node? We could do this transformation in a problem-fixing pass,
        # like we do for division.
        return self.add_addition(left, self.add_negate(right))

    @memoize
    def add_multi_addition(self, *inputs: BMGNode) -> BMGNode:
        node = bn.AdditionNode(list(inputs))
        self.add_node(node)
        return node

    @memoize
    def add_multiplication(self, left: BMGNode, right: BMGNode) -> BMGNode:
        if isinstance(left, ConstantNode) and isinstance(right, ConstantNode):
            return self.add_constant(left.value * right.value)
        inps = [left, right]
        node = bn.MultiplicationNode(inps)
        self.add_node(node)
        return node

    @memoize
    def add_multi_multiplication(self, *inputs: BMGNode) -> BMGNode:
        node = bn.MultiplicationNode(list(inputs))
        self.add_node(node)
        return node

    @memoize
    def add_if_then_else(
        self, condition: BMGNode, consequence: BMGNode, alternative: BMGNode
    ) -> BMGNode:
        # If the condition is a constant then we can optimize away the
        # if-then-else node entirely.
        if bn.is_one(condition):
            return consequence
        if bn.is_zero(condition):
            return alternative
        node = bn.IfThenElseNode(condition, consequence, alternative)
        self.add_node(node)
        return node

    @memoize
    def add_choice(self, condition: BMGNode, *values: BMGNode) -> bn.ChoiceNode:
        vs = list(values)
        assert len(values) >= 2
        node = bn.ChoiceNode(condition, vs)
        self.add_node(node)
        return node

    @memoize
    def add_matrix_multiplication(self, left: BMGNode, right: BMGNode) -> BMGNode:
        if isinstance(left, ConstantNode) and isinstance(right, ConstantNode):
            return self.add_constant(torch.mm(left.value, right.value))
        node = bn.MatrixMultiplicationNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_matrix_scale(self, scalar: BMGNode, matrix: BMGNode) -> BMGNode:
        # The intended convention here is that the scalar comes first;
        # however, this cannot be checked here.
        # TODO[Walid]: Fix to match reverse order convention of torch.mul
        if isinstance(scalar, ConstantNode) and isinstance(matrix, ConstantNode):
            return self.add_constant(scalar.value * matrix.value)
        node = bn.MatrixScaleNode(scalar, matrix)
        self.add_node(node)
        return node

    @memoize
    def add_division(self, left: BMGNode, right: BMGNode) -> BMGNode:
        if isinstance(left, ConstantNode) and isinstance(right, ConstantNode):
            return self.add_constant(left.value / right.value)
        node = bn.DivisionNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_power(self, left: BMGNode, right: BMGNode) -> BMGNode:
        if isinstance(left, ConstantNode) and isinstance(right, ConstantNode):
            return self.add_constant(left.value**right.value)
        node = bn.PowerNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_index(self, left: bn.BMGNode, right: bn.BMGNode) -> bn.BMGNode:
        # Folding optimizations are done in the fixer.
        node = bn.IndexNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_item(self, input: bn.BMGNode) -> bn.BMGNode:
        node = bn.ItemNode(input)
        self.add_node(node)
        return node

    @memoize
    def add_vector_index(self, left: bn.BMGNode, right: bn.BMGNode) -> bn.BMGNode:
        # Folding optimizations are done in the fixer.
        node = bn.VectorIndexNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_column_index(self, left: bn.BMGNode, right: bn.BMGNode) -> bn.BMGNode:
        # Folding optimizations are done in the fixer.
        node = bn.ColumnIndexNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_negate(self, operand: BMGNode) -> BMGNode:
        # TODO: We could optimize -(-x) to x here.
        if isinstance(operand, ConstantNode):
            return self.add_constant(-operand.value)
        node = bn.NegateNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_invert(self, operand: BMGNode) -> BMGNode:
        node = bn.InvertNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_complement(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, ConstantNode):
            return self.add_constant(1 - operand.value)
        node = bn.ComplementNode(operand)
        self.add_node(node)
        return node

    # TODO: What should the result of NOT on a tensor be?
    # TODO: Should it be legal at all in the graph?
    # TODO: In Python, (not tensor(x)) is equal to (not x).
    # TODO: It is NOT equal to (tensor(not x)), which is what
    # TODO: you might expect.
    @memoize
    def add_not(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, ConstantNode):
            return self.add_constant(not operand.value)
        node = bn.NotNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_sum(self, operand: BMGNode) -> BMGNode:
        node = bn.SumNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_to_real(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, bn.RealNode):
            return operand
        if isinstance(operand, ConstantNode):
            return self.add_real(float(operand.value))
        node = bn.ToRealNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_to_int(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, bn.ConstantTensorNode):
            return self.add_constant(operand.value.int())
        if isinstance(operand, ConstantNode):
            return self.add_constant(int(operand.value))
        node = bn.ToIntNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_to_real_matrix(self, operand: BMGNode) -> BMGNode:
        node = bn.ToRealMatrixNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_to_positive_real(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, bn.PositiveRealNode):
            return operand
        if isinstance(operand, ConstantNode):
            return self.add_pos_real(float(operand.value))
        node = bn.ToPositiveRealNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_to_positive_real_matrix(self, operand: BMGNode) -> BMGNode:
        node = bn.ToPositiveRealMatrixNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_to_probability(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, bn.ProbabilityNode):
            return operand
        if isinstance(operand, ConstantNode):
            return self.add_probability(float(operand.value))
        node = bn.ToProbabilityNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_to_negative_real(self, operand: BMGNode) -> BMGNode:
        node = bn.ToNegativeRealNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_to_negative_real_matrix(self, operand: BMGNode) -> BMGNode:
        node = bn.ToNegativeRealMatrixNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_cholesky(self, operand: BMGNode) -> BMGNode:
        node = bn.CholeskyNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_exp(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, bn.ConstantTensorNode):
            return self.add_constant(torch.exp(operand.value))
        if isinstance(operand, ConstantNode):
            return self.add_constant(math.exp(operand.value))
        node = bn.ExpNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_exp2(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, ConstantNode):
            return self.add_constant(torch.exp2(operand.value))
        node = bn.Exp2Node(operand)
        self.add_node(node)
        return node

    @memoize
    def add_expm1(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, bn.ConstantTensorNode):
            return self.add_constant(torch.expm1(operand.value))
        if isinstance(operand, ConstantNode):
            return self.add_constant(torch.expm1(torch.tensor(operand.value)))
        node = bn.ExpM1Node(operand)
        self.add_node(node)
        return node

    @memoize
    def add_logistic(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, bn.ConstantTensorNode):
            return self.add_constant(torch.sigmoid(operand.value))
        if isinstance(operand, ConstantNode):
            return self.add_constant(torch.sigmoid(torch.tensor(operand.value)))
        node = bn.LogisticNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_phi(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, ConstantNode):
            return self.add_constant(phi(operand.value))
        node = bn.PhiNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_log(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, bn.ConstantTensorNode):
            return self.add_constant(torch.log(operand.value))
        if isinstance(operand, ConstantNode):
            return self.add_constant(math.log(operand.value))
        node = bn.LogNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_log10(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, bn.ConstantNode):
            return self.add_constant(torch.log10(operand.value))
        node = bn.Log10Node(operand)
        self.add_node(node)
        return node

    @memoize
    def add_log1p(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, bn.ConstantNode):
            return self.add_constant(torch.log1p(operand.value))
        node = bn.Log1pNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_log2(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, bn.ConstantNode):
            return self.add_constant(torch.log2(operand.value))
        node = bn.Log2Node(operand)
        self.add_node(node)
        return node

    @memoize
    def add_log1mexp(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, bn.ConstantTensorNode):
            return self.add_constant((1 - operand.value.exp()).log())
        if isinstance(operand, ConstantNode):
            return self.add_constant(math.log(1 - math.exp(operand.value)))
        node = bn.Log1mexpNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_transpose(self, operand: BMGNode) -> bn.TransposeNode:
        node = bn.TransposeNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_squareroot(self, operand: BMGNode) -> BMGNode:
        if isinstance(operand, bn.ConstantNode):
            return self.add_constant(torch.sqrt(operand.value))
        node = bn.SquareRootNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_tensor(self, size: torch.Size, *data: BMGNode) -> bn.TensorNode:
        node = bn.TensorNode(list(data), size)
        self.add_node(node)
        return node

    @memoize
    def add_to_matrix(
        self, rows: bn.NaturalNode, columns: bn.NaturalNode, *data: BMGNode
    ) -> bn.ToMatrixNode:
        node = bn.ToMatrixNode(rows, columns, list(data))
        self.add_node(node)
        return node

    @memoize
    def add_fill_matrix(
        self, value: BMGNode, rows: bn.NaturalNode, columns: bn.NaturalNode
    ) -> bn.FillMatrixNode:
        node = bn.FillMatrixNode(value, rows, columns)
        self.add_node(node)
        return node

    @memoize
    def add_broadcast(
        self, value: BMGNode, rows: bn.NaturalNode, columns: bn.NaturalNode
    ) -> bn.BroadcastNode:
        node = bn.BroadcastNode(value, rows, columns)
        self.add_node(node)
        return node

    @memoize
    def add_logsumexp(self, *inputs: BMGNode) -> bn.LogSumExpNode:
        node = bn.LogSumExpNode(list(inputs))
        self.add_node(node)
        return node

    @memoize
    def add_logsumexp_torch(
        self, input: BMGNode, dim: BMGNode, keepdim: BMGNode
    ) -> bn.LogSumExpTorchNode:
        node = bn.LogSumExpTorchNode(input, dim, keepdim)
        self.add_node(node)
        return node

    @memoize
    def add_logsumexp_vector(self, operand: BMGNode) -> bn.LogSumExpVectorNode:
        node = bn.LogSumExpVectorNode(operand)
        self.add_node(node)
        return node

    @memoize
    def add_logaddexp(self, left: bn.BMGNode, right: bn.BMGNode) -> bn.LogAddExpNode:
        node = bn.LogAddExpNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_log_prob(self, left: bn.BMGNode, right: bn.BMGNode) -> bn.LogProbNode:
        node = bn.LogProbNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_switch(self, *elements: BMGNode) -> bn.SwitchNode:
        # TODO: Verify that the list is well-formed.
        node = bn.SwitchNode(list(elements))
        self.add_node(node)
        return node

    # Do NOT memoize add_sample; each sample node must be unique.
    def add_sample(self, operand: bn.DistributionNode) -> bn.SampleNode:
        node = bn.SampleNode(operand)
        self.add_node(node)
        return node

    # TODO: Should this be idempotent?
    # TODO: Should it be an error to add two unequal observations to one node?
    def add_observation(self, observed: bn.SampleNode, value: Any) -> bn.Observation:
        node = bn.Observation(observed, value)
        self.add_node(node)
        return node

    @memoize
    def add_query(self, operator: BMGNode, rvidentifier: RVIdentifier) -> bn.Query:
        # TODO: BMG requires that the target of a query be classified
        # as an operator and that queries be unique; that is, every node
        # is queried *exactly* zero or one times. Rather than making
        # those restrictions here, instead detect bad queries in the
        # problem-fixing phase and report accordingly.
        node = bn.Query(operator, rvidentifier)
        self.add_node(node)
        return node

    @memoize
    def add_elementwise_multiplication(self, left: BMGNode, right: BMGNode) -> BMGNode:
        if isinstance(left, ConstantNode) and isinstance(right, ConstantNode):
            return self.add_constant(left.value * right.value)
        node = bn.ElementwiseMultiplyNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_matrix_addition(self, left: BMGNode, right: BMGNode) -> BMGNode:
        if isinstance(left, ConstantNode) and isinstance(right, ConstantNode):
            return self.add_constant(left.value + right.value)
        node = bn.MatrixAddNode(left, right)
        self.add_node(node)
        return node

    @memoize
    def add_matrix_sum(self, matrix: BMGNode) -> BMGNode:
        if isinstance(matrix, ConstantNode):
            return self.add_constant(matrix.value.sum())
        node = bn.MatrixSumNode(matrix)
        self.add_node(node)
        return node

    @memoize
    def add_matrix_exp(self, matrix: BMGNode) -> BMGNode:
        if isinstance(matrix, ConstantNode):
            return self.add_constant(matrix.value.exp())
        node = bn.MatrixExpNode(matrix)
        self.add_node(node)
        return node

    @memoize
    def add_matrix_phi(self, matrix: BMGNode) -> BMGNode:
        if isinstance(matrix, ConstantNode):
            return self.add_constant(phi(matrix.value))
        node = bn.MatrixPhiNode(matrix)
        self.add_node(node)
        return node

    @memoize
    def add_matrix_log(self, matrix: BMGNode) -> BMGNode:
        if isinstance(matrix, ConstantNode):
            return self.add_constant(matrix.value.log())
        node = bn.MatrixLogNode(matrix)
        self.add_node(node)
        return node

    @memoize
    def add_matrix_log1mexp(self, matrix: BMGNode) -> BMGNode:
        node = bn.MatrixLog1mexpNode(matrix)
        self.add_node(node)
        return node

    @memoize
    def add_matrix_complement(self, matrix: BMGNode) -> BMGNode:
        node = bn.MatrixComplementNode(matrix)
        self.add_node(node)
        return node

    @memoize
    def add_matrix_negate(self, matrix: BMGNode) -> BMGNode:
        node = bn.MatrixNegateNode(matrix)
        self.add_node(node)
        return node

    def add_exp_product(self, *inputs: BMGNode) -> bn.ExpProductFactorNode:
        # Note that factors are NOT deduplicated; this method is not
        # memoized. We need to be able to add multiple factors to the same
        # node, similar to the way we need to add multiple samples to a
        # distribution.
        node = bn.ExpProductFactorNode(list(inputs))
        self.add_node(node)
        return node

    def all_ancestor_nodes(self) -> List[BMGNode]:
        """Returns a topo-sorted list of nodes that are ancestors to any
        sample, observation, query or factor."""

        def is_root(n: BMGNode) -> bool:
            return (
                isinstance(n, bn.SampleNode)
                or isinstance(n, bn.Observation)
                or isinstance(n, bn.Query)
                or isinstance(n, bn.FactorNode)
            )

        return self._traverse(is_root)

    def all_nodes(self) -> List[BMGNode]:
        """Returns a topo-sorted list of all nodes."""
        return self._traverse(lambda n: n.is_leaf)

    def _traverse(self, is_root: Callable[[BMGNode], bool]) -> List[BMGNode]:
        """This returns a list of the reachable graph nodes in topologically
        sorted order. The ordering invariants are (1) all sample, observation,
        query and factor nodes are enumerated in the order they were added,
        (2) all inputs are enumerated before their outputs, and (3) inputs to
        the "left" are enumerated before those to the "right"."""

        # We require here that the graph is acyclic.

        # TODO: The graph should be acyclic by construction;
        # we detect cycles while executing the lifted model.
        # However, we might want to add a quick cycle-checking
        # pass here as a sanity check.

        def key(n: BMGNode) -> int:
            return self._nodes[n]

        # We cannot use a recursive algorithm because the graph may have
        # paths that are deeper than the recursion limit in Python.
        # Instead we'll use a list as a stack. But we cannot simply do
        # a normal iterative depth-first or postorder traversal because
        # that violates our stated invariants above: all inputs are always
        # enumerated before the node which inputs them, and nodes to the
        # left are enumerated before nodes to the right.
        #
        # What we do here is a modified depth-first traversal which maintains
        # our invariants.

        result = []
        work_stack = sorted(
            (n for n in self._nodes if is_root(n)), key=key, reverse=True
        )
        already_in_result = set()
        inputs_already_pushed = set()

        while len(work_stack) != 0:
            # Peek the top of the stack but do not pop it yet.
            current = work_stack[-1]
            if current in already_in_result:
                # The top of the stack has already been put into the
                # result list. There is nothing more to do with this node,
                # so we can simply pop it away.
                work_stack.pop()
            elif current in inputs_already_pushed:
                # The top of the stack is not on the result list, but we have
                # already pushed all of its inputs onto the stack. Since they
                # are gone from the stack, we must have already put all of them
                # onto the result list, and therefore we are justified in putting
                # this node onto the result list too.
                work_stack.pop()
                result.append(current)
                already_in_result.add(current)
            else:
                # The top of the stack is not on the result list and its inputs
                # have never been put onto the stack. Leave it on the stack so that
                # we come back to it later after all of its inputs have been
                # put on the result list, and put its inputs on the stack.
                #
                # We want to process the left inputs before the right inputs, so
                # reverse them so that the left inputs go on the stack last and
                # are therefore closer to the top.
                for i in reversed(current.inputs):
                    work_stack.append(i)
                inputs_already_pushed.add(current)

        return result

    def all_observations(self) -> List[bn.Observation]:
        return sorted(
            (n for n in self._nodes if isinstance(n, bn.Observation)),
            key=lambda n: self._nodes[n],
        )


def rv_to_query(bmg: BMGraphBuilder) -> Dict[RVIdentifier, bn.Query]:
    rv_to_query_map: Dict[RVIdentifier, bn.Query] = {}
    for node in bmg.all_nodes():
        if isinstance(node, bn.Query):
            rv_to_query_map[node.rv_identifier] = node
    return rv_to_query_map
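
# --- Illustrative sketch (editorial addition, not original source) ---
# The sketch below demonstrates two properties of the accumulation API
# documented above: @memoize deduplicates structurally identical operator
# nodes, while add_sample deliberately creates a fresh node on every call.
# It uses only methods defined in this file and assumes that BMGraphBuilder
# can be constructed with no arguments.
def _accumulation_sketch() -> None:
    bmg = BMGraphBuilder()
    flat = bmg.add_flat()  # distribution node (memoized)
    s1 = bmg.add_sample(flat)  # sample nodes are never deduplicated
    s2 = bmg.add_sample(flat)
    assert s1 is not s2
    t1 = bmg.add_addition(s1, s2)  # operator nodes are memoized, so the
    t2 = bmg.add_addition(s1, s2)  # same inputs yield the same node
    assert t1 is t2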
beanmachine-main
src/beanmachine/ppl/compiler/bm_graph_builder.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from abc import abstractmethod
from typing import Any, Union

import torch
from beanmachine.ppl.utils.memoize import memoize, MemoizedClass
from torch import Size

"""This module contains class definitions and helper functions
for describing the *types* of graph nodes and the *type restrictions*
on graph edges.

When we construct a graph we know all the "storage" types of
the nodes -- Boolean, integer, float, tensor -- as they were in
the original Python program. BMG requires that we ensure that
"semantic" type associations are made to each node in the graph.
The types in the BMG type system are as follows:

* Unknown: a pseudo-type used as a marker when the type of
  a node is undefined. We do not have to worry about this one.

There are six "scalar" types:

* Boolean (B)
* Probability (P) -- a real between 0.0 and 1.0
* Natural (N) -- a non-negative integer
* Positive Real (R+)
* Negative Real (R-)
* Real (R)

There are infinitely many "matrix" types, but they can be
divided into the following kinds:

* Matrix of booleans (MB[r, c])
* Matrix of naturals (MN[r, c])
* Matrix of probabilities (MP[r, c])
* Matrix of positive reals (MR+[r, c])
* Matrix of negative reals (MR-[r, c])
* Matrix of reals (MR[r, c])
* Row simplex (S[r, c]): A restriction on MP[r, c] such that
  every row adds to 1.0.

There are infinitely many because all matrix types track their number
of rows and columns. All matrix types are two-dimensional, and the
row and column counts are constants, not stochastic quantities.

Because a scalar and a 1x1 matrix are effectively the same type,
for the purposes of this analysis we will only consider matrix types.
That is, we will make "Real" and "Probability" and so on aliases for
"1x1 real matrix" and "1x1 probability matrix".

To facilitate analysis, we organize the infinite set of types into a
*lattice*. See below for further details.
"""

# TODO: We might also need:
# * Bounded natural -- a sample from a categorical
# * Positive definite -- a real matrix with positive eigenvalues


def _size_to_rc(size: Size):
    dimensions = len(size)
    assert dimensions <= 2
    if dimensions == 0:
        return 1, 1
    r = 1 if dimensions == 1 else size[0]
    c = size[0] if dimensions == 1 else size[1]
    return r, c


class BMGLatticeType:
    short_name: str
    long_name: str

    def __init__(self, short_name: str, long_name: str) -> None:
        self.short_name = short_name
        self.long_name = long_name

    def __str__(self) -> str:
        return self.short_name

    def is_singleton(self) -> bool:
        return False


class BMGElementType:
    short_name: str
    long_name: str

    def __init__(self, short_name: str, long_name: str) -> None:
        self.short_name = short_name
        self.long_name = long_name


bool_element = BMGElementType("B", "bool")
natural_element = BMGElementType("N", "natural")
probability_element = BMGElementType("P", "probability")
positive_real_element = BMGElementType("R+", "positive real")
negative_real_element = BMGElementType("R-", "negative real")
real_element = BMGElementType("R", "real")


class BMGMatrixType(BMGLatticeType):
    element_type: BMGElementType
    rows: int
    columns: int

    def __init__(
        self,
        element_type: BMGElementType,
        short_name: str,
        long_name: str,
        rows: int,
        columns: int,
    ) -> None:
        BMGLatticeType.__init__(self, short_name, long_name)
        self.element_type = element_type
        self.rows = rows
        self.columns = columns

    @abstractmethod
    def with_dimensions(self, rows: int, columns: int) -> "BMGMatrixType":
        pass

    def with_size(self, size: Size) -> "BMGMatrixType":
        # We store the values for a matrix in a tensor; tensors are
        # row-major. But the BMG type system expects a column-major
        # matrix. Here we get the rows and columns of the tensor size,
        # and swap them to make the column-major type.
        r, c = _size_to_rc(size)
        return self.with_dimensions(c, r)

    def is_singleton(self) -> bool:
        return isinstance(self, BMGMatrixType) and (self.rows * self.columns == 1)


class BroadcastMatrixType(BMGMatrixType):
    def __init__(self, element_type: BMGElementType, rows: int, columns: int) -> None:
        short_name = (
            element_type.short_name
            if rows == 1 and columns == 1
            else f"M{element_type.short_name}[{rows},{columns}]"
        )
        long_name = (
            element_type.long_name
            if rows == 1 and columns == 1
            else f"{rows} x {columns} {element_type.long_name} matrix"
        )
        BMGMatrixType.__init__(self, element_type, short_name, long_name, rows, columns)


# Note that all matrix type constructors are memoized in their initializer
# arguments; that way we ensure that any two MR[2,2]s are reference equal,
# which is a nice property to have.
class BooleanMatrix(BroadcastMatrixType, metaclass=MemoizedClass):
    def __init__(self, rows: int, columns: int) -> None:
        BroadcastMatrixType.__init__(self, bool_element, rows, columns)

    def with_dimensions(self, rows: int, columns: int) -> BMGMatrixType:
        return BooleanMatrix(rows, columns)


class NaturalMatrix(BroadcastMatrixType, metaclass=MemoizedClass):
    def __init__(self, rows: int, columns: int) -> None:
        BroadcastMatrixType.__init__(self, natural_element, rows, columns)

    def with_dimensions(self, rows: int, columns: int) -> BMGMatrixType:
        return NaturalMatrix(rows, columns)


class ProbabilityMatrix(BroadcastMatrixType, metaclass=MemoizedClass):
    def __init__(self, rows: int, columns: int) -> None:
        BroadcastMatrixType.__init__(self, probability_element, rows, columns)

    def with_dimensions(self, rows: int, columns: int) -> BMGMatrixType:
        return ProbabilityMatrix(rows, columns)


class PositiveRealMatrix(BroadcastMatrixType, metaclass=MemoizedClass):
    def __init__(self, rows: int, columns: int) -> None:
        BroadcastMatrixType.__init__(self, positive_real_element, rows, columns)

    def with_dimensions(self, rows: int, columns: int) -> BMGMatrixType:
        return PositiveRealMatrix(rows, columns)


class NegativeRealMatrix(BroadcastMatrixType, metaclass=MemoizedClass):
    def __init__(self, rows: int, columns: int) -> None:
        BroadcastMatrixType.__init__(self, negative_real_element, rows, columns)

    def with_dimensions(self, rows: int, columns: int) -> BMGMatrixType:
        return NegativeRealMatrix(rows, columns)


class RealMatrix(BroadcastMatrixType, metaclass=MemoizedClass):
    def __init__(self, rows: int, columns: int) -> None:
        BroadcastMatrixType.__init__(self, real_element, rows, columns)

    def with_dimensions(self, rows: int, columns: int) -> BMGMatrixType:
        return RealMatrix(rows, columns)


class SimplexMatrix(BMGMatrixType, metaclass=MemoizedClass):
    def __init__(self, rows: int, columns: int) -> None:
        BMGMatrixType.__init__(
            self,
            probability_element,
            f"S[{rows},{columns}]",
            f"{rows} x {columns} simplex matrix",
            rows,
            columns,
        )

    def with_dimensions(self, rows: int, columns: int) -> BMGMatrixType:
        return SimplexMatrix(rows, columns)


class OneHotMatrix(BMGMatrixType, metaclass=MemoizedClass):
    def __init__(self, rows: int, columns: int) -> None:
        short_name = "OH" if rows == 1 and columns == 1 else f"OH[{rows},{columns}]"
        long_name = (
            "one-hot"
            if rows == 1 and columns == 1
            else f"{rows} x {columns} one-hot matrix"
        )
        BMGMatrixType.__init__(self, bool_element, short_name, long_name, rows, columns)

    def with_dimensions(self, rows: int, columns: int) -> BMGMatrixType:
        return OneHotMatrix(rows, columns)


class ZeroMatrix(BMGMatrixType, metaclass=MemoizedClass):
    def __init__(self, rows: int, columns: int) -> None:
        short_name = "Z" if rows == 1 and columns == 1 else f"Z[{rows},{columns}]"
        long_name = (
            "zero" if rows == 1 and columns == 1 else f"{rows} x {columns} zero matrix"
        )
        BMGMatrixType.__init__(self, bool_element, short_name, long_name, rows, columns)

    def with_dimensions(self, rows: int, columns: int) -> BMGMatrixType:
        return ZeroMatrix(rows, columns)


bottom = BMGLatticeType("bottom", "bottom")
One = OneHotMatrix(1, 1)
Zero = ZeroMatrix(1, 1)
Boolean = BooleanMatrix(1, 1)
Natural = NaturalMatrix(1, 1)
Probability = ProbabilityMatrix(1, 1)
PositiveReal = PositiveRealMatrix(1, 1)
NegativeReal = NegativeRealMatrix(1, 1)
Real = RealMatrix(1, 1)
Tensor = BMGLatticeType("T", "tensor")
top: BMGLatticeType = Tensor

# This is not a real lattice type; rather, this is a marker to indicate
# that the node cannot have a lattice type assigned to it in the first
# place because we lack BMG typing rules for it.
Untypable = BMGLatticeType("U", "untypeable")

"""
When converting from an accumulated graph that uses Python types, we
can express the rules concisely by defining a *type lattice*. A type
lattice is a DAG which meets these conditions:

* Nodes are types
* Edges are directed from "larger" types to "smaller" types.
  (Note that there is no requirement for a total order.)
* There is a function called "supremum" which takes two types
  and returns the unique type that is the smallest type that
  is bigger than both arguments.
* There is a function called "infimum" which similarly is
  the unique largest type smaller than both arguments.
  (Right now we do not actually need this function in our
  type analysis so it is not implemented.)

For matrix types with a given number of rows r and columns c, the
type lattice is:

                         T                     (Tensor: unsupported by BMG)
                         |
                      MR[r,c]                  (Real matrix)
                     /       \
             MR+[r,c]         MR-[r,c]         (Positive and negative
             /      \               \           real matrix)
       MN[r,c]     MP[r,c]           \         (Natural and probability
             \     /     \            \         matrix)
              \   /     S[r,c]         \       (Row-simplex matrix)
            MB[r,c]       |             \      (Boolean matrix)
             |    \       |              \
             |     \      |               \
             |     OH[r,c]                 \   (One-hot matrix)
             |        \                     \
             +---------\----------------- Z[r,c]  (All-zero matrix)
                        \                  /
                         \                /
                          +-- BOTTOM ----+     (the bottom type)

OH -- one-hot -- is not a type in the BMG type system; we use it only
when analyzing the accumulated graph. The OH type is used to track the
situation where a constant matrix can be converted to both a Boolean
and a simplex matrix; if the rows are "one hot" -- all false (or 0)
except for one true (or 1) -- then the matrix is convertible to both
Boolean and simplex.

Similarly, Z -- the all-zero matrix -- is not a type in the BMG type
system. We use it to track cases where a matrix is convertible to both
Boolean and negative real.

Similarly, T (tensor) -- the top type -- is not found in the BMG type
system. The top type is the type more general than all other types,
and is used for situations such as attempting to resolve "what is the
type that is more general than both a 1x2 matrix and a 2x2 matrix?",
or to represent matrix types not supported in BMG such as
3-dimensional matrices.

The BOTTOM type is the type that has no values; it is similarly used
as a convenience when you need a type more specific than any other
type; it is not in the BMG type system.

Why is this useful?

Our goal is to generate a graph that meets all the requirements of the
BMG type system. In order to do so, we compute a *requirement object*
for each *edge* in the graph.

There are two kinds of requirements: "input must be exactly type X"
and "input must be type X or smaller". The first kind of requirement
we call an "exact requirement" and the latter is an "upper bound
requirement". (We do not at this time have any scenarios that need
"lower bound requirements" but if we need them we can add them.)

Once we know the requirements on the *incoming* edges to a node, we
can check to see if the requirements are met by the nodes.

* If they are, then we're good.
* If not then we can do a graph mutation which causes the requirements
  to be met.
* If there is no such mutation then we can report an error.

How do we compute the requirements for an edge? It depends on the
kind of graph node that the edge is attached to. A Bernoulli node, for
example, requires that its input be "exactly P". A "to real" node
requires that its input be "R or smaller". Some nodes however
introduce more complex requirements.

The requirements of a multiplication node, for instance, are:

* the input types must be greater than or equal to P
* the input types must be exactly the same

We do not have a requirement object for either "greater than or
equal", or for "this edge must be the same as that edge". And we have
one more factor to throw in: the output type of a multiplication is
the same as its input types, so we wish to find the *smallest
possible* restriction on the input types so that the multiplication's
type is minimized.

(That is: if we have a multiplication of a natural by a probability,
we do not wish to require that both be converted to reals, because
then the multiplication node could not be used in a context where a
positive real was required. We want the smallest type that works:
positive real.)

How are we going to do this?

We generate the edge requirements for a multiplication as follows: the
requirement on both input edges is that they be *exactly* equal to the
*supremum* of the *infimum types* of the two inputs.

The infimum type is the *smallest* type that each *node* in the graph
could possibly be. We call this type the "infimum type" of the node
because it is the infimum -- the *greatest lower bound* -- in the
lattice.

It might not be clear why that works. Let's work through an example.

Suppose we have a sample from a beta; its infimum type is Probability
because that's the only type a sample from a beta can be. And suppose
we have a sample from a binomial; its infimum type is Natural, again,
because that's the only type it can be.

Now suppose we multiply them. What restrictions go on the left and
right input edges of the multiplication?

The supremum of Natural and Probability is PositiveReal, so we put an
"exactly PositiveReal" restriction on the two edges.

During the "fix problems" phase, we see that we have an edge from a
multiplication to a sample of type Probability, but there is a
requirement that it be a PositiveReal, so we insert a
ToPositiveRealNode between the multiplication and the sample. And then
similarly for the sample of type Natural. The result is that we have a
multiplication node that meets its requirements: the input types are
the same, and the output type is the smallest type it could possibly
be: PositiveReal.
""" # This is a map from (class, class) to (int, int)=>BMGMatrix _lookup_table = None def _lookup(): global _lookup_table if _lookup_table is None: R = Real.__class__ RP = PositiveReal.__class__ RN = NegativeReal.__class__ P = Probability.__class__ S = SimplexMatrix(1, 1).__class__ N = Natural.__class__ B = Boolean.__class__ OH = One.__class__ Z = Zero.__class__ _lookup_table = { (R, R): RealMatrix, (R, RP): RealMatrix, (R, RN): RealMatrix, (R, P): RealMatrix, (R, S): RealMatrix, (R, N): RealMatrix, (R, B): RealMatrix, (R, OH): RealMatrix, (R, Z): RealMatrix, (RP, R): RealMatrix, (RP, RP): PositiveRealMatrix, (RP, RN): RealMatrix, (RP, P): PositiveRealMatrix, (RP, S): PositiveRealMatrix, (RP, N): PositiveRealMatrix, (RP, B): PositiveRealMatrix, (RP, OH): PositiveRealMatrix, (RP, Z): PositiveRealMatrix, (RN, R): RealMatrix, (RN, RP): RealMatrix, (RN, RN): NegativeRealMatrix, (RN, P): RealMatrix, (RN, S): RealMatrix, (RN, N): RealMatrix, (RN, B): RealMatrix, (RN, OH): RealMatrix, (RN, Z): NegativeRealMatrix, (P, R): RealMatrix, (P, RP): PositiveRealMatrix, (P, RN): RealMatrix, (P, P): ProbabilityMatrix, (P, S): ProbabilityMatrix, (P, N): PositiveRealMatrix, (P, B): ProbabilityMatrix, (P, OH): ProbabilityMatrix, (P, Z): ProbabilityMatrix, (S, R): RealMatrix, (S, RP): PositiveRealMatrix, (S, RN): RealMatrix, (S, P): ProbabilityMatrix, (S, S): SimplexMatrix, (S, N): PositiveRealMatrix, (S, B): ProbabilityMatrix, (S, OH): SimplexMatrix, (S, Z): ProbabilityMatrix, (N, R): RealMatrix, (N, RP): PositiveRealMatrix, (N, RN): RealMatrix, (N, P): PositiveRealMatrix, (N, S): PositiveRealMatrix, (N, N): NaturalMatrix, (N, B): NaturalMatrix, (N, OH): NaturalMatrix, (N, Z): NaturalMatrix, (B, R): RealMatrix, (B, RP): PositiveRealMatrix, (B, RN): RealMatrix, (B, P): ProbabilityMatrix, (B, S): ProbabilityMatrix, (B, N): NaturalMatrix, (B, B): BooleanMatrix, (B, OH): BooleanMatrix, (B, Z): BooleanMatrix, (OH, R): RealMatrix, (OH, RP): PositiveRealMatrix, (OH, RN): RealMatrix, (OH, P): ProbabilityMatrix, (OH, S): SimplexMatrix, (OH, N): NaturalMatrix, (OH, B): BooleanMatrix, (OH, OH): OneHotMatrix, (OH, Z): BooleanMatrix, (Z, R): RealMatrix, (Z, RP): PositiveRealMatrix, (Z, RN): NegativeRealMatrix, (Z, P): ProbabilityMatrix, (Z, S): ProbabilityMatrix, (Z, N): NaturalMatrix, (Z, B): BooleanMatrix, (Z, OH): BooleanMatrix, (Z, Z): ZeroMatrix, } return _lookup_table @memoize def _supremum(t: BMGLatticeType, u: BMGLatticeType) -> BMGLatticeType: """Takes two BMG types; returns the smallest type that is greater than or equal to both.""" assert t != Untypable and u != Untypable if t == u: return t if t == bottom: return u if u == bottom: return t if t == top or u == top: return top assert isinstance(t, BMGMatrixType) assert isinstance(u, BMGMatrixType) if t.rows != u.rows or t.columns != u.columns: return Tensor # If we've made it here, they are unequal types but have the # same dimensions, and both are matrix types. 
return _lookup()[(t.__class__, u.__class__)](t.rows, t.columns) # We can extend the two-argument supremum function to any number of arguments: def supremum(*ts: BMGLatticeType) -> BMGLatticeType: """Takes any number of BMG types; returns the smallest type that is greater than or equal to all of them.""" result = bottom for t in ts: result = _supremum(result, t) return result def is_convertible_to(source: BMGLatticeType, target: BMGLatticeType) -> bool: return _supremum(source, target) == target simplex_precision = 1e-10 def _type_of_matrix(v: torch.Tensor) -> BMGLatticeType: elements = v.numel() # If we have tensor([]) then that is not useful as a value # or a matrix; just call it a tensor. if elements == 0: return Tensor # If we have a single element tensor no matter what its dimensionality, # treat it as a single value. if elements == 1: return type_of_value(float(v)) # We have more than one element. What's the shape? shape = v.shape dimensions = len(shape) # If we have more than two dimensions then we cannot make it a matrix. # CONSIDER: Suppose we have something like [[[10, 20]]]] which is 1 x 1 x 2. # We could reduce that to a 1 x 2 matrix if we needed to. We might discard # sizes on the right equal to one. # We have the rows and columns of the original tensor, which is row-major. # But in BMG, constant matrices are expressed in column-major form. # Therefore we swap rows and columns here. if dimensions > 2: return Tensor tensor_rows, tensor_cols = _size_to_rc(shape) # However, for the purposes of analysis below, we still do it row by # row because that is more convenient when working with tensors: v = v.view(tensor_rows, tensor_cols) c = tensor_rows r = tensor_cols # We've got the shape. What is the smallest type # that is greater than or equal to the smallest type of # all the elements? sup = supremum(*[type_of_value(element) for row in v for element in row]) # We should get a 1x1 matrix out; there should be no way to get # top or bottom out. assert isinstance(sup, BMGMatrixType) assert sup.rows == 1 assert sup.columns == 1 if sup in {Real, PositiveReal, NegativeReal, Natural}: return sup.with_dimensions(r, c) # The only remaining possibilities are: # # * Every element was 0 -- sup is Zero # * Every element was 1 -- sup is One # * Every element was 0 or 1 -- sup is Boolean # * At least one element was between 0 and 1 -- sup is Probability # # In the first two cases, we might have a one-hot. # In the third case, it is possible that we have a simplex. assert sup in {Boolean, Zero, One, Probability} sums_to_one = all(abs(float(row.sum()) - 1.0) <= simplex_precision for row in v) if sums_to_one: if sup == Probability: return SimplexMatrix(r, c) return OneHotMatrix(r, c) # It is not a simplex or a one-hot. Is it a matrix of probabilities that # do not sum to one? if sup == Probability: return sup.with_dimensions(r, c) # The only remaining possibilities are all zeros, all ones, # or some mixture of zero and one. # # If we have all zeros then this could be treated as either a matrix # of Booleans or a matrix of negative reals, and we do not know which # we will need; matrix of zeros is the type smaller than both those, # so return it: if sup == Zero: return sup.with_dimensions(r, c) # The only remaining possibility is matrix of all ones, or matrix # of some zeros and some ones. Either way, the smallest type # left is matrix of Booleans. 
return BooleanMatrix(r, c) def type_of_value(v: Any) -> BMGLatticeType: """This computes the smallest BMG type that a given value fits into.""" if isinstance(v, torch.Tensor): return _type_of_matrix(v) if isinstance(v, bool): return One if v else Zero if isinstance(v, int): if v == 0: return Zero if v == 1: return One if v >= 2: return Natural return NegativeReal if isinstance(v, float): # TODO: add a range check to make sure it fits into the integer # size expected by BMG if v.is_integer(): return type_of_value(int(v)) if 0.0 <= v: if v <= 1.0: return Probability return PositiveReal return NegativeReal return Untypable def is_zero(v: Any) -> bool: return type_of_value(v) == Zero def is_one(v: Any) -> bool: return type_of_value(v) == One def lattice_to_bmg(t: BMGLatticeType) -> BMGLatticeType: # There are situations where we have a Zero or One type in hand # but those are just used for type convertibility analysis; # we sometimes actually need a valid BMG type. In those # situations, choose Boolean. assert t is not Tensor assert t is not Untypable if isinstance(t, OneHotMatrix) or isinstance(t, ZeroMatrix): return BooleanMatrix(t.rows, t.columns) return t # TODO: Move this to bmg_requirements.py # We need to be able to express requirements on inputs; # for example the input to a Bernoulli must be *exactly* a # Probability, but the input to a ToPositiveReal must have # any type smaller than or equal to PositiveReal. # # That is to say, we need to express *exact bounds* and # *upper bounds*. At this time we do not need to express # *lower bounds*; if we do, we can implement it using # the same technique as here. # # To express an upper bound, we will wrap a type object # in an upper bound wrapper; notice that the upper_bound # factory method is memoized, so we can do reference equality # to check to see if two bounds are equal. # # To express an exact bound, we'll just use the unwrapped type # object itself; the vast majority of bounds will be exact bounds # and I do not want to litter the code with calls to an "exact" # helper method. # # The fact that we have unified the types which mean "a single value # of a given type" and "a 1x1 matrix of that type" leads to an unfortunate # wrinkle: there are a small number of rare situations where we must # distinguish between the two. For example, it is bizarre to have the input # tensor([1]) to a Dirichlet, but it is legal. When we generate the BMG code # for that, we need to ensure that the corresponding constant node is created # via add_constant_pos_matrix(), not add_constant_pos(). # # Rather than change the type system so that it distinguishes more clearly # between single values and 1x1 matrices, we will just add a "force it to # be a matrix" requirement; the problem fixer can then use that to ensure # that the correct node is generated. # # We also occasionally need to express that an input edge has no restriction # on it whatsoever; we'll use a singleton object for that. # We should never create a requirement of a "fake" type. 
_invalid_requirement_types = {Zero, One, Untypable} # TODO: Mark this as abstract class BaseRequirement: short_name: str long_name: str def __init__(self, short_name: str, long_name: str) -> None: self.short_name = short_name self.long_name = long_name class AnyRequirement(BaseRequirement): def __init__(self) -> None: BaseRequirement.__init__(self, "any", "any") any_requirement = AnyRequirement() class AnyRealMatrix(BaseRequirement): # BMG's matrix multiplication node requires that both inputs be # a real, positive real, negative real or probability matrix. # This singleton represents that requirement. def __init__(self) -> None: BaseRequirement.__init__(self, "ARM", "any real matrix") any_real_matrix = AnyRealMatrix() class AnyPosRealMatrix(BaseRequirement): # BMG's matrix log node requires that its input be a R+ or P matrix. # This singleton represents that requirement. def __init__(self) -> None: BaseRequirement.__init__(self, "APRM", "any positive real matrix") any_pos_real_matrix = AnyPosRealMatrix() # TODO: Memoize these, remove memoization of construction functions below. class UpperBound(BaseRequirement): bound: BMGLatticeType def __init__(self, bound: BMGLatticeType) -> None: assert bound not in _invalid_requirement_types self.bound = bound BaseRequirement.__init__(self, f"<={bound.short_name}", f"<={bound.long_name}") class AlwaysMatrix(BaseRequirement): bound: BMGMatrixType def __init__(self, bound: BMGMatrixType) -> None: assert bound not in _invalid_requirement_types self.bound = bound # We won't bother to make these have a special representation # when we display requirements on edges in DOT. BaseRequirement.__init__(self, bound.short_name, bound.long_name) Requirement = Union[BMGLatticeType, BaseRequirement] @memoize def upper_bound(bound: Requirement) -> BaseRequirement: if isinstance(bound, UpperBound): return bound if isinstance(bound, AlwaysMatrix): return upper_bound(bound.bound) if isinstance(bound, BMGLatticeType): return UpperBound(bound) assert bound is any_requirement return bound @memoize def always_matrix(bound: BMGMatrixType) -> Requirement: if bound.rows != 1 or bound.columns != 1: # No need for a special annotation if it already # is a multi-dimensional matrix. return bound return AlwaysMatrix(bound) def requirement_to_type(r: Requirement) -> BMGLatticeType: if isinstance(r, UpperBound): return r.bound if isinstance(r, AlwaysMatrix): return r.bound assert isinstance(r, BMGLatticeType) return r def must_be_matrix(r: Requirement) -> bool: """Does the requirement indicate that the edge must be a matrix?""" if r is any_requirement: return False if r is any_real_matrix: return True if isinstance(r, AlwaysMatrix): return True t = requirement_to_type(r) if isinstance(t, BMGMatrixType): return t.rows != 1 or t.columns != 1 return False def is_atomic(t: BMGLatticeType) -> bool: return ( isinstance(t, BMGMatrixType) and t.rows == 1 and t.columns == 1 and not isinstance(t, SimplexMatrix) )
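
# --- Illustrative sketch (editorial addition, not original source) ---
# A few concrete evaluations of the type analysis defined above, following
# the worked example in the lattice documentation: constants are classified
# into the smallest lattice type that holds them, and the supremum of
# Natural and Probability is PositiveReal. The reference-equality (`is`)
# checks rely on the memoized type constructors noted above.
def _lattice_sketch() -> None:
    assert type_of_value(0.5) is Probability
    assert type_of_value(2) is Natural
    assert type_of_value(True) is One
    assert supremum(Natural, Probability) is PositiveReal
    # A tensor of probabilities whose rows sum to 1.0 is a simplex; note
    # that the rows and columns are swapped into column-major form:
    assert type_of_value(torch.tensor([0.25, 0.75])) is SimplexMatrix(2, 1)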
beanmachine-main
src/beanmachine/ppl/compiler/bmg_types.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.bmg_nodes import (
    BernoulliNode,
    BMGNode,
    ExpNode,
    SampleNode,
    ToIntNode,
    ToPositiveRealNode,
    ToProbabilityNode,
    ToRealNode,
    UnaryOperatorNode,
)
from beanmachine.ppl.compiler.bmg_types import is_one
from beanmachine.ppl.compiler.error_report import ErrorReport
from beanmachine.ppl.compiler.fix_problem import GraphFixerResult


def _is_conversion(n: BMGNode) -> bool:
    return (
        isinstance(n, ToPositiveRealNode)
        or isinstance(n, ToProbabilityNode)
        or isinstance(n, ToRealNode)
        or isinstance(n, ToIntNode)
    )


def _skip_conversions(n: BMGNode) -> BMGNode:
    while _is_conversion(n):
        assert isinstance(n, UnaryOperatorNode)
        n = n.operand
    return n


# A common technique in model design is to boost the probability density
# score of a particular quantity by converting it to a probability
# and then observing that a coin flip of that probability comes up heads.
# This should be logically equivalent to boosting by adding an EXP_PRODUCT
# factor, but when we run models like that through BMG inference, we're
# getting different results than when we add a factor. We suspect that is
# because of loss of precision. We could avoid the precision problem by
# using the logits parameter of the Bernoulli distribution, but that is not
# yet supported.
#
# This pattern is often used to marginalize a model -- to remove all
# unobserved discrete variables -- in order to use an inference algorithm
# (like NUTS) that cannot directly handle them.
#
# To work around the problem while we diagnose it, we can use this fixer.
# It looks for graphs of the form:
#
# SOMETHING --> EXP --> TO_PROB --> BERNOULLI --> SAMPLE --> OBSERVE TRUE
#
# and converts them to
#
# SOMETHING --> EXP --> TO_PROB
#          \
#           --> EXP_PRODUCT


def observe_true_fixer(bmg: BMGraphBuilder) -> GraphFixerResult:
    made_change = False
    for o in bmg.all_observations():
        if not is_one(o.value):
            continue
        sample = o.observed
        if not isinstance(sample, SampleNode):
            continue
        bern = sample.operand
        if not isinstance(bern, BernoulliNode):
            continue
        exp = _skip_conversions(bern.probability)
        if not isinstance(exp, ExpNode):
            continue
        bmg.add_exp_product(exp.operand)
        # Remove the observation.
        bmg.remove_leaf(o)
        # If the Bernoulli and its sample are leaves (not used), we remove them
        # too so as not to have an unobserved discrete variable remaining in the
        # graph.
        #
        # In the long run, we should add a pass that removes dead code so we
        # don't need to do this. See T127866378.
        if sample.is_leaf:
            bmg.remove_leaf(sample)
        if bern.is_leaf:
            bmg.remove_leaf(bern)
        made_change = True
    return bmg, made_change, ErrorReport()
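
# --- Illustrative sketch (editorial addition, not original source) ---
# This builds exactly the shape the fixer matches and applies it. The
# "something" node must not be a constant (otherwise the builder's
# constant folding would eliminate the EXP node), so we use a sample of
# a flat distribution. It assumes BMGraphBuilder() takes no constructor
# arguments and that the builder exposes an add_bernoulli helper, in the
# style of the other add_* accumulators.
def _observe_true_sketch() -> None:
    bmg = BMGraphBuilder()
    something = bmg.add_sample(bmg.add_flat())  # non-constant operand
    e = bmg.add_exp(something)  # EXP
    p = bmg.add_to_probability(e)  # TO_PROB
    b = bmg.add_bernoulli(p)  # BERNOULLI (assumed helper)
    s = bmg.add_sample(b)  # SAMPLE
    bmg.add_observation(s, True)  # OBSERVE TRUE
    bmg, made_change, errors = observe_true_fixer(bmg)
    assert made_change and not errors.any()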
beanmachine-main
src/beanmachine/ppl/compiler/fix_observe_true.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import json as json_
from typing import Any, Optional

from beanmachine.ppl.compiler.profiler import event_list_to_report


class PerformanceReport:
    json: Optional[str] = None

    def _str(self, indent: str) -> str:
        s = ""
        for k, v in vars(self).items():
            if k == "json" or k == "profiler_data":
                continue
            s += indent + k + ":"
            if isinstance(v, PerformanceReport):
                s += "\n" + v._str(indent + " ")
            elif isinstance(v, list):
                s += " [" + ",".join(str(i) for i in v) + "]\n"
            else:
                s += " " + str(v) + "\n"
        return s

    def __str__(self) -> str:
        return self._str("")


def _to_perf_rep(v: Any) -> Any:
    if isinstance(v, dict):
        p = PerformanceReport()
        for key, value in v.items():
            value = _to_perf_rep(value)
            setattr(p, key, value)
        return p
    if isinstance(v, list):
        for i in range(len(v)):
            v[i] = _to_perf_rep(v[i])
        return v
    return v


def json_to_perf_report(json: str) -> PerformanceReport:
    d = json_.loads(json)
    p = _to_perf_rep(d)
    assert isinstance(p, PerformanceReport)
    p.json = json
    if hasattr(p, "profiler_data"):
        # pyre-ignore
        p.bmg_profiler_report = event_list_to_report(p.profiler_data)
    return p
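
# --- Illustrative sketch (editorial addition, not original source) ---
# json_to_perf_report turns a JSON object into a PerformanceReport whose
# attributes mirror the JSON keys; nested objects recurse into nested
# PerformanceReports, and __str__ renders them with increasing indentation.
def _perf_report_sketch() -> None:
    report = json_to_perf_report('{"phase": {"calls": 2, "times": [1, 2]}}')
    assert isinstance(report.phase, PerformanceReport)  # nested dicts recurse
    assert report.phase.calls == 2
    print(report)
    # Output resembles:
    # phase:
    #  calls: 2
    #  times: [1,2]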
beanmachine-main
src/beanmachine/ppl/compiler/performance_report.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""This module declares types used for error detection and reporting
when compiling Bean Machine models to Bean Machine Graph."""

from abc import ABC
from typing import List, Set

from beanmachine.ppl.compiler.bmg_nodes import (
    BMGNode,
    MatrixMultiplicationNode,
    Observation,
    SampleNode,
)
from beanmachine.ppl.compiler.bmg_types import (
    BMGLatticeType,
    BMGMatrixType,
    Requirement,
    requirement_to_type,
)
from beanmachine.ppl.compiler.execution_context import FunctionCall
from beanmachine.ppl.compiler.graph_labels import get_node_error_label
from beanmachine.ppl.compiler.sizer import Size
from beanmachine.ppl.utils.a_or_an import a_or_an, A_or_An


class BMGError(ABC):
    pass


class Violation(BMGError):
    node: BMGNode
    node_type: BMGLatticeType
    requirement: Requirement
    consumer: BMGNode
    edge: str
    node_locations: Set[FunctionCall]

    def __init__(
        self,
        node: BMGNode,
        node_type: BMGLatticeType,
        requirement: Requirement,
        consumer: BMGNode,
        edge: str,
        node_locations: Set[FunctionCall],
    ) -> None:
        self.node = node
        self.node_type = node_type
        self.requirement = requirement
        self.consumer = consumer
        self.edge = edge
        self.node_locations = node_locations

    def __str__(self) -> str:
        r = self.requirement
        t = requirement_to_type(r)
        assert isinstance(t, BMGLatticeType)
        # TODO: Fix this error message for the case where we require
        # a matrix but we can only get a scalar value.
        consumer_label = get_node_error_label(self.consumer)
        msg = (
            f"The {self.edge} of {a_or_an(consumer_label)} "
            + f"is required to be {a_or_an(t.long_name)} "
            + f"but is {a_or_an(self.node_type.long_name)}."
        )
        if len(self.node_locations) > 0:
            msg += f"\nThe {consumer_label} was created in function call "
            msg += ", ".join(sorted(str(loc) for loc in self.node_locations))
            msg += "."
        return msg


class ImpossibleObservation(BMGError):
    node: Observation
    distribution_type: BMGLatticeType

    def __init__(self, node: Observation, distribution_type: BMGLatticeType) -> None:
        self.node = node
        self.distribution_type = distribution_type

    def __str__(self) -> str:
        v = self.node.value
        s = self.node.observed
        assert isinstance(s, SampleNode)
        d = get_node_error_label(s.operand)
        t = self.distribution_type.long_name
        msg = (
            f"{A_or_An(d)} distribution is observed to have value {v} "
            + f"but only produces samples of type {t}."
        )
        return msg


class UnsupportedNode(BMGError):
    # Graph node "consumer" has a parent "node" which is not supported by BMG.
    # Give an error describing the offending node, the consumer which uses its
    # value, and the label of the edge connecting them.
    node: BMGNode
    consumer: BMGNode
    edge: str
    node_locations: Set[FunctionCall]

    def __init__(
        self,
        node: BMGNode,
        consumer: BMGNode,
        edge: str,
        node_locations: Set[FunctionCall],
    ) -> None:
        self.node = node
        self.consumer = consumer
        self.edge = edge
        self.node_locations = node_locations

    def __str__(self) -> str:
        # TODO: Improve wording and diagnosis.
        msg = f"The model uses {a_or_an(get_node_error_label(self.node))} "
        msg += "operation unsupported by Bean Machine Graph."
        if len(self.node_locations) > 0:
            msg += "\nThe unsupported node was created in function call "
            msg += ", ".join(sorted(str(loc) for loc in self.node_locations))
            msg += "."
        else:
            msg += f"\nThe unsupported node is the {self.edge} "
            msg += f"of {a_or_an(get_node_error_label(self.consumer))}."
        return msg


class BadMatrixMultiplication(BMGError):
    node: MatrixMultiplicationNode
    left_type: BMGMatrixType
    right_type: BMGMatrixType
    node_locations: Set[FunctionCall]

    def __init__(
        self,
        node: MatrixMultiplicationNode,
        left_type: BMGMatrixType,
        right_type: BMGMatrixType,
        node_locations: Set[FunctionCall],
    ) -> None:
        self.node = node
        self.left_type = left_type
        self.right_type = right_type
        self.node_locations = node_locations

    def __str__(self) -> str:
        # TODO: Improve wording and diagnosis.
        msg = f"The model uses {a_or_an(get_node_error_label(self.node))} "
        msg += "operation unsupported by Bean Machine Graph.\nThe dimensions of the"
        msg += f" operands are {self.left_type.rows}x{self.left_type.columns} and "
        msg += f"{self.right_type.rows}x{self.right_type.columns}."
        if len(self.node_locations) > 0:
            msg += "\nThe unsupported node was created in function call "
            msg += ", ".join(sorted(str(loc) for loc in self.node_locations))
            msg += "."
        return msg


class UntypableNode(BMGError):
    node: BMGNode
    node_locations: Set[FunctionCall]

    def __init__(
        self,
        node: BMGNode,
        node_locations: Set[FunctionCall],
    ) -> None:
        self.node = node
        self.node_locations = node_locations

    def __str__(self) -> str:
        msg = "INTERNAL COMPILER ERROR: Untypable node\n"
        msg += "(This indicates a defect in the compiler, not in the model.)\n"
        msg += f"The model uses {a_or_an(get_node_error_label(self.node))} node.\n"
        msg += "The compiler is unable to determine its type in the Bean Machine Graph"
        msg += " type system."
        if len(self.node_locations) > 0:
            msg += "\nThe untypable node was created in function call "
            msg += ", ".join(sorted(str(loc) for loc in self.node_locations))
            msg += "."
        return msg


class UnsizableNode(BMGError):
    node: BMGNode
    node_locations: Set[FunctionCall]

    def __init__(
        self,
        node: BMGNode,
        input_sizes: List[Size],
        node_locations: Set[FunctionCall],
    ) -> None:
        self.node = node
        self.node_locations = node_locations
        self.input_sizes = input_sizes

    def __str__(self) -> str:
        msg = (
            f"The node {get_node_error_label(self.node)} cannot be sized. "
            f"The operand sizes may be incompatible or the size may not be "
            f"computable at compile time. The operand sizes are: {self.input_sizes}"
        )
        if len(self.node_locations) > 0:
            msg += "\nThe unsizable node was created in function call "
            msg += ", ".join(sorted(str(loc) for loc in self.node_locations))
            msg += "."
        return msg


class ErrorReport:
    errors: List[BMGError]

    def __init__(self) -> None:
        self.errors = []

    def add_error(self, error: BMGError) -> None:
        self.errors.append(error)

    def raise_errors(self) -> None:
        if len(self.errors) != 0:
            # TODO: Better error
            raise ValueError(str(self))

    def any(self) -> bool:
        return len(self.errors) != 0

    def __str__(self) -> str:
        return "\n".join(sorted(str(e) for e in self.errors))
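
# --- Illustrative sketch (editorial addition, not original source) ---
# ErrorReport simply accumulates BMGError objects and renders them sorted;
# raise_errors throws only when at least one error has been added.
def _error_report_sketch() -> None:
    errors = ErrorReport()
    assert not errors.any()
    errors.raise_errors()  # no errors accumulated, so this is a no-op
    # After an analysis pass calls errors.add_error(...) with, say, a
    # Violation or UnsupportedNode instance, errors.any() becomes True
    # and errors.raise_errors() raises ValueError with the sorted messages.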
beanmachine-main
src/beanmachine/ppl/compiler/error_report.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import beanmachine.ppl.compiler.bmg_nodes as bn
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.fix_problem import (
    Inapplicable,
    NodeFixer,
    NodeFixerResult,
)
from beanmachine.ppl.compiler.lattice_typer import LatticeTyper
from beanmachine.ppl.compiler.sizer import is_scalar, Sizer


def matrix_scale_fixer(bmg: BMGraphBuilder, sizer: Sizer) -> NodeFixer:
    """This node fixer attempts to rewrite binary multiplications that involve
    a matrix and a scalar into a matrix_scale node."""

    def _matrix_scale_fixer(n: bn.BMGNode) -> NodeFixerResult:
        # A multiplication is fixable (to matrix_scale) if it is
        # a binary multiplication with non-singleton result size
        # and the size of one argument is matrix and the other is scalar.
        if not isinstance(n, bn.MultiplicationNode) or len(n.inputs) != 2:
            return Inapplicable
        # Now let's check the sizes of the inputs.
        input_scalars = [is_scalar(sizer[i]) for i in n.inputs]
        # If both are scalar, then there is nothing to do
        if all(input_scalars):
            return Inapplicable  # Both are scalar
        # If both are matrices, then there is nothing to do
        if all(not t for t in input_scalars):
            return Inapplicable  # Both are matrices
        # The return type of the node should be matrix
        assert not is_scalar(sizer[n])
        left, right = n.inputs
        if input_scalars[1]:
            scalar, matrix = right, left
        else:
            scalar, matrix = left, right
        return bmg.add_matrix_scale(scalar, matrix)

    return _matrix_scale_fixer


def trivial_matmul_fixer(bmg: BMGraphBuilder, typer: LatticeTyper) -> NodeFixer:
    """This node fixer attempts to rewrite matrix multiplications of two
    scalars into an ordinary multiplication."""

    def fixer(n: bn.BMGNode) -> NodeFixerResult:
        if not isinstance(n, bn.MatrixMultiplicationNode):
            return Inapplicable
        left = n.inputs[0]
        left_type = typer[left]
        if not left_type.is_singleton():
            return Inapplicable
        right = n.inputs[1]
        right_type = typer[right]
        if not right_type.is_singleton():
            return Inapplicable
        return bmg.add_multiplication(left, right)

    return fixer


def nested_matrix_scale_fixer(bmg: BMGraphBuilder) -> NodeFixer:
    # If we have scalar nodes S1 and S2, matrix node M and matrix scale
    # nodes MS1 and MS2 then we can rewrite:
    #
    #   S2  M             S1  S2
    #    \  /               \  /
    # S1 MS2      ==>        *   M
    #  \  /                   \ /
    #   MS1                   MS
    #    |                     |
    #    X                     X
    #
    # We go from having two expensive matrix scales to one expensive matrix
    # scale and one cheap multiplication of atomic values.

    def _nested_matrix_scale_fixer(node: bn.BMGNode) -> NodeFixerResult:
        ms1 = node
        if not isinstance(ms1, bn.MatrixScaleNode):
            return Inapplicable
        # Input zero is always the scalar, input 1 is the matrix.
        s1 = ms1.inputs[0]
        ms2 = ms1.inputs[1]
        if not isinstance(ms2, bn.MatrixScaleNode):
            return Inapplicable
        s2 = ms2.inputs[0]
        m = ms2.inputs[1]
        atomic_mult = bmg.add_multiplication(s1, s2)
        return bmg.add_matrix_scale(atomic_mult, m)

    return _nested_matrix_scale_fixer
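
# --- Illustrative sketch (editorial addition, not original source) ---
# nested_matrix_scale_fixer rewrites S1 * (S2 * M) into (S1 * S2) * M,
# trading one expensive matrix scale for a cheap scalar multiplication.
# The sketch assumes its s1, s2 and m parameters are non-constant scalar
# and matrix nodes already accumulated in bmg (constant inputs would be
# folded away by the builder instead).
def _nested_scale_sketch(
    bmg: BMGraphBuilder, s1: bn.BMGNode, s2: bn.BMGNode, m: bn.BMGNode
) -> None:
    inner = bmg.add_matrix_scale(s2, m)  # S2 * M
    outer = bmg.add_matrix_scale(s1, inner)  # S1 * (S2 * M)
    fixer = nested_matrix_scale_fixer(bmg)
    rewritten = fixer(outer)  # (S1 * S2) * M
    assert isinstance(rewritten, bn.MatrixScaleNode)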
beanmachine-main
src/beanmachine/ppl/compiler/fix_matrix_scale.py
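The soundness of nested_matrix_scale_fixer rests on scalar multiplication being associative with matrix scaling: s1 * (s2 * M) == (s1 * s2) * M. A quick numerical check in plain torch (this only illustrates the identity; it does not exercise the compiler pass itself):

import torch

s1, s2 = 2.0, 3.0
m = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
nested = s1 * (s2 * m)   # two matrix scales
fused = (s1 * s2) * m    # one cheap scalar multiply, one matrix scale
assert torch.allclose(nested, fused)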
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Set import beanmachine.ppl.compiler.bmg_nodes as bn import beanmachine.ppl.compiler.profiler as prof import torch from beanmachine.graph import Graph from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.bmg_node_types import ( dist_type, factor_type, operator_type, ) from beanmachine.ppl.compiler.bmg_types import _size_to_rc from beanmachine.ppl.compiler.fix_problems import ( default_skip_optimizations, fix_problems, ) def _reshape(t: torch.Tensor): # Note that we take the transpose; BMG expects columns, # BM provides rows. r, c = _size_to_rc(t.size()) return t.reshape(r, c).transpose(0, 1) class GeneratedGraph: graph: Graph bmg: BMGraphBuilder node_to_graph_id: Dict[bn.BMGNode, int] query_to_query_id: Dict[bn.Query, int] def __init__(self, bmg: BMGraphBuilder) -> None: self.graph = Graph() self.bmg = bmg self.node_to_graph_id = {} self.query_to_query_id = {} def _add_observation(self, node: bn.Observation) -> None: self.graph.observe(self.node_to_graph_id[node.observed], node.value) def _add_query(self, node: bn.Query) -> None: query_id = self.graph.query(self.node_to_graph_id[node.operator]) self.query_to_query_id[node] = query_id def _inputs(self, node: bn.BMGNode) -> List[int]: if isinstance(node, bn.LKJCholeskyNode): # The LKJ dimension parameter has already been folded into the sample type return [self.node_to_graph_id[node.inputs[1]]] return [self.node_to_graph_id[x] for x in node.inputs] def _add_factor(self, node: bn.FactorNode) -> None: graph_id = self.graph.add_factor(factor_type(node), self._inputs(node)) self.node_to_graph_id[node] = graph_id def _add_distribution(self, node: bn.DistributionNode) -> None: distr_type, elt_type = dist_type(node) graph_id = self.graph.add_distribution(distr_type, elt_type, self._inputs(node)) self.node_to_graph_id[node] = graph_id def _add_operator(self, node: bn.OperatorNode) -> None: graph_id = self.graph.add_operator(operator_type(node), self._inputs(node)) self.node_to_graph_id[node] = graph_id def _add_constant(self, node: bn.ConstantNode) -> None: # noqa t = type(node) v = node.value if t is bn.PositiveRealNode: graph_id = self.graph.add_constant_pos_real(float(v)) elif t is bn.NegativeRealNode: graph_id = self.graph.add_constant_neg_real(float(v)) elif t is bn.ProbabilityNode: graph_id = self.graph.add_constant_probability(float(v)) elif t is bn.BooleanNode: graph_id = self.graph.add_constant_bool(bool(v)) elif t is bn.NaturalNode: graph_id = self.graph.add_constant_natural(int(v)) elif t is bn.RealNode: graph_id = self.graph.add_constant_real(float(v)) elif t is bn.ConstantPositiveRealMatrixNode: graph_id = self.graph.add_constant_pos_matrix(_reshape(v)) elif t is bn.ConstantRealMatrixNode: graph_id = self.graph.add_constant_real_matrix(_reshape(v)) elif t is bn.ConstantNegativeRealMatrixNode: graph_id = self.graph.add_constant_neg_matrix(_reshape(v)) elif t is bn.ConstantProbabilityMatrixNode: graph_id = self.graph.add_constant_probability_matrix(_reshape(v)) elif t is bn.ConstantSimplexMatrixNode: graph_id = self.graph.add_constant_col_simplex_matrix(_reshape(v)) elif t is bn.ConstantNaturalMatrixNode: graph_id = self.graph.add_constant_natural_matrix(_reshape(v)) elif t is bn.ConstantBooleanMatrixNode: graph_id = self.graph.add_constant_bool_matrix(_reshape(v)) elif isinstance(v, 
torch.Tensor) and v.numel() != 1: graph_id = self.graph.add_constant_real_matrix(_reshape(v)) else: graph_id = self.graph.add_constant_real(float(v)) self.node_to_graph_id[node] = graph_id def _generate_node(self, node: bn.BMGNode) -> None: # We add all nodes that are reachable from a query, observation or # sample to the BMG graph such that inputs are always added before # outputs. # # TODO: We could consider traversing only nodes reachable from # observations or queries. # # There are three cases to consider: # # * Observations: there is no associated value returned by the graph # when we add an observation, so there is nothing to track. # # * Query of an operator (or constant): The graph gives us the column # index in the list of samples it returns for this query. We track it in # query_to_query_id. # # * Any other node: the graph gives us the graph identifier of the new # node. We need to know this for each node that will be used as an input # later, so we track that in node_to_graph_id. if isinstance(node, bn.Observation): self._add_observation(node) elif isinstance(node, bn.Query): self._add_query(node) elif isinstance(node, bn.FactorNode): self._add_factor(node) elif isinstance(node, bn.DistributionNode): self._add_distribution(node) elif isinstance(node, bn.OperatorNode): self._add_operator(node) elif isinstance(node, bn.ConstantNode): self._add_constant(node) def _generate_graph(self, skip_optimizations: Set[str]) -> None: builder, error_report = fix_problems(self.bmg, skip_optimizations) self.bmg = builder error_report.raise_errors() self.bmg._begin(prof.build_bmg_graph) for node in self.bmg.all_ancestor_nodes(): self._generate_node(node) self.bmg._finish(prof.build_bmg_graph) def to_bmg_graph( bmg: BMGraphBuilder, skip_optimizations: Set[str] = default_skip_optimizations ) -> GeneratedGraph: gg = GeneratedGraph(bmg) gg._generate_graph(skip_optimizations) return gg
beanmachine-main
src/beanmachine/ppl/compiler/gen_bmg_graph.py
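The transpose convention in `_reshape` can be seen with plain torch. Assuming `_size_to_rc` maps `Size([2, 3])` to `(2, 3)` (an assumption made for illustration; the real helper lives in `bmg_types`), a row-oriented 2x3 BM tensor becomes a 3x2 column-oriented BMG matrix:

import torch

t = torch.arange(6.0).reshape(2, 3)           # rows, as BM provides them
bmg_matrix = t.reshape(2, 3).transpose(0, 1)  # what _reshape would produce
print(bmg_matrix.size())  # torch.Size([3, 2]) -- columns, as BMG expects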
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Optional

import beanmachine.ppl.compiler.bmg_nodes as bn
import beanmachine.ppl.compiler.bmg_types as bt
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.fix_problem import (
    Inapplicable,
    NodeFixer,
    NodeFixerResult,
)
from beanmachine.ppl.compiler.lattice_typer import LatticeTyper


# TODO: Move this to a utilities module
def _count(bs) -> int:
    """Given a sequence of bools, count the Trues"""
    return sum(1 for b in bs if b)


def negative_real_multiplication_fixer(
    bmg: BMGraphBuilder, typer: LatticeTyper
) -> NodeFixer:
    """This fixer rewrites multiplications involving negative reals into
    multiplications using only positive reals."""

    # The BMG type system requires that all inputs to a multiplication be of
    # the same type and that the type is a floating point type. The output type
    # is then the same as the input type. There are three possibilities:
    #
    # P  * P  --> P
    # R  * R  --> R
    # R+ * R+ --> R+
    #
    # This means that if we have MULT(R-, R+) then the requirements fixer will
    # convert both inputs to R, and we will lose track of the fact that
    # we could know in the type system that the result is a R-.
    #
    # This is particularly unfortunate when the negative real is a log probability.
    # If we multiply a log probability by two, say, that is logically squaring
    # the probability. We know that POW(P, 2) --> P, and MULT(P, P) --> P, but
    # the BMG multiplication operator does not know that MULT(2, R-) is R-.
    #
    # We could solve this problem by modifying the BMG type system so that we
    # allow mixed-type inputs to a multiplication; until we do so, we'll
    # work around the problem here by rewriting multiplications that involve
    # combinations of R-, R+ and P.

    def _negative_real_multiplication_fixer(node: bn.BMGNode) -> NodeFixerResult:
        if not isinstance(node, bn.MultiplicationNode):
            return Inapplicable
        # If no input is R- then we don't have a rewrite to do here.
        count = _count(typer[inpt] == bt.NegativeReal for inpt in node.inputs)
        if count == 0:
            return Inapplicable
        # If any input is R, we cannot prevent the output from being R.
        if any(typer[inpt] == bt.Real for inpt in node.inputs):
            return Inapplicable
        new_mult = bmg.add_multi_multiplication(
            *(
                bmg.add_negate(inpt) if typer[inpt] == bt.NegativeReal else inpt
                for inpt in node.inputs
            )
        )
        if count % 2 != 0:
            new_mult = bmg.add_negate(new_mult)
        return new_mult

    return _negative_real_multiplication_fixer


def _input_of_1m_exp(node: bn.BMGNode) -> Optional[bn.BMGNode]:
    # Is node ADD(1, NEG(EXP(X))) or ADD(NEG(EXP(X)), 1)?
    if not isinstance(node, bn.AdditionNode) or len(node.inputs) != 2:
        return None
    left = node.inputs[0]
    right = node.inputs[1]
    negate = None
    if bn.is_one(left):
        negate = right
    elif bn.is_one(right):
        negate = left
    if not isinstance(negate, bn.NegateNode):
        return None
    ex = negate.inputs[0]
    if not isinstance(ex, bn.ExpNode):
        return None
    return ex.inputs[0]


def log1mexp_fixer(bmg: BMGraphBuilder, typer: LatticeTyper) -> NodeFixer:
    # To take the complement of a log prob we need to convert
    # the log prob back to an ordinary prob, then complement it,
    # then convert it back to a log prob. In BMG we have a special
    # node just for this operation.

    def _is_comp_exp(node: bn.BMGNode) -> bool:
        return isinstance(node, bn.ComplementNode) and isinstance(
            node.inputs[0], bn.ExpNode
        )

    def _log1mexp_fixer(node: bn.BMGNode) -> NodeFixerResult:
        if not isinstance(node, bn.LogNode):
            return Inapplicable
        comp = node.inputs[0]
        # There are two situations to consider here.
        #
        # Easy case:
        #
        # If we've already rewritten the 1-exp(x) into
        # complement(x), then we already know that x is
        # a probability. Just generate log1mexp(x).
        #
        # Hard case:
        #
        # We sometimes get into a situation where you and I know that
        # a node is a probability, but the type checker does not.
        # For example, if we have probability p1:
        #
        # p2 = 0.5 + p1 / 2
        #
        # then p2 is judged to be R+ because the sum of two Ps is not
        # necessarily a P.
        #
        # If we then go on to do:
        #
        # x = log(p2)
        #
        # then x is NOT known to be R-; rather it is known to be R.
        #
        # If later on we wish to invert this log prob:
        #
        # inv = log(1 - exp(x))
        #
        # Then the type system says exp(x) is R+ (when it should be P).
        # We then say that 1 - R+ is R (should be P) and then log(R) is an
        # error, when it should be R-.
        #
        # If the program has log(1 - exp(x)) then the developer certainly
        # believes that x is a negative real. Even if the type system
        # does not, we should generate a graph as though this were a
        # negative real.
        x = None
        if _is_comp_exp(comp):
            x = comp.inputs[0].inputs[0]
        else:
            x = _input_of_1m_exp(comp)
        # If x is known to be a positive real, there's nothing
        # we can do. A later pass will give an error.
        #
        # If it is real, then force it to be negative real.
        if x is not None:
            if typer.is_pos_real(x):
                return Inapplicable
            if typer.is_real(x):
                x = bmg.add_to_negative_real(x)
        # If x is None then the node does not match log(1-exp(x)).
        # If x is not None, it still might be untypable. Skip doing
        # this rewrite until we know that x has a type.
        if x is None or not typer.is_neg_real(x):
            return Inapplicable
        return bmg.add_log1mexp(x)

    return _log1mexp_fixer


def neg_neg_fixer(bmg: BMGraphBuilder) -> NodeFixer:
    # We can easily end up in a situation where another rewriter causes
    # the graph to contain X --> NEG --> NEG which could be replaced
    # with just X.

    def _neg_neg_fixer(node: bn.BMGNode) -> NodeFixerResult:
        if not isinstance(node, bn.NegateNode):
            return Inapplicable
        neg = node.inputs[0]
        if not isinstance(neg, bn.NegateNode):
            return Inapplicable
        return neg.inputs[0]

    return _neg_neg_fixer


def nested_if_same_cond_fixer(bmg: BMGraphBuilder) -> NodeFixer:
    # Suppose node IF_1 has condition COND, consequence IF_2 and alternative
    # ALT_1, and node IF_2 has the same condition COND, consequence CONS_2
    # and alternative ALT_2. The inner test of COND is redundant, so IF_1 can
    # be replaced with IF(COND, CONS_2, ALT_1). Symmetrically, when the
    # duplicated condition is in the alternative:
    # IF(COND, CONS_1, IF(COND, CONS_2, ALT_2)) --> IF(COND, CONS_1, ALT_2).

    def _nested_if_same_cond_fixer(node: bn.BMGNode) -> NodeFixerResult:
        if not isinstance(node, bn.IfThenElseNode):
            return Inapplicable
        cons = node.consequence
        alt = node.alternative
        if not isinstance(cons, bn.IfThenElseNode) and not isinstance(
            alt, bn.IfThenElseNode
        ):
            return Inapplicable
        if isinstance(cons, bn.IfThenElseNode):
            if not (node.condition == cons.condition):
                return Inapplicable
            else:
                return bmg.add_if_then_else(
                    node.condition, cons.consequence, node.alternative
                )
        elif isinstance(alt, bn.IfThenElseNode):
            if not (node.condition == alt.condition):
                return Inapplicable
            else:
                return bmg.add_if_then_else(
                    node.condition, node.consequence, alt.alternative
                )

    return _nested_if_same_cond_fixer
beanmachine-main
src/beanmachine/ppl/compiler/fix_arithmetic.py
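The log1mexp rewrite is justified by the identity log(1 - exp(x)) = log1p(-exp(x)) for negative x; a dedicated LOG1MEXP node lets BMG evaluate it without round-tripping through probability space. A quick numerical check in plain torch (not the compiler pass itself):

import torch

x = torch.tensor(-0.5)  # a negative real, e.g. a log probability
direct = torch.log(1.0 - torch.exp(x))
stable = torch.log1p(-torch.exp(x))  # the numerically safer formulation
assert torch.allclose(direct, stable)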
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tools to transform Bean Machine programs to Bean Machine Graph""" import ast import inspect import sys import types from typing import Any, Callable, Dict, List, Optional, Tuple import astor from beanmachine.ppl.compiler.ast_patterns import ( assign, ast_assert, ast_domain, ast_for, ast_if, attribute, aug_assign, binary_compare, binop, call, get_value, index, keyword, load, name, slice_pattern, starred, subscript, unaryop, ) from beanmachine.ppl.compiler.internal_error import LiftedCompilationError from beanmachine.ppl.compiler.patterns import match_any from beanmachine.ppl.compiler.rules import ( AllOf as all_of, FirstMatch as first, ListEdit, Pattern, PatternRule, remove_from_list, Rule, TryOnce as once, ) from beanmachine.ppl.compiler.runtime import BMGRuntime from beanmachine.ppl.compiler.single_assignment import single_assignment # TODO: Would be helpful if we could track original source code locations. _top_down = ast_domain.top_down _bottom_up = ast_domain.bottom_up _descend_until = ast_domain.descend_until _specific_child = ast_domain.specific_child _eliminate_assertion = PatternRule(ast_assert(), lambda a: remove_from_list) _eliminate_all_assertions: Rule = _top_down(once(_eliminate_assertion)) _name_or_none = match_any(name(), None) def _parse_expr(source: str) -> ast.expr: # Takes a string containing an expression; ast.parse creates # Module(body=[Expr(value=THE_EXPRESSION)]); obtain the expression. e = ast.parse(source).body[0] assert isinstance(e, ast.Expr) return e.value _bmg = _parse_expr("bmg") def _make_bmg_call(name: str, args: List[ast.AST]) -> ast.AST: return ast.Call( func=ast.Attribute(value=_bmg, attr=name, ctx=ast.Load()), args=args, keywords=[], ) # # After rewriting into single assignment form, every call has one of these forms: # # x = f(*args, **kwargs) # x = dict() # x = dict(key = id) # x = dict(**id, **id) # # The first is the only one we need to rewrite. We rewrite it to # # x = bmg.handle_function(f, args, kwargs) _starred_id = starred(value=name()) _double_starred_id = keyword(arg=None, value=name()) _handle_call: PatternRule = PatternRule( assign(value=call(args=[_starred_id], keywords=[_double_starred_id])), lambda a: ast.Assign( a.targets, _make_bmg_call( "handle_function", [ a.value.func, a.value.args[0].value, a.value.keywords[0].value, ], ), ), ) _handle_dot = PatternRule( assign(value=attribute(ctx=load)), lambda a: ast.Assign( a.targets, _make_bmg_call("handle_dot_get", [a.value.value, ast.Str(a.value.attr)]), ), ) def _handle_unary(p: Pattern, s: str) -> PatternRule: # A rule which transforms # # x = OP y # # into # # x = bmg.handle_function(operator.OP, [y]) op = ast.Attribute( value=ast.Name("operator", ctx=ast.Load()), attr=s, ctx=ast.Load() ) return PatternRule( assign(value=unaryop(op=p)), lambda a: ast.Assign( a.targets, _make_bmg_call( "handle_function", [ op, ast.List( elts=[a.value.operand], ctx=ast.Load(), ), ], ), ), ) def _handle_binary(p: Pattern, s: str) -> PatternRule: # A rule which transforms # # x = y OP z # # into # # x = bmg.handle_function(operator.OP, [y, z]) # # p is a pattern which matches the binary operator kind. # s is the name of the operator function. 
op = ast.Attribute( value=ast.Name("operator", ctx=ast.Load()), attr=s, ctx=ast.Load() ) return PatternRule( assign(value=binop(op=p)), lambda a: ast.Assign( a.targets, _make_bmg_call( "handle_function", [ op, ast.List( elts=[a.value.left, a.value.right], ctx=ast.Load(), ), ], ), ), ) def _handle_aug_assign(p: Pattern, s: str) -> PatternRule: # A rule which transforms # # x OP= y # # into # # x = bmg.handle_function(operator.iOP, [x, y]) # # Note that the x on the left of the assignment must be a Store() # and the one on the right must be a Load(). op = ast.Attribute( value=ast.Name("operator", ctx=ast.Load()), attr=s, ctx=ast.Load() ) return PatternRule( aug_assign(target=name(), value=name(), op=p), lambda a: ast.Assign( [a.target], _make_bmg_call( "handle_function", [ op, ast.List( elts=[ ast.Name(id=a.target.id, ctx=ast.Load()), a.value, ], ctx=ast.Load(), ), ], ), ), ) def _handle_comparison(p: Pattern, s: str) -> PatternRule: # A rule which transforms # # x = y OP z # # into # # x = bmg.handle_function(operator.op, [y, z]) op = ast.Attribute( value=ast.Name("operator", ctx=ast.Load()), attr=s, ctx=ast.Load() ) return PatternRule( assign(value=binary_compare(p)), lambda a: ast.Assign( a.targets, _make_bmg_call( "handle_function", [ op, ast.List( elts=[a.value.left, a.value.comparators[0]], ctx=ast.Load(), ), ], ), ), ) # x = y in z --> x = bmg.handle_function(operator.contains, [z, y]) _handle_in = PatternRule( assign(value=binary_compare(ast.In)), lambda a: ast.Assign( a.targets, _make_bmg_call( "handle_function", [ ast.Attribute( value=ast.Name("operator", ctx=ast.Load()), attr="contains", ctx=ast.Load(), ), ast.List( elts=[ a.value.comparators[0], a.value.left, ], ctx=ast.Load(), ), ], ), ), ) # x = y not in z --> x = bmg.handle_not_in(y, z) _handle_not_in = PatternRule( assign(value=binary_compare(ast.NotIn)), lambda a: ast.Assign( a.targets, _make_bmg_call("handle_not_in", [a.value.left, a.value.comparators[0]]), ), ) # a = b[c] --> a = bmg.handle_index(b, c) _handle_index = PatternRule( assign(value=subscript(slice=index())), lambda a: ast.Assign( a.targets, _make_bmg_call("handle_index", [a.value.value, get_value(a.value.slice)]), ), ) _ast_none = ast.Constant(value=None, kind=None) def _or_none(a): return _ast_none if a is None else a # a = b[c:d:e] --> a = bmg.handle_slice(b, c, d, e) _handle_slice = PatternRule( assign( value=subscript( value=name(), slice=slice_pattern( lower=_name_or_none, upper=_name_or_none, step=_name_or_none ), ) ), lambda a: ast.Assign( a.targets, _make_bmg_call( "handle_slice", [ a.value.value, _or_none(a.value.slice.lower), _or_none(a.value.slice.upper), _or_none(a.value.slice.step), ], ), ), ) # a[b] = e --> bmg.handle_subscript_assign(a, b, None, None, e) _handle_subscript_assign_index = PatternRule( assign( targets=[ subscript( value=name(), slice=index(value=name()), ) ], value=name(), ), lambda a: ast.Expr( _make_bmg_call( "handle_subscript_assign", [ a.targets[0].value, get_value(a.targets[0].slice), _ast_none, _ast_none, a.value, ], ), ), ) # a[b:c:d] = e --> bmg.handle_subscript_assign(a, b, c, d, e) _handle_subscript_assign_slice = PatternRule( assign( targets=[ subscript( value=name(), slice=slice_pattern( lower=_name_or_none, upper=_name_or_none, step=_name_or_none, ), ) ], value=name(), ), lambda a: ast.Expr( _make_bmg_call( "handle_subscript_assign", [ a.targets[0].value, _or_none(a.targets[0].slice.lower), _or_none(a.targets[0].slice.upper), _or_none(a.targets[0].slice.step), a.value, ], ), ), ) _assignments_to_bmg: Rule = first( [ 
_handle_dot, _handle_call, # Unary operators: ~ not + - _handle_unary(ast.Invert, "inv"), _handle_unary(ast.Not, "not_"), _handle_unary(ast.UAdd, "pos"), _handle_unary(ast.USub, "neg"), # Binary operators & | / // << % * ** >> - @ # "and" and "or" are already eliminated by the single # assignment rewriter. _handle_binary(ast.Add, "add"), _handle_binary(ast.BitAnd, "and_"), _handle_binary(ast.BitOr, "or_"), _handle_binary(ast.BitXor, "xor"), _handle_binary(ast.Div, "truediv"), _handle_binary(ast.FloorDiv, "floordiv"), _handle_binary(ast.LShift, "lshift"), _handle_binary(ast.MatMult, "matmul"), _handle_binary(ast.Mod, "mod"), _handle_binary(ast.Mult, "mul"), _handle_binary(ast.Pow, "pow"), _handle_binary(ast.RShift, "rshift"), _handle_binary(ast.Sub, "sub"), # [] _handle_index, _handle_slice, # Comparison operators: == != > >= < <= # is, is not, in, not in _handle_comparison(ast.Eq, "eq"), _handle_comparison(ast.NotEq, "ne"), _handle_comparison(ast.Gt, "gt"), _handle_comparison(ast.GtE, "ge"), _handle_comparison(ast.Lt, "lt"), _handle_comparison(ast.LtE, "le"), _handle_comparison(ast.Is, "is_"), _handle_comparison(ast.IsNot, "is_not"), _handle_in, _handle_not_in, # Augmented assignments _handle_aug_assign(ast.Add, "iadd"), _handle_aug_assign(ast.Sub, "isub"), _handle_aug_assign(ast.Mult, "imul"), _handle_aug_assign(ast.Div, "itruediv"), _handle_aug_assign(ast.FloorDiv, "ifloordiv"), _handle_aug_assign(ast.Mod, "imod"), _handle_aug_assign(ast.Pow, "ipow"), _handle_aug_assign(ast.MatMult, "imatmul"), _handle_aug_assign(ast.LShift, "ilshift"), _handle_aug_assign(ast.RShift, "irshift"), _handle_aug_assign(ast.BitAnd, "iand"), _handle_aug_assign(ast.BitXor, "ixor"), _handle_aug_assign(ast.BitOr, "ior"), # Indexed assignments _handle_subscript_assign_index, _handle_subscript_assign_slice, ] ) # Rewrite # # if ID: # consequence # else: # alternative # # to # # bmg.handle_if(ID) # if ID: # ... # # Note that handle_if must not be an operand of the top-down combinator # because we would just enter an infinite loop of adding the handler # before the if-statement. _handle_if = PatternRule( ast_if(test=name()), lambda a: ListEdit( [ ast.Expr(_make_bmg_call("handle_if", [a.test])), a, ] ), ) _handle_for = PatternRule( ast_for(iter=name()), lambda a: ListEdit( [ ast.Expr(_make_bmg_call("handle_for", [a.iter])), a, ] ), ) _control_flow_to_bmg: Rule = first( [ _handle_for, _handle_if, ] ) # Note that we are NOT attempting to iterate to a fixpoint here; we do a transformation # on every statement once. _statements_to_bmg: Rule = all_of( [ _top_down(once(_assignments_to_bmg)), _bottom_up(once(_control_flow_to_bmg)), ] ) # TODO: Add classes, lambdas, and so on _supported_code_containers = {types.MethodType, types.FunctionType} def _bm_ast_to_bmg_ast(a: ast.Module) -> ast.Module: """This function takes any AST module, say: def f(): return norm() + 1.0 and transforms it to a form where every operation becomes a call into the BMG runtime: def f(): t1 = [] t2 = bmg.handle_function(norm, t1) t3 = 1.0 t4 = bmg.handle_function(operator.add, [t2, t3]) return t4 It returns the AST of the transformed code as a module. """ no_asserts = _eliminate_all_assertions(a).expect_success() assert isinstance(no_asserts, ast.Module) sa = single_assignment(no_asserts) assert isinstance(sa, ast.Module) # Now we're in single assignment form. 
rewrites = [_statements_to_bmg] bmg = all_of(rewrites)(sa).expect_success() assert isinstance(bmg, ast.Module) return bmg def _unindent(lines): # TODO: Handle the situation if there are tabs if len(lines) == 0: return lines num_spaces = len(lines[0]) - len(lines[0].lstrip(" ")) if num_spaces == 0: return lines spaces = lines[0][0:num_spaces] return [(line[num_spaces:] if line.startswith(spaces) else line) for line in lines] def _get_lines_ast(f: Callable) -> Tuple[str, ast.Module]: """Takes a function object, returns the code containing its definition as both text and a module. Note that if the function is a lambda then we return all the lines containing the lambda, not just the lambda.""" lines, _ = inspect.getsourcelines(f) # The code may be indented because it is a local function or class member; # either way, we cannot parse an indented function. Unindent it. source = "".join(_unindent(lines)) module = ast.parse(source) return source, module def _transform_lambda(f: Callable) -> Tuple[Optional[List[ast.stmt]], str, str]: """Takes a lambda such as lambda n: norm() + 1.0 and transforms it to a form where every operation becomes a call into the BMG runtime: def t1(n): t2 = [n] t3 = bmg.handle_function(norm, t2) t4 = 1.0 t5 = bmg.handle_function(operator.add, [t3, t4]) return t5 _lambda = t1 It returns: * the body of the transformed function, or None if the transformation failed * the name of an identifier which refers to the transformed function (_lambda in this example) * the source code of the original lambda""" # See http://xion.io/post/code/python-get-lambda-code.html and its comments # for an extended discussion of how broken python is when you need the # source code for a lambda. # # Summary: getsourceslines does exactly what it says on the tin: gives you # the source code **lines** associated with a function. If for example we # have the line "x = y(lambda: 2, lambda: 3)" and we wish to know the source # code of the first lambda, calling getsourcelines returns that string, # containing the entire line. # # How can we then tell which lambda is the one we want? There is no reliable way to # do so! The __code__ object of the function only contains the line number, not the # column offset. # # The vast majority of the time there will be a single lambda on the line, so we'll # implement for that scenario. # # TODO: Consider producing a warning or error if we cannot determine which lambda # is intended. # TODO: return None if we are unable to get the source source, module = _get_lines_ast(f) all_lambdas = [ astnode for astnode in ast.walk(module) if isinstance(astnode, ast.Lambda) ] if len(all_lambdas) != 1: return None, "", "" name = "_lambda" # Give the rewriter "_lambda = lambda: whatever", and it will rewrite that statement # into a function definition and assignment. 
assignment = ast.Module( body=[ ast.Assign( targets=[ast.Name(id=name, ctx=ast.Store())], value=all_lambdas[0] ), ] ) bmg = _bm_ast_to_bmg_ast(assignment) assert isinstance(bmg, ast.Module) assert len(bmg.body) == 2 return bmg.body, name, source def _transform_function(f: Callable) -> Tuple[Optional[List[ast.stmt]], str, str]: """Takes a function such as @functional def f(): return norm() + 1.0 and transforms it to a form where every operation becomes a call into the BMG runtime: def f(): t1 = [] t2 = bmg.handle_function(norm, t1) t3 = 1.0 t4 = bmg.handle_function(operator.add, [t2, t3]) return t4 t5 = [f] f = bmg.handle_function(functional, t5) It returns: * a list of statement ASTs of the transformed function, or None if the transformation failed * the name of an identifier which refers to the transformed function * the source code of the original function Notice that if the original function is decorated with the @decorator syntax then the transformed function is decorated by calling the decorator directly. """ # We need special handling for lambdas. if f.__name__ == "<lambda>": return _transform_lambda(f) # TODO: return None if we are unable to get the source source, original_ast = _get_lines_ast(f) assert len(original_ast.body) == 1 if not isinstance(original_ast.body[0], ast.FunctionDef): return None, "", "" transformed_ast: ast.Module = _bm_ast_to_bmg_ast(original_ast) assert len(transformed_ast.body) >= 1 funcdef = transformed_ast.body[0] assert isinstance(funcdef, ast.FunctionDef) return transformed_ast.body, funcdef.name, source def _create_enclosing_helper( f: Callable, transformed_body: List[ast.stmt], name: str, helper_name: str, ) -> ast.AST: """Takes: * the original function being transformed * the AST of the transformed body * the name of an identifier referring to the function in the transformed code * the name of a helper method Returns the AST of a helper method which closes the transformed body over: * the BMG runtime that accumulates the operations * the free variables of the original function""" # For example, if we are given the transformed method # # def f(): # t1 = [] # t2 = bmg.handle_function(norm, t1) # t3 = 1.0 # t4 = bmg.handle_function(operator.add, [t2, t3]) # return t4 # # Then we generate: # # def f_helper(bmg): # def f(): # t1 = [] # t2 = bmg.handle_function(norm, t1) # t3 = 1.0 # t4 = bmg.handle_function(operator.add, [t2, t3]) # return t4 # return f # # Suppose we've been asked to transform a function which is closed over some outer # variables; how does that work? For example: # # def x(offset): # def y(): # return flip() + offset # return y # # f = x(1) # # @functional def some_functional(): # return some_rv(f()) # some_rv(1) or some_rv(2) # # The *functional* never calls x; by the time we call y(), x(1) is long gone, # so we cannot rely on the body of x being transformed. Somehow we must transform # y to: # # def y(): # t1 = bmg.handle_function(flip, []) # t2 = bmg.handle_function(operator.add, [t1, offset]) # return t2 # # where offset needs to be 1. # # Python implements closures by storing the names of the outer variables in a tuple # at y.__code__.co_freevars, and the cells (references to *variables*) in a tuple # at y.__closure__. You might think that we could simply generate the function above # and the set co_freevars and __closure__ on the new function object to the appropriate # values, but unfortunately these are both read-only attributes of function objects. 
# # Instead what we'll do is generate: # # def y_helper(bmg, offset): # def y(): # t1 = bmg.handle_function(flip, []) # t2 = bmg.handle_function(operator.add, [t1, offset]) # return t2 # return y # # and then call y_helper with the appropriate values. That is, generate a new set of # identical outer variables with the same values. # # How can this go wrong? # # Closures are closed over *variables*, not *values*, and this is observable when # an inner function uses the *nonlocal* statement: # # def new_counter(): # counter = 0 # def inc(): # nonlocal counter # counter += 1 # print(counter) # def dec(): # nonlocal counter # counter += 1 # print(counter) # return inc, dec # i, d = new_counter() # i() # 1 # i() # 2 # d() # 1 # # In this example the nonlocal statement causes "counter" to be an alias for the # outer variable. # # If we were asked to compile functions i or d, we would pass in the *current* value # of counter, but we would not create an alias to the *existing* counter variable that # i and d are closed over. # # For now, we're going to have to live with this. It should be rare for a model # to have a callee that does this. # # TODO: Consider detecting jitted functions which use the nonlocal statement, and # producing an error or warning. # # TODO: rename bmg outer variable to something less likely # to be shadowed by an inner variable. assert type(f) in _supported_code_containers helper_parameters = [ast.arg(arg="bmg", annotation=None)] if hasattr(f, "__code__"): code = f.__code__ # pyre-ignore if hasattr(code, "co_freevars"): for outer in code.co_freevars: helper_parameters.append(ast.arg(arg=outer, annotation=None)) helper_args = ast.arguments( posonlyargs=[], args=helper_parameters, vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[], ) helper_body = ( [ast.Import(names=[ast.alias(name="operator", asname=None)])] + transformed_body + [ast.Return(value=ast.Name(id=name, ctx=ast.Load()))] ) helper_func = ast.FunctionDef( name=helper_name, args=helper_args, body=helper_body, decorator_list=[], returns=None, ) helper_ast = ast.Module(body=[helper_func], type_ignores=[]) ast.fix_missing_locations(helper_ast) return helper_ast def _get_globals(f: Callable) -> Dict: """Given a function, returns the global variable dictionary of the module containing the function.""" if f.__module__ not in sys.modules: msg = ( f"module {f.__module__} for function {f.__name__} not " + f"found in sys.modules.\n{str(sys.modules.keys())}" ) raise Exception(msg) return sys.modules[f.__module__].__dict__ def _get_helper_arguments( original_function: Callable, runtime: BMGRuntime ) -> List[Any]: # The helper function takes the runtime as its first argument and # then all the outer variables of the original function. arguments = [runtime] if hasattr(original_function, "__closure__"): closure = original_function.__closure__ # pyre-ignore if closure is not None: for cell in closure: arguments.append(cell.cell_contents) return arguments def _bm_function_to_bmg_function( original_function: Callable, runtime: BMGRuntime ) -> Callable: """Takes a function object and -- if possible -- returns a function of the same signature which calls the BMGRuntime object on each operation that was in the original function. If not possible, it returns the original function and we hope that it did not do anything involving a stochastic quantity. """ # We only know how to transform certain kinds of code containers. # If we don't have one of those, just return the function unmodified # and hope for the best. 
if type(original_function) not in _supported_code_containers: return original_function # First obtain the transformed function itself; the resulting AST will not # yet be closed over either the runtime or any outer variables of the # original function. transformed_body, name, original_source = _transform_function(original_function) if transformed_body is None: return original_function # Now create a helper function wrapping the transformed function; this # wrapper creates a closure. helper_name = name + "_helper" helper_ast = _create_enclosing_helper( original_function, transformed_body, name, helper_name ) # We now have an AST containing a function, foo_helper, which when called # will return the rewritten function. We need to call this helper somehow. # First thing to do is to compile it. # # Python requires a "filename" to compile code; we'll make up a fake one. filename = "<BMGJIT>" try: compiled_helper = compile(helper_ast, filename, "exec") except Exception as ex: # If something went wrong during compilation, we probably have a # bug in the AST rewriting step. Report the error here. raise LiftedCompilationError(original_source, helper_ast, ex) from ex # The AST is now compiled into bytecode but has not yet been executed. # That bytecode, when executed, will define a module containing the # helper function. # What happens if the new code accesses a global variable defined in the # *original* code's module? We need to tell Python that the new module # and the old module have the same global variables. # # We need the dictionary mapping from global variables to their values. original_globals = _get_globals(original_function) # All right, we have the bytecode of the helper function, and we have # the global state of the original function. When we execute the bytecode # we've just compiled, that will add the helper method to the global variable # dictionary. exec(compiled_helper, original_globals) # noqa # The helper function is now in the global variables. Obtain it and call it. helper_function = original_globals[helper_name] arguments = _get_helper_arguments(original_function, runtime) transformed_function = helper_function(*arguments) # For debugging purposes we'll stick some helpful information into # the function object. transformed_function.runtime = runtime transformed_function.original_function = original_function transformed_function.original_source = original_source transformed_function.transformed_ast = helper_ast transformed_function.transformed_source = astor.to_source(helper_ast) return transformed_function def _bm_function_to_bmg_ast(f: Callable, helper_name: str) -> ast.AST: # TODO: This method is only here for testing purposes. Get rid of it. transformed_body, name, _ = _transform_function(f) return _create_enclosing_helper( f, transformed_body, name, helper_name # pyre-ignore )
beanmachine-main
src/beanmachine/ppl/compiler/bm_to_bmg.py
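A small check of the `_parse_expr` convention described in the file above: `ast.parse` wraps a bare expression in `Module(body=[Expr(value=...)])`, and the helper unwraps it to reach the expression node itself.

import ast

e = ast.parse("bmg").body[0]
assert isinstance(e, ast.Expr)
print(type(e.value).__name__)  # Name -- the identifier node for "bmg"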
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.distributions as dist


class Flat(dist.Distribution):
    """
    Flat distribution used as an improper prior: every value has log
    probability zero, and sampling returns zeros of the requested shape.

    Args:
        shape: a tuple giving the shape of the Flat prior.
    """

    has_enumerate_support = False
    support = dist.constraints.real
    has_rsample = True
    arg_constraints = {}

    def __init__(self, shape=(1,)):
        self.shape = shape
        self._event_shape = [shape]

    def rsample(self, sample_shape):
        return torch.zeros(sample_shape)

    def sample(self):
        return torch.zeros(self.shape)

    def log_prob(self, value):
        return torch.tensor(0.0)
beanmachine-main
src/beanmachine/ppl/distributions/flat.py
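A usage sketch for the Flat improper prior, assuming the `Flat` class above is in scope: every value has log probability zero and samples are all zeros of the requested shape.

import torch

flat = Flat(shape=(3,))
print(flat.sample())                      # tensor([0., 0., 0.])
print(flat.rsample(torch.Size([2, 3])))   # zeros with the requested shape
print(flat.log_prob(torch.tensor(42.0)))  # tensor(0.) -- flat everywhere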
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from beanmachine.ppl.distributions.delta import Delta from beanmachine.ppl.distributions.flat import Flat from beanmachine.ppl.distributions.unit import Unit __all__ = ["Delta", "Flat", "Unit"]
beanmachine-main
src/beanmachine/ppl/distributions/__init__.py
# @lint-ignore-every LICENSELINT # Copyright Contributors to the Pyro project. # SPDX-License-Identifier: Apache-2.0 import torch from torch.distributions import constraints def broadcast_shape(*shapes, **kwargs): """ Similar to ``np.broadcast()`` but for shapes. Equivalent to ``np.broadcast(*map(np.empty, shapes)).shape``. :param tuple shapes: shapes of tensors. :param bool strict: whether to use extend-but-not-resize broadcasting. :returns: broadcasted shape :rtype: tuple :raises: ValueError """ strict = kwargs.pop("strict", False) reversed_shape = [] for shape in shapes: for i, size in enumerate(reversed(shape)): if i >= len(reversed_shape): reversed_shape.append(size) elif reversed_shape[i] == 1 and not strict: reversed_shape[i] = size elif reversed_shape[i] != size and (size != 1 or strict): raise ValueError( "shape mismatch: objects cannot be broadcast to a single shape: {}".format( " vs ".join(map(str, shapes)) ) ) return tuple(reversed(reversed_shape)) class Unit(torch.distributions.Distribution): """ Trivial nonnormalized distribution representing the unit type. The unit type has a single value with no data, i.e. ``value.numel() == 0``. This is used for :func:`pyro.factor` statements. """ arg_constraints = {"log_factor": constraints.real} support = constraints.real def __init__(self, log_factor, validate_args=None): log_factor = torch.as_tensor(log_factor) batch_shape = log_factor.shape event_shape = torch.Size((0,)) # This satisfies .numel() == 0. self.log_factor = log_factor super().__init__(batch_shape, event_shape, validate_args=validate_args) def expand(self, batch_shape, _instance=None): new = self._get_checked_instance(Unit, _instance) new.log_factor = self.log_factor.expand(batch_shape) super(Unit, new).__init__(batch_shape, self.event_shape, validate_args=False) new._validate_args = self._validate_args return new def sample(self, sample_shape=torch.Size()): # noqa: B008 return self.log_factor.new_empty(sample_shape) def log_prob(self, value): shape = broadcast_shape(self.batch_shape, value.shape[:-1]) return self.log_factor.expand(shape)
beanmachine-main
src/beanmachine/ppl/distributions/unit.py
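A few worked cases for `broadcast_shape` above, assuming it is in scope. Trailing dimensions are aligned and size-1 dimensions stretch, unless strict=True:

print(broadcast_shape((2, 1), (3,)))       # (2, 3)
print(broadcast_shape((4, 1, 5), (2, 5)))  # (4, 2, 5)
try:
    broadcast_shape((2,), (3,))            # incompatible sizes
except ValueError as err:
    print(err)                             # shape mismatch: ...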
# @lint-ignore-every LICENSELINT # Copyright Contributors to the Pyro project. # SPDX-License-Identifier: Apache-2.0 import numbers import torch from torch.distributions import constraints, Distribution # Helper function def sum_rightmost(value, dim): if isinstance(value, numbers.Number): return value if dim < 0: dim += value.dim() if dim == 0: return value if dim >= value.dim(): return value.sum() return value.reshape(value.shape[:-dim] + (-1,)).sum(-1) # For MAP estimation class Delta(Distribution): has_rsample = True arg_constraints = {"v": constraints.real, "log_density": constraints.real} support = constraints.real def __init__(self, v, log_density=0.0, event_dim=0, validate_args=None): if event_dim > v.dim(): raise ValueError( "Expected event_dim <= v.dim(), actual {} vs {}".format( event_dim, v.dim() ) ) batch_dim = v.dim() - event_dim batch_shape = v.shape[:batch_dim] event_shape = v.shape[batch_dim:] if isinstance(log_density, numbers.Number): log_density = v.new_empty(batch_shape).fill_(log_density) elif validate_args and log_density.shape != batch_shape: raise ValueError( "Expected log_density.shape = {}, actual {}".format( log_density.shape, batch_shape ) ) self.v = v self.log_density = log_density super(Delta, self).__init__( batch_shape, event_shape, validate_args=validate_args ) def expand(self, batch_shape, _instance=None): new = self._get_checked_instance(Delta, _instance) batch_shape = torch.Size(batch_shape) new.v = self.v.expand(batch_shape + self.event_shape) new.log_density = self.log_density.expand(batch_shape) super(Delta, new).__init__(batch_shape, self.event_shape, validate_args=False) new._validate_args = self._validate_args return new def shape(self, sample_shape=torch.Size()): # noqa: B008 return sample_shape + self.batch_shape + self.event_shape @property def event_dim(self): return len(self.event_shape) def rsample(self, sample_shape=torch.Size()): # noqa: B008 shape = sample_shape + self.v.shape return self.v.expand(shape) def log_prob(self, x): v = self.v.expand(self.shape()) log_prob = ((x == v).type(x.dtype)).log() log_prob = sum_rightmost(log_prob, self.event_dim) return log_prob + self.log_density @property def mean(self): return self.v @property def variance(self): return torch.zeros_like(self.v)
beanmachine-main
src/beanmachine/ppl/distributions/delta.py
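A usage sketch for `Delta` above, assuming the class is in scope: all probability mass sits at v, so log_prob is 0 where the value matches v and -inf elsewhere.

import torch

d = Delta(v=torch.tensor([1.0, 2.0]))
print(d.rsample())                           # tensor([1., 2.])
print(d.log_prob(torch.tensor([1.0, 2.0])))  # tensor([0., 0.])
print(d.log_prob(torch.tensor([1.0, 3.0])))  # tensor([0., -inf])
print(d.mean, d.variance)                    # v and zeros_like(v)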
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
beanmachine-main
src/beanmachine/tutorials/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""Module defining a data extract, transform, and load API."""
import logging
from pathlib import Path
from typing import Any

import pandas as pd

logger = logging.getLogger(__name__)


class Extract:
    """Base class defining a data extraction API."""

    def extract(self) -> Any:
        """Extract data."""
        return self._extract()

    def _extract(self) -> Any:
        """Extract method to be written by the inheriting class."""
        msg = "To be implemented by the inheriting class."
        raise NotImplementedError(msg)


class Transform:
    """Base class defining a data transformation API."""

    extractor = None

    def transform(self) -> Any:
        """Transform data."""
        self.extracted_data = self.extractor().extract()
        return self._transform()

    def _transform(self) -> Any:
        """Transform method to be written by the inheriting class."""
        msg = "To be implemented by the inheriting class."
        raise NotImplementedError(msg)


class Load:
    """Base class defining a data load API."""

    transformer = None
    filename = None
    data_dir = Path(__file__).parent.joinpath("data")

    def is_cached(self) -> bool:
        return Path(self.data_dir.joinpath(self.filename)).exists()

    def load(self) -> Any:
        """Load data."""
        if self.filename is not None and self.is_cached():
            return pd.read_csv(str(self.data_dir.joinpath(self.filename)))
        self.transformed_data = self.transformer().transform()
        # Cache to disk
        try:
            if not self.data_dir.exists():
                self.data_dir.mkdir()
            self.transformed_data.to_csv(
                str(self.data_dir.joinpath(self.filename)),
                index=False,
            )
        except Exception as e:
            logger.warning(f"Failed to cache data: {e}")
        return self._load()

    def _load(self) -> Any:
        """Load method to be written by the inheriting class."""
        msg = "To be implemented by the inheriting class."
        raise NotImplementedError(msg)
beanmachine-main
src/beanmachine/tutorials/utils/etl.py
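A hypothetical pipeline wiring the three base classes above together; the class names, data, and filename are invented for illustration and are not part of the module itself.

import pandas as pd

class CountExtract(Extract):
    def _extract(self):
        # Pretend this pulled data from a remote source.
        return {"name": ["a", "b"], "count": [1, 2]}

class CountTransform(Transform):
    extractor = CountExtract

    def _transform(self):
        return pd.DataFrame(self.extracted_data)

class CountLoad(Load):
    transformer = CountTransform
    filename = "counts.csv"  # cached under the module's data/ directory

    def _load(self):
        return self.transformed_data

df = CountLoad().load()  # extract -> transform -> cache -> load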
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from beanmachine.tutorials.utils import etl, plots __all__ = ["etl", "plots"]
beanmachine-main
src/beanmachine/tutorials/utils/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Basic plotting methods used in the tutorials.""" from itertools import cycle from typing import Any, Dict, List, Optional, Tuple, Union import arviz as az import numpy as np from beanmachine.ppl import RVIdentifier from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples from bokeh.layouts import LayoutDOM from bokeh.models import ( Circle, ColumnDataSource, Div, FixedTicker, HoverTool, Legend, Line, ) from bokeh.palettes import Colorblind from bokeh.plotting import figure, gridplot from bokeh.plotting.figure import Figure def style(plot: Figure) -> None: """Style the given plot. Args: plot (Figure): Bokeh `Figure` object to be styled. Returns: None: Nothing is returned, but the figure is now styled. """ plot.set_from_json(name="outline_line_color", json="black") plot.grid.grid_line_alpha = 0.2 plot.grid.grid_line_color = "grey" plot.grid.grid_line_width = 0.2 plot.xaxis.minor_tick_line_color = "grey" plot.yaxis.minor_tick_line_color = "grey" def choose_palette(n: int) -> Tuple[str]: """Choose an appropriate colorblind palette, given ``n``. Args: n (int): The size of the number of glyphs for a plot. Returns: palette (Tuple[str]): A tuple of color strings from Bokeh's colorblind palette. """ palette_indices = [key for key in Colorblind.keys() if n <= key] if not palette_indices: palette_index = max(Colorblind.keys()) else: palette_index = min(palette_indices) palette = Colorblind[palette_index] return palette def bar_plot( plot_source: ColumnDataSource, orientation: Optional[str] = "vertical", figure_kwargs: Union[Dict[str, Any], None] = None, plot_kwargs: Union[Dict[str, Any], None] = None, tooltips: Union[List[Tuple[str, str]], None] = None, ) -> Figure: """Interactive Bokeh bar plot. Args: plot_source (ColumnDataSource): Bokeh object that contains data for the plot. orientation (Optional[str]): Optional orientation for the figure. Can be one of either: "vertical" (default) or "horizontal". figure_kwargs (Union[Dict[str, Any], None]): Figure arguments that change the style of the figure. plot_kwargs (Union[Dict[str, Any], None]): Plot arguments that change the style of the glyphs. tooltips (Union[List[Tuple[str, str]], None]): Hover tooltips. Returns: Figure: A Bokeh `Figure` object you can display in a notebook. """ if figure_kwargs is None: figure_kwargs = {} if plot_kwargs is None: plot_kwargs = {} x = np.array(plot_source.data["x"]) y = np.array(plot_source.data["y"]) tick_labels = plot_source.data["tick_labels"] padding = 0.2 range_ = [] if orientation == "vertical": y_range_start = 1 - padding y_range_end = (1 + padding) * y.max() log_bounds = [y_range_start, y_range_end] minimum = (1 - padding) * y.min() maximum = (1 + padding) * y.max() no_log_bounds = [minimum, maximum] range_ = ( log_bounds if figure_kwargs.get("y_axis_type", None) is not None else no_log_bounds ) elif orientation == "horizontal": x_range_start = 1 - padding x_range_end = (1 + padding) * x.max() log_bounds = [x_range_start, x_range_end] minimum = (1 - padding) * x.min() maximum = (1 + padding) * x.max() no_log_bounds = [minimum, maximum] range_ = ( log_bounds if figure_kwargs.get("x_axis_type", None) is not None else no_log_bounds ) # Define default plot and figure keyword arguments. 
fig_kwargs = { "plot_width": 700, "plot_height": 500, "y_range" if orientation == "vertical" else "x_range": range_, } if figure_kwargs: fig_kwargs.update(figure_kwargs) plt_kwargs = { "fill_color": "steelblue", "fill_alpha": 0.7, "line_color": "white", "line_width": 1, "line_alpha": 0.7, "hover_fill_color": "orange", "hover_fill_alpha": 1, "hover_line_color": "black", "hover_line_width": 2, "hover_line_alpha": 1, } if plot_kwargs: plt_kwargs.update(plot_kwargs) # Create the plot. plot = figure(**fig_kwargs) # Bind data to the plot. glyph = plot.quad( left="left", top="top", right="right", bottom="bottom", source=plot_source, **plt_kwargs, ) if tooltips is not None: tips = HoverTool(renderers=[glyph], tooltips=tooltips) plot.add_tools(tips) # Style the plot. style(plot) plot.xaxis.major_label_orientation = np.pi / 4 if orientation == "vertical": plot.xaxis.ticker = FixedTicker(ticks=list(range(len(tick_labels)))) plot.xaxis.major_label_overrides = dict(zip(range(len(x)), tick_labels)) plot.xaxis.minor_tick_line_color = None if orientation == "horizontal": plot.yaxis.ticker = FixedTicker(ticks=list(range(len(tick_labels)))) plot.yaxis.major_label_overrides = dict(zip(range(len(y)), tick_labels)) plot.yaxis.minor_tick_line_color = None return plot def histogram_plot( data, n_bins: Union[int, None] = None, figure_kwargs: Union[Dict[str, Any], None] = None, plot_kwargs: Union[Dict[str, Any], None] = None, ) -> Figure: if figure_kwargs is None: figure_kwargs = {} if plot_kwargs is None: plot_kwargs = {} if n_bins is None: n_bins = int(np.ceil(2 * np.log2(len(data)) + 1)) top, density, bins = az.stats.density_utils.histogram(data=data, bins=n_bins) bottom = np.zeros(len(top)) left = bins[:-1].tolist() right = bins[1:].tolist() label = [f"{item[0]:.3f} - {item[1]:.3f}" for item in zip(bins[:-1], bins[1:])] cds = ColumnDataSource( { "left": left, "top": top, "right": right, "bottom": bottom, "label": label, } ) fig = figure( plot_width=800, plot_height=500, y_axis_label="Counts", **figure_kwargs, ) glyph = fig.quad( left="left", top="top", right="right", bottom="bottom", source=cds, fill_color="steelblue", line_color="white", fill_alpha=0.7, hover_fill_color="orange", hover_line_color="black", hover_alpha=1.0, **plot_kwargs, ) tips = HoverTool( renderers=[glyph], tooltips=[("Counts", "@top"), ("Bin", "@label")], ) fig.add_tools(tips) style(fig) return fig def scatter_plot( # noqa flake8 C901 too complex plot_sources: Union[ColumnDataSource, List[ColumnDataSource]], figure_kwargs: Union[Dict[str, Any], None] = None, plot_kwargs: Union[Dict[str, Any], None] = None, tooltips: Union[List[List[Tuple[str, str]]], List[Tuple[str, str]], None] = None, legend_items: Union[str, List[str], None] = None, ) -> Figure: """Create a scatter plot using Bokeh. Args: plot_sources (Union[ColumnDataSource, List[ColumnDataSource]]): Bokeh ``ColumnDataSource`` object(s). figure_kwargs (Union[Dict[str, Any], None]): (optional, default is None) Figure arguments that change the style of the figure. plot_kwargs (Union[Dict[str, Any], None]): (optional, default is None) Plot arguments that change the style of the glyphs of the figure. tooltips (Union[List[List[Tuple[str, str]]], List[Tuple[str, str]], None]): (optional, default is None) Hover tooltips. legend_items (Union[str, List[str], None]): (optional, default is None) Labels for the scatter items. Returns: plot (Figure): Bokeh figure you can visualize in a notebook. 
""" if figure_kwargs is None: figure_kwargs = {} if plot_kwargs is None: plot_kwargs = {} if not isinstance(plot_sources, list): plot_sources = [plot_sources] if not isinstance(tooltips[0], list): if isinstance(tooltips[0], tuple): tooltips = [tooltips] if legend_items: if not isinstance(legend_items, list): legend_items = [legend_items] palette = choose_palette(len(plot_sources)) colors = cycle(palette) # Define default plot and figure keyword arguments. fig_kwargs = { "plot_width": 700, "plot_height": 500, } if figure_kwargs: fig_kwargs.update(figure_kwargs) plt_kwargs = { "size": 10, "fill_alpha": 0.7, "line_color": "white", "line_width": 1, "line_alpha": 0.7, "hover_fill_color": "orange", "hover_fill_alpha": 1, "hover_line_color": "black", "hover_line_width": 2, "hover_line_alpha": 1, } if plot_kwargs: plt_kwargs.update(plot_kwargs) # Create the plot. plot = figure(**fig_kwargs) for i, plot_source in enumerate(plot_sources): color = next(colors) if plot_kwargs: if "fill_color" in plot_kwargs: color = plot_kwargs["fill_color"] plot_kwargs.update({"fill_color": color}) if legend_items: glyph = plot.circle( x="x", y="y", source=plot_source, legend_label=legend_items[i], **plt_kwargs, ) else: glyph = plot.circle( x="x", y="y", source=plot_source, **plt_kwargs, ) if tooltips is not None: tips = HoverTool(renderers=[glyph], tooltips=tooltips[i]) plot.add_tools(tips) # Style the plot. style(plot) return plot def line_plot( plot_sources: Union[ColumnDataSource, List[ColumnDataSource]], labels: Union[List[str], None] = None, figure_kwargs: Union[Dict[str, Any], None] = None, tooltips: Union[List[List[Tuple[str, str]]], None] = None, plot_kwargs: Union[Dict[str, Any], None] = None, ) -> Figure: """Create a line plot using Bokeh. Args: plot_sources (Union[ColumnDataSource, List[ColumnDataSource]]): List of Bokeh `ColumnDataSource` objects or a single `ColumnDataSource`. labels (Union[List[str], None]): Labels for the legend. If none are given, then no legend will be generated. figure_kwargs (Union[Dict[str, Any], None]): Figure arguments that change the style of the figure. tooltips (Union[List[List[Tuple[str, str]]], None]): Hover tooltips. plot_kwargs (Union[Dict[str, Any], None]): Plot arguments that change the style of the glyphs. Returns: Figure: """ if not isinstance(plot_sources, list): plot_sources = [plot_sources] if figure_kwargs is None: figure_kwargs = {} if plot_kwargs is None: plot_kwargs = {} palette = choose_palette(len(plot_sources)) colors = cycle(palette) # Define default plot and figure keyword arguments. fig_kwargs = { "plot_width": 700, "plot_height": 500, } if figure_kwargs: fig_kwargs.update(figure_kwargs) plot = figure(**fig_kwargs) for i, plot_source in enumerate(plot_sources): color = next(colors) if labels is not None: plot_kwargs.update({"legend_label": labels[i]}) locals()[f"glyph_{i}"] = plot.line( x="x", y="y", source=plot_source, color=color, **plot_kwargs, ) if tooltips: plot.add_tools( HoverTool( renderers=[locals()[f"glyph_{i}"]], tooltips=tooltips[i], ) ) # Style the plot. style(plot) return plot def plot_marginal( queries: List[RVIdentifier], samples: MonteCarloSamples, true_values: Union[List[float], List[None], None] = None, n_bins: Union[int, None] = None, bandwidth: Union[float, str] = "experimental", figure_kwargs: Union[Dict[str, Any], None] = None, joint_plot_title: Union[str, None] = None, ) -> LayoutDOM: """Marginal plot using Bokeh. - If one RV is given, then a single marginal plot will be shown. 
- If two RVs are given, then a joint plot is shown along with marginal densities. Args: queries (List[RVIdentifier]): Bean Machine `RVIdentifier` objects. samples (MonteCarloSamples): Bean Machine `MonteCarloSamples` object use to query data for the given queries. true_values (Union[List[float], List[None], None]): If you are creating simulated data then you can plot the true values if they are supplied. n_bins (Union[int, None]): The number of bins to use when generating the marginal plots. If no value is supplied, then twice the Sturges value will be used. See https://en.wikipedia.org/wiki/Histogram#Sturges'_formula or https://doi.org/10.1080%2F01621459.1926.10502161. bandwidth (Union[float, str]): Bandwidth to use for calculating the KDE, the default is `experimental`. figure_kwargs (Union[Dict[str, Any], None]): Figure keyword arguments supplied to the central figure. joint_plot_title (Union[str, None]): The title to display if two query objects has been given. Returns: LayoutDOM: An interactive Bokeh object that can be displayed in a notebook. """ if len(queries) > 2: raise NotImplementedError("Can only handle two random variables at this time.") if true_values is None: true_values = [None] * len(queries) if figure_kwargs is None: figure_kwargs = {} layout = LayoutDOM() # Create an empty figure plot_width = 500 plot_height = 500 min_border = 0 central_fig = figure( plot_width=plot_width, plot_height=plot_height, outline_line_color="black", min_border=min_border, title="", x_axis_label="", y_axis_label="", **figure_kwargs, ) central_fig.add_layout(Legend(), "below") central_fig.grid.visible = False # Prepare data for the figure(s) figure_data = {} scaled_density = np.empty(0) for query in queries: data = samples[query] kde = az.stats.density_utils.kde(data.flatten().numpy(), bw=bandwidth) support = kde[0] density = kde[1] normalized_density = density / density.max() n_bins = ( int(np.ceil(2 * np.log2(data.shape[1])) + 1) if n_bins is None else n_bins ) histogram, _, bins = az.stats.density_utils.histogram(data, n_bins) scaled_density = normalized_density * histogram.max() density_cds = ColumnDataSource( { "x": support.tolist(), "scaled": scaled_density.tolist(), "normalized": normalized_density.tolist(), } ) labels = [f"{item[0]:.3f}–{item[1]:.3f}" for item in zip(bins[:-1], bins[1:])] histogram_cds = ColumnDataSource( { "left": bins[:-1].tolist(), "top": histogram.tolist(), "right": bins[1:].tolist(), "bottom": [0] * len(histogram), "label": labels, } ) figure_data[str(query).replace("()", "")] = { "histogram": histogram_cds, "density": density_cds, } if len(queries) == 1: query = queries[0] # Label the figure central_fig.xaxis.axis_label = str(query).replace("()", "") central_fig.yaxis.visible = False central_fig.set_from_json(name="outline_line_color", json="black") # Bind data to the figure histogram_glyph = central_fig.quad( left="left", top="top", right="right", bottom="bottom", source=figure_data[str(query).replace("()", "")]["histogram"], fill_color="steelblue", line_color="white", fill_alpha=0.6, hover_fill_color="orange", hover_line_color="black", hover_alpha=1, legend_label="Histogram", ) density_glyph = central_fig.line( x="x", y="scaled", source=figure_data[str(query).replace("()", "")]["density"], line_color="brown", line_width=2, line_alpha=0.6, hover_line_color="magenta", hover_line_alpha=1, legend_label="Density", ) # Add tooltips to the figure histogram_tips = HoverTool( renderers=[histogram_glyph], tooltips=[(f"{str(query).replace('()', '')}", "@label")], ) 
central_fig.add_tools(histogram_tips) density_tips = HoverTool( renderers=[density_glyph], tooltips=[(f"{str(query).replace('()', '')}", "@x{0.000}")], ) central_fig.add_tools(density_tips) true_value = true_values[0] true_cds = ColumnDataSource( { "x": [true_value] * 100, "y": np.linspace(0, scaled_density.max(), 100).tolist(), } ) true_glyph = central_fig.line( x="x", y="y", source=true_cds, line_color="magenta", line_width=2, line_alpha=1, legend_label="True value", ) true_tips = HoverTool( renderers=[true_glyph], tooltips=[("True value", "@x{0.000}")], ) central_fig.add_tools(true_tips) mean_cds = ColumnDataSource( { "x": [samples[query].mean().item()] * 100, "y": np.linspace(0, scaled_density.max(), 100).tolist(), } ) mean_glyph = central_fig.line( x="x", y="y", source=mean_cds, line_color="black", line_width=2, line_alpha=1, legend_label="Posterior mean value", ) mean_tips = HoverTool( renderers=[mean_glyph], tooltips=[("Posterior mean value", "@x{0.000}")], ) central_fig.add_tools(mean_tips) layout = gridplot([[central_fig]]) if len(queries) == 2: title_div = None if joint_plot_title is not None: title_div = Div(text=f"<h3>{joint_plot_title}</h3>") # Prepare the 2D data v0 = samples[queries[0]].flatten().numpy() v1 = samples[queries[1]].flatten().numpy() density, xmin, xmax, ymin, ymax = az.stats.density_utils._fast_kde_2d(v0, v1) simulated_mean_cds = ColumnDataSource( { # Simulated mean f"{str(queries[1]).replace('()', '')}_x": np.linspace( xmin, xmax, 100, ).tolist(), f"{str(queries[1]).replace('()', '')}_y": [v1.mean()] * 100, f"{str(queries[0]).replace('()', '')}_x": [v0.mean()] * 100, f"{str(queries[0]).replace('()', '')}_y": np.linspace( ymin, ymax, 100, ).tolist(), # True f"{str(queries[1]).replace('()', '')}_true_x": np.linspace( xmin, xmax, 100, ).tolist(), f"{str(queries[1]).replace('()', '')}_true_y": [true_values[1]] * 100, f"{str(queries[0]).replace('()', '')}_true_x": [true_values[0]] * 100, f"{str(queries[0]).replace('()', '')}_true_y": np.linspace( ymin, ymax, 100, ).tolist(), } ) # Style the central figure central_fig.x_range.start = xmin central_fig.x_range.end = xmax central_fig.x_range.max_interval = xmax central_fig.y_range.start = ymin central_fig.y_range.end = ymax central_fig.y_range.max_interval = ymax central_fig.set_from_json(name="match_aspect", json=True) central_fig.set_from_json(name="background_fill_color", json="#440154") central_fig.background_fill_alpha = 0.5 central_fig.xaxis.axis_label = f"{str(queries[0]).replace('()', '')}" central_fig.yaxis.axis_label = f"{str(queries[1]).replace('()', '')}" # Create empty figures v0_fig = figure( plot_width=plot_width, plot_height=100, outline_line_color=None, x_range=central_fig.x_range, x_axis_location=None, min_border=min_border, ) v0_fig.yaxis.visible = False v0_fig.xaxis.visible = False v0_fig.grid.visible = False v1_fig = figure( plot_width=100, plot_height=plot_height, outline_line_color=None, y_range=central_fig.y_range, y_axis_location=None, min_border=min_border, ) v1_fig.yaxis.visible = False v1_fig.xaxis.visible = False v1_fig.grid.visible = False # Bind density data to the marginal plots v0_density_glyph = v0_fig.line( x="x", y="normalized", source=figure_data[str(queries[0]).replace("()", "")]["density"], line_color="steelblue", line_width=2, line_alpha=1, ) v0_density_tips = HoverTool( renderers=[v0_density_glyph], tooltips=[(f"{str(queries[0]).replace('()', '')}", "@x{0.000}")], ) v0_fig.add_tools(v0_density_tips) v0_mean_cds = ColumnDataSource( {"x": [v0.mean()] * 100, "y": np.linspace(0, 1, 
100).tolist()} ) v0_mean_glyph = v0_fig.line( x="x", y="y", source=v0_mean_cds, line_color="magenta", line_width=2, alpha=1, ) v0_mean_tips = HoverTool( renderers=[v0_mean_glyph], tooltips=[(f"{str(queries[0]).replace('()', '')} mean", "@x{0.000}")], ) v0_fig.add_tools(v0_mean_tips) v0_true_cds = ColumnDataSource( {"x": [true_values[0]] * 100, "y": np.linspace(0, 1, 100).tolist()} ) v0_true_glyph = v0_fig.line( x="x", y="y", source=v0_true_cds, line_color="steelblue", line_width=2, alpha=1, ) v0_true_tips = HoverTool( renderers=[v0_true_glyph], tooltips=[(f"{str(queries[0]).replace('()', '')} true", "@x{0.000}")], ) v0_fig.add_tools(v0_true_tips) v1_true_cds = ColumnDataSource( {"y": [true_values[1]] * 100, "x": np.linspace(0, 1, 100).tolist()} ) v1_true_glyph = v1_fig.line( x="x", y="y", source=v1_true_cds, line_color="steelblue", line_width=2, alpha=1, ) v1_true_tips = HoverTool( renderers=[v1_true_glyph], tooltips=[(f"{str(queries[1]).replace('()', '')} true", "@y{0.000}")], ) v1_fig.add_tools(v1_true_tips) v1_density_glyph = v1_fig.line( x="normalized", y="x", source=figure_data[str(queries[1]).replace("()", "")]["density"], line_color="steelblue", line_width=2, line_alpha=1, ) v1_density_tips = HoverTool( renderers=[v1_density_glyph], tooltips=[(f"{str(queries[1]).replace('()', '')}", "@normalized{0.000}")], ) v1_fig.add_tools(v1_density_tips) v1_mean_cds = ColumnDataSource( {"x": [v1.mean()] * 100, "y": np.linspace(0, 1, 100).tolist()} ) v1_mean_glyph = v1_fig.line( x="y", y="x", source=v1_mean_cds, line_color="magenta", line_width=2, alpha=1, ) v1_mean_tips = HoverTool( renderers=[v1_mean_glyph], tooltips=[(f"{str(queries[1]).replace('()', '')} mean", "@x{0.000}")], ) v1_fig.add_tools(v1_mean_tips) central_fig.image( image=[density.T], x=xmin, y=ymin, dw=xmax - xmin, dh=ymax - ymin, palette="Viridis256", ) v0_mean_joint_glyph = central_fig.line( x=f"{str(queries[0]).replace('()', '')}_x", y=f"{str(queries[0]).replace('()', '')}_y", source=simulated_mean_cds, line_color="magenta", line_width=2, line_alpha=0.5, legend_label="Posterior marginal mean", ) v0_mean_joint_tips = HoverTool( renderers=[v0_mean_joint_glyph], tooltips=[ ( f"{str(queries[0]).replace('()', '')} mean", f"@{str(queries[0]).replace('()', '')}_x", ) ], ) central_fig.add_tools(v0_mean_joint_tips) v0_true_joint_glyph = central_fig.line( x=f"{str(queries[0]).replace('()', '')}_true_x", y=f"{str(queries[0]).replace('()', '')}_true_y", source=simulated_mean_cds, line_color="steelblue", line_width=2, line_alpha=0.5, legend_label="True value", ) v0_true_joint_tips = HoverTool( renderers=[v0_true_joint_glyph], tooltips=[ ( f"{str(queries[0]).replace('()', '')} true", f"@{str(queries[0]).replace('()', '')}_true_x", ) ], ) central_fig.add_tools(v0_true_joint_tips) v1_true_joint_glyph = central_fig.line( x=f"{str(queries[1]).replace('()', '')}_true_x", y=f"{str(queries[1]).replace('()', '')}_true_y", source=simulated_mean_cds, line_color="steelblue", line_width=2, line_alpha=0.5, legend_label="True value", ) v1_true_joint_tips = HoverTool( renderers=[v1_true_joint_glyph], tooltips=[ ( f"{str(queries[1]).replace('()', '')} true", f"@{str(queries[1]).replace('()', '')}_true_x", ) ], ) central_fig.add_tools(v1_true_joint_tips) v1_mean_joint_glyph = central_fig.line( x=f"{str(queries[1]).replace('()', '')}_x", y=f"{str(queries[1]).replace('()', '')}_y", source=simulated_mean_cds, line_color="magenta", line_width=2, line_alpha=0.5, legend_label="Posterior marginal mean", ) v1_mean_joint_tips = HoverTool( 
renderers=[v1_mean_joint_glyph], tooltips=[ ( f"{str(queries[1]).replace('()', '')} mean", f"@{str(queries[1]).replace('()', '')}_y", ) ], ) central_fig.add_tools(v1_mean_joint_tips) mean_cds = ColumnDataSource({"x": [v0.mean()], "y": [v1.mean()]}) mean_glyph = central_fig.circle( x="x", y="y", source=mean_cds, size=10, fill_color="magenta", line_color="white", fill_alpha=1, legend_label="Posterior marginal mean", hover_fill_color="orange", hover_line_color="black", hover_alpha=1, ) mean_tips = HoverTool( renderers=[mean_glyph], tooltips=[ (f"{str(queries[1]).replace('()', '')} mean", "@y{0.000}"), (f"{str(queries[0]).replace('()', '')} mean", "@x{0.000}"), ], ) central_fig.add_tools(mean_tips) true_cds = ColumnDataSource({"x": [true_values[0]], "y": [true_values[1]]}) true_glyph = central_fig.circle( x="x", y="y", source=true_cds, size=10, fill_color="steelblue", line_color="white", fill_alpha=1, legend_label="True value", hover_fill_color="orange", hover_line_color="black", hover_alpha=1, ) true_tips = HoverTool( renderers=[true_glyph], tooltips=[ (f"{str(queries[1]).replace('()', '')} true", "@y{0.000}"), (f"{str(queries[0]).replace('()', '')} true", "@x{0.000}"), ], ) central_fig.add_tools(true_tips) if title_div is not None: layout = gridplot( [[title_div, None], [v0_fig, None], [central_fig, v1_fig]] ) else: layout = gridplot([[v0_fig, None], [central_fig, v1_fig]]) return layout def marginal_2d( x, y, x_label: str, y_label: str, title: str, true_x: Union[float, None] = None, true_y: Union[float, None] = None, figure_kwargs: Union[Dict[str, Any], None] = None, bandwidth: Union[float, str] = "experimental", n_bins: Union[int, None] = None, ): # NOTE: This is duplicated from the plot_marginal, but it uses non Bean Machine # objects to generate the plot. This replication was done for quickly getting # a figure to work for the tutorials. if figure_kwargs is None: figure_kwargs = {} # Prepare the 1D data for the figure each feature. 
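# NOTE (added comment): for each axis a KDE and a histogram are computed; the density is normalized to [0, 1] for the marginal panels and also rescaled to the histogram's peak height for overlaying on the counts.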
figure_data = {} # x-axis x = np.array(x) kde_x = az.stats.density_utils.kde(x, bw=bandwidth) support_x = kde_x[0] density_x = kde_x[1] normalized_density_x = density_x / density_x.max() n_bins_x = int(np.ceil(2 * np.log2(len(x)) + 1)) if n_bins is None else n_bins histogram_x, _, bins_x = az.stats.density_utils.histogram(x, n_bins_x) scaled_density_x = normalized_density_x * histogram_x.max() labels_x = [f"{item[0]:.3f}–{item[1]:.3f}" for item in zip(bins_x[:-1], bins_x[1:])] density_cds_x = ColumnDataSource( { "x": support_x.tolist(), "scaled": scaled_density_x.tolist(), "normalized": normalized_density_x.tolist(), } ) histogram_cds_x = ColumnDataSource( { "left": bins_x[:-1].tolist(), "top": histogram_x.tolist(), "right": bins_x[1:].tolist(), "bottom": [0] * len(histogram_x), "label": labels_x, } ) figure_data[x_label] = { "histogram": histogram_cds_x, "density": density_cds_x, } # y-axis y = np.array(y) kde_y = az.stats.density_utils.kde(y, bw=bandwidth) support_y = kde_y[0] density_y = kde_y[1] normalized_density_y = density_y / density_y.max() n_bins_y = int(np.ceil(2 * np.log2(len(y)) + 1)) if n_bins is None else n_bins histogram_y, _, bins_y = az.stats.density_utils.histogram(y, n_bins_y) scaled_density_y = normalized_density_y * histogram_y.max() labels_y = [f"{item[0]:.3f}–{item[1]:.3f}" for item in zip(bins_y[:-1], bins_y[1:])] density_cds_y = ColumnDataSource( { "x": support_y.tolist(), "scaled": scaled_density_y.tolist(), "normalized": normalized_density_y.tolist(), } ) histogram_cds_y = ColumnDataSource( { "left": bins_y[:-1].tolist(), "top": histogram_y.tolist(), "right": bins_y[1:].tolist(), "bottom": [0] * len(histogram_y), "label": labels_y, } ) figure_data[y_label] = { "histogram": histogram_cds_y, "density": density_cds_y, } # Prepare the 2D data for the figure. density_2d, xmin, xmax, ymin, ymax = az.stats.density_utils._fast_kde_2d(x, y) simulated_mean_cds = ColumnDataSource( { # Simulated mean "yaxis_x": np.linspace(xmin, xmax, 100).tolist(), "yaxis_y": [y.mean()] * 100, "xaxis_x": [x.mean()] * 100, "xaxis_y": np.linspace(ymin, ymax, 100).tolist(), # True "yaxis_true_x": np.linspace(xmin, xmax, 100).tolist(), "yaxis_true_y": [true_y] * 100, "xaxis_true_x": [true_x] * 100, "xaxis_true_y": np.linspace(ymin, ymax, 100).tolist(), } ) # Figure title title_div = None if title is not None: title_div = Div(text=f"<h3>{title}</h3>") # Create central figure. plot_width = 500 plot_height = 500 min_border = 0 central_fig = figure( plot_width=plot_width, plot_height=plot_height, outline_line_color="black", min_border=min_border, title="", x_axis_label=x_label, y_axis_label=y_label, **figure_kwargs, ) central_fig.add_layout(Legend(), "below") central_fig.grid.visible = False # Style the central figure central_fig.x_range.start = xmin central_fig.x_range.end = xmax central_fig.x_range.max_interval = xmax central_fig.y_range.start = ymin central_fig.y_range.end = ymax central_fig.y_range.max_interval = ymax central_fig.set_from_json(name="match_aspect", json=True) central_fig.set_from_json(name="background_fill_color", json="#440154") central_fig.background_fill_alpha = 0.5 central_fig.xaxis.axis_label = x_label central_fig.yaxis.axis_label = y_label # Create the marginal figures. 
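# NOTE (added comment): the x marginal sits above the joint figure and shares its x_range; the y marginal sits to its right and shares its y_range, so zooming and panning stay linked across all three panels.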
# x-axis x_fig = figure( plot_width=plot_width, plot_height=100, outline_line_color=None, x_range=central_fig.x_range, x_axis_location=None, min_border=min_border, ) x_fig.yaxis.visible = False x_fig.xaxis.visible = False x_fig.grid.visible = False # y-axis y_fig = figure( plot_width=100, plot_height=plot_height, outline_line_color=None, y_range=central_fig.y_range, y_axis_location=None, min_border=min_border, ) y_fig.yaxis.visible = False y_fig.xaxis.visible = False y_fig.grid.visible = False # Bind density data to the marginal plots # x-axis x_density_glyph = x_fig.line( x="x", y="normalized", source=figure_data[x_label]["density"], line_color="steelblue", line_width=2, line_alpha=1, ) x_density_tips = HoverTool( renderers=[x_density_glyph], tooltips=[(x_label, "@x{0.000}")], ) x_fig.add_tools(x_density_tips) x_mean_cds = ColumnDataSource( {"x": [x.mean()] * 100, "y": np.linspace(0, 1, 100).tolist()} ) x_mean_glyph = x_fig.line( x="x", y="y", source=x_mean_cds, line_color="magenta", line_width=2, alpha=1, ) x_mean_tips = HoverTool( renderers=[x_mean_glyph], tooltips=[(f"{x_label} mean", "@x{0.000}")], ) x_fig.add_tools(x_mean_tips) if true_x is not None: x_true_cds = ColumnDataSource( {"x": [true_x] * 100, "y": np.linspace(0, 1, 100).tolist()} ) x_true_glyph = x_fig.line( x="x", y="y", source=x_true_cds, line_color="steelblue", line_width=2, alpha=1, ) x_true_tips = HoverTool( renderers=[x_true_glyph], tooltips=[(f"{x_label} true", "@x{0.000}")], ) x_fig.add_tools(x_true_tips) # y-axis if true_y is not None: y_true_cds = ColumnDataSource( {"y": [true_y] * 100, "x": np.linspace(0, 1, 100).tolist()} ) y_true_glyph = y_fig.line( x="x", y="y", source=y_true_cds, line_color="steelblue", line_width=2, alpha=1, ) y_true_tips = HoverTool( renderers=[y_true_glyph], tooltips=[(f"{y_label} true", "@y{0.000}")], ) y_fig.add_tools(y_true_tips) y_density_glyph = y_fig.line( x="normalized", y="x", source=figure_data[y_label]["density"], line_color="steelblue", line_width=2, line_alpha=1, ) y_density_tips = HoverTool( renderers=[y_density_glyph], tooltips=[(y_label, "@normalized{0.000}")], ) y_fig.add_tools(y_density_tips) y_mean_cds = ColumnDataSource( {"x": [y.mean()] * 100, "y": np.linspace(0, 1, 100).tolist()} ) y_mean_glyph = y_fig.line( x="y", y="x", source=y_mean_cds, line_color="magenta", line_width=2, alpha=1, ) y_mean_tips = HoverTool( renderers=[y_mean_glyph], tooltips=[(f"{y_label} mean", "@x{0.000}")], ) y_fig.add_tools(y_mean_tips) # joint figure central_fig.image( image=[density_2d.T], x=xmin, y=ymin, dw=xmax - xmin, dh=ymax - ymin, palette="Viridis256", ) x_mean_joint_glyph = central_fig.line( x="xaxis_x", y="xaxis_y", source=simulated_mean_cds, line_color="magenta", line_width=2, line_alpha=0.5, legend_label="Posterior marginal mean", ) x_mean_joint_tips = HoverTool( renderers=[x_mean_joint_glyph], tooltips=[(f"{x_label} mean", "@xaxis_x")], ) central_fig.add_tools(x_mean_joint_tips) if true_x is not None: x_true_joint_glyph = central_fig.line( x="xaxis_true_x", y="xaxis_true_y", source=simulated_mean_cds, line_color="steelblue", line_width=2, line_alpha=0.5, legend_label="True value", ) x_true_joint_tips = HoverTool( renderers=[x_true_joint_glyph], tooltips=[(f"{x_label} true", "@xaxis_true_x")], ) central_fig.add_tools(x_true_joint_tips) if true_y is not None: y_true_joint_glyph = central_fig.line( x="yaxis_true_x", y="yaxis_true_y", source=simulated_mean_cds, line_color="steelblue", line_width=2, line_alpha=0.5, legend_label="True value", ) y_true_joint_tips = HoverTool( 
renderers=[y_true_joint_glyph], tooltips=[(f"{y_label} true", "@yaxis_true_y")], ) central_fig.add_tools(y_true_joint_tips) y_mean_joint_glyph = central_fig.line( x="yaxis_x", y="yaxis_y", source=simulated_mean_cds, line_color="magenta", line_width=2, line_alpha=0.5, legend_label="Posterior marginal mean", ) y_mean_joint_tips = HoverTool( renderers=[y_mean_joint_glyph], tooltips=[(f"{y_label} mean", "@yaxis_y")], ) central_fig.add_tools(y_mean_joint_tips) mean_cds = ColumnDataSource({"x": [x.mean()], "y": [y.mean()]}) mean_glyph = central_fig.circle( x="x", y="y", source=mean_cds, size=10, fill_color="magenta", line_color="white", fill_alpha=1, legend_label="Posterior marginal mean", hover_fill_color="orange", hover_line_color="black", hover_alpha=1, ) mean_tips = HoverTool( renderers=[mean_glyph], tooltips=[ (f"{y_label} mean", "@y{0.000}"), (f"{x_label} mean", "@x{0.000}"), ], ) central_fig.add_tools(mean_tips) if true_x is not None and true_y is not None: true_cds = ColumnDataSource({"x": [true_x], "y": [true_y]}) true_glyph = central_fig.circle( x="x", y="y", source=true_cds, size=10, fill_color="steelblue", line_color="white", fill_alpha=1, legend_label="True value", hover_fill_color="orange", hover_line_color="black", hover_alpha=1, ) true_tips = HoverTool( renderers=[true_glyph], tooltips=[ (f"{y_label} true", "@y{0.000}"), (f"{x_label} true", "@x{0.000}"), ], ) central_fig.add_tools(true_tips) if title_div is not None: layout = gridplot([[title_div, None], [x_fig, None], [central_fig, y_fig]]) else: layout = gridplot([[x_fig, None], [central_fig, y_fig]]) return layout def plot_diagnostics( samples: MonteCarloSamples, ordering: Union[None, List[str]] = None, plot_posterior: bool = False, ) -> List[Figure]: """ Plot model diagnostics. :param samples: Bean Machine inference object. :type samples: MonteCarloSamples :param ordering: Define an ordering for how the plots are displayed. :type ordering: List[str] :return: Bokeh figures with visual diagnostics. :rtype: List[Figure] """ COLORS = ["#2a2eec", "#fa7c17", "#328c06", "#c10c90"] # Prepare the data for the figure. samples_xr = samples.to_xarray() data = {str(key): value.values for key, value in samples_xr.data_vars.items()} if ordering is not None: diagnostics_data = {} for key in ordering: key = str(key) diagnostics_data[key] = data[key] else: diagnostics_data = data # Cycle through each query and create the diagnostics plots using arviz. 
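# NOTE (added comment): each query yields an optional posterior plot, a restyled trace plot, and one autocorrelation panel per chain, all recolored to the tutorial palette.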
    diagnostics_plots = []
    for key, value in diagnostics_data.items():
        posterior_plot = None
        if plot_posterior:
            posterior_plot = az.plot_posterior({key: value}, show=False)[0][0]
            posterior_plot.plot_width = 300
            posterior_plot.plot_height = 300
            posterior_plot.grid.grid_line_alpha = 0.2
            posterior_plot.grid.grid_line_color = "gray"
            posterior_plot.grid.grid_line_width = 0.3
            posterior_plot.yaxis.minor_tick_line_color = None
            posterior_plot.outline_line_color = "black"
        tr_plot = az.plot_trace(az.from_dict({key: value}), show=False)[0][1]
        line_index = 0
        circle_index = 0
        for renderer in tr_plot.renderers:
            glyph = renderer._property_values["glyph"]
            if isinstance(glyph, Line):
                glyph.line_color = COLORS[line_index]
                glyph.line_dash = "solid"
                glyph.line_width = 2
                glyph.line_alpha = 0.6
                line_index += 1
            if isinstance(glyph, Circle):
                glyph.fill_color = COLORS[circle_index]
                glyph.line_color = COLORS[circle_index]
                glyph.fill_alpha = 0.6
                circle_index += 1
        tr_plot.plot_width = 300
        tr_plot.plot_height = 300
        tr_plot.grid.grid_line_alpha = 0.2
        tr_plot.grid.grid_line_color = "gray"
        tr_plot.grid.grid_line_width = 0.3
        tr_plot.yaxis.minor_tick_line_color = None
        tr_plot.outline_line_color = "black"
        tr_plot.title.text = f"{tr_plot.title.text} trace plot"
        ac_plot = az.plot_autocorr({key: value}, show=False)[0].tolist()
        for i, p in enumerate(ac_plot):
            for renderer in p.renderers:
                glyph = renderer._property_values["glyph"]
                glyph.line_color = COLORS[i]
            p.plot_width = 300
            p.plot_height = 300
            p.grid.grid_line_alpha = 0.2
            p.grid.grid_line_color = "gray"
            p.grid.grid_line_width = 0.3
            p.yaxis.minor_tick_line_color = None
            p.outline_line_color = "black"
            p.title.text = f"{p.title.text.split()[0]}\nautocorrelation chain {i}"
        if plot_posterior:
            ps = [posterior_plot, tr_plot, *ac_plot]
        else:
            ps = [tr_plot, *ac_plot]
        diagnostics_plots.append(ps)
    return diagnostics_plots
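# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module; the tiny
# normal-normal model below is a hypothetical stand-in for a real tutorial
# model.
if __name__ == "__main__":
    import beanmachine.ppl as bm
    import torch.distributions as dist
    from bokeh.io import show
    from torch import tensor

    @bm.random_variable
    def mu():
        return dist.Normal(0.0, 1.0)

    @bm.random_variable
    def obs():
        return dist.Normal(mu(), 1.0)

    samples = bm.GlobalNoUTurnSampler().infer(
        queries=[mu()],
        observations={obs(): tensor(1.0)},
        num_samples=500,
        num_chains=2,
    )
    # Single query: histogram plus KDE, with posterior-mean and true-value lines.
    show(plot_marginal(queries=[mu()], samples=samples, true_values=[0.0]))
    # One row of diagnostics (trace plot plus autocorrelation) per queried variable.
    for row in plot_diagnostics(samples):
        show(gridplot([row]))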
beanmachine-main
src/beanmachine/tutorials/utils/plots.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Data ETL for the radon tutorial.""" from typing import Dict import arviz as az import numpy as np import pandas as pd import statsmodels.api as sm from beanmachine.tutorials.utils import etl from bokeh.layouts import layout from bokeh.models import ColumnDataSource, HoverTool, Whisker from bokeh.models.tickers import FixedTicker from bokeh.plotting import figure, gridplot from bokeh.plotting.figure import Figure from pandas import DataFrame from scipy import stats class ExtractRadonTutorialData(etl.Extract): """Extract data for the radon tutorial.""" _SCHEME = "http" _NETLOC = "raw.githubusercontent.com" _PATH = "pymc-devs/pymc-examples/main/examples/data" _RADON_FILENAME = "srrs2.dat" _COUNTY_FILENAME = "cty.dat" def __init__(self) -> None: self.radon_data_url = self._build_url(self._RADON_FILENAME) self.county_data_url = self._build_url(self._COUNTY_FILENAME) self.extracted_data = self._extract() def _build_url(self, filename: str) -> str: return self._SCHEME + "://" + self._NETLOC + "/" + self._PATH + "/" + filename def _extract(self) -> Dict[str, DataFrame]: radon_df = pd.read_csv(self.radon_data_url, skipinitialspace=True) county_df = pd.read_csv(self.county_data_url, skipinitialspace=True) return {"radon": radon_df, "county": county_df} class TransformRadonTutorialData(etl.Transform): """Transform radon data for the tutorial.""" extractor = ExtractRadonTutorialData counties = None def _tx_radon_df(self, df: DataFrame) -> DataFrame: # Select only counties in Minnesota. df = df[df["state"] == "MN"].copy() # Fix the spelling mistakes. misspellings = { "ST LOUIS": "ST. LOUIS", "SHAKOPEE-MDEWAKANTO": "SHAKOPEE MDEWAKANTON SIOUX", "MILLIE LACS": "MILLE LACS", } df["county"] = df["county"].str.strip().replace(misspellings) # Sort the data. df = df.sort_values(by="county").reset_index(drop=True) # Create an index for the counties. self.counties = sorted(df["county"].unique()) counties_dict = {county: i for i, county in enumerate(self.counties)} df["county_index"] = df["county"].map(counties_dict) # We only need a few columns for this analysis. df = df[["county_index", "county", "floor", "activity"]] # Calculate the logarithm of the activity data. df["log_activity"] = np.log(df["activity"].values + 0.1) return df def _tx_cty_df(self, df: DataFrame) -> DataFrame: # Filter for Minnesota. df = df[df["st"] == "MN"].copy() # Fix spelling errors. df["cty"] = df["cty"].replace( { "BIGSTONE": "BIG STONE", "BLUEEARTH": "BLUE EARTH", "CROWWING": "CROW WING", "LACQUIPARLE": "LAC QUI PARLE", "LAKEOFTHEWOODS": "LAKE OF THE WOODS", "LESUEUR": "LE SUEUR", "MILLELACS": "MILLE LACS", "OTTERTAIL": "OTTER TAIL", "STLOUIS": "ST. LOUIS", "YELLOWMEDICINE": "YELLOW MEDICINE", } ) # Drop counties in the `cty` data not found in the `srrs` data. df = df.drop(df[~df["cty"].isin(self.counties)].index) # Drop duplicates. df = df.drop(df[df["cty"].duplicated()].index) # We only need a few columns for this analysis. df = df[["cty", "Uppm"]].sort_values(by="cty").copy() df = df.rename(columns={"cty": "county"}) # Sort the data df = df.sort_values(by="county").reset_index(drop=True) # Calculate the logarithm of the uranium concentration. 
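# NOTE (added comment): unlike the radon activity above, which is offset by 0.1 before taking the log, the uranium concentrations are logged directly.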
df["log_Uppm"] = np.log(df["Uppm"].values) return df def _tx_data(self, radon: DataFrame, county: DataFrame) -> DataFrame: radon_df = self._tx_radon_df(radon) county_df = self._tx_cty_df(county) df = pd.merge( left=radon_df, right=county_df, left_on="county", right_on="county", how="left", ) return df def _transform(self) -> DataFrame: """Transform the data.""" radon_df = self.extracted_data["radon"] county_df = self.extracted_data["county"] return self._tx_data(radon_df, county_df) class LoadRadonTutorialData(etl.Load): """Load the transformed radon data.""" transformer = TransformRadonTutorialData filename = "radon.csv" def _load(self) -> DataFrame: """Load transformed data.""" return self.transformed_data def load_data() -> DataFrame: """Load the radon data.""" loader = LoadRadonTutorialData() return loader.load() def log_plot_comparison(data: pd.Series, nbins: int = 40): """Compare data plot with the log(data) plot.""" # Take the log of the given data. log_data = np.log(data + 0.01) # Determine histograms for the data. histogram, bins = np.histogram(data, bins=nbins) log_histogram, log_bins = np.histogram(log_data, bins=nbins) # Estimate the densities and scale them to their histograms. kde = sm.nonparametric.KDEUnivariate(data) kde.fit() scaled_density = (kde.density / kde.density.max()) * histogram.max() log_kde = sm.nonparametric.KDEUnivariate(log_data) log_kde.fit() log_scaled_density = (log_kde.density / log_kde.density.max()) * log_histogram.max() # Create the plots. plot = figure( plot_width=400, plot_height=400, title=f"Histogram of {data.name}", y_axis_label="Counts", x_axis_label=data.name, ) log_plot = figure( plot_width=400, plot_height=400, title=f"Histogram of log({data.name})", y_axis_label="Counts", x_axis_label=f"log({data.name})", ) # Bind data to the plots. 
density_source = ColumnDataSource({"x": kde.support, "y": scaled_density}) density_glyph = plot.line( x="x", y="y", source=density_source, line_color="black", line_width=2.0, line_alpha=0.7, hover_line_color="brown", hover_line_width=3.0, hover_line_alpha=1.0, legend_label="Kernel density estimation", ) density_tooltips = HoverTool( renderers=[density_glyph], tooltips=[ ("Density", ""), ("Count", "@y"), (f"{data.name.title()}", "@x"), ], ) plot.add_tools(density_tooltips) histogram_source = ColumnDataSource( { "left": bins[:-1], "right": bins[1:], "top": histogram, "bottom": np.zeros(histogram.shape[0]), "activity": [ f"{item[0]:.3f} - {item[1]:.3f}" for item in zip(bins[:-1], bins[1:]) ], } ) histogram_glyph = plot.quad( left="left", right="right", top="top", bottom="bottom", source=histogram_source, fill_color="steelblue", fill_alpha=0.7, line_color="white", line_width=1.0, hover_color="orange", hover_alpha=1.0, hover_line_color="black", hover_line_width=2.0, legend_label="Histogram", ) histogram_tooltips = HoverTool( renderers=[histogram_glyph], tooltips=[ ("Histogram", ""), ("Counts", "@top"), (f"{data.name.title()}", "@activity"), ], ) plot.add_tools(histogram_tooltips) log_density_source = ColumnDataSource( {"x": log_kde.support, "y": log_scaled_density} ) log_density_glyph = log_plot.line( x="x", y="y", source=log_density_source, line_color="black", line_width=2.0, line_alpha=0.7, hover_line_color="brown", hover_line_width=3.0, hover_line_alpha=1.0, ) log_density_tooltips = HoverTool( renderers=[log_density_glyph], tooltips=[ ("Density", ""), ("Count", "@y"), (f"log({data.name})", "@x"), ], ) log_plot.add_tools(log_density_tooltips) log_histogram_source = ColumnDataSource( { "left": log_bins[:-1], "right": log_bins[1:], "top": log_histogram, "bottom": np.zeros(log_histogram.shape[0]), "activity": [ f"{item[0]:.3f} - {item[1]:.3f}" for item in zip(log_bins[:-1], log_bins[1:]) ], } ) log_histogram_glyph = log_plot.quad( left="left", right="right", top="top", bottom="bottom", source=log_histogram_source, fill_color="steelblue", fill_alpha=0.7, line_color="white", line_width=1.0, hover_color="orange", hover_alpha=1.0, hover_line_color="black", hover_line_width=2.0, ) log_histogram_tooltips = HoverTool( renderers=[log_histogram_glyph], tooltips=[ ("Histogram", ""), ("Counts", "@top"), (f"log({data.name.title()})", "@activity"), ], ) log_plot.add_tools(log_histogram_tooltips) # Style the plots. plot.outline_line_color = "black" plot.grid.grid_line_color = "grey" plot.grid.grid_line_alpha = 0.2 plot.grid.grid_line_width = 0.3 log_plot.outline_line_color = "black" log_plot.grid.grid_line_color = "grey" log_plot.grid.grid_line_alpha = 0.2 log_plot.grid.grid_line_width = 0.3 return gridplot([[plot, log_plot]]) def floor_plot(df): # Create the plot. radon_floor_plot = figure( plot_width=800, plot_height=500, title="log(radon) measurement vs floor", y_range=[-6, 6], x_range=[-0.5, 1.5], x_axis_label="Floor", y_axis_label="log(radon)", ) # Prepare data for the plot. 
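# NOTE (added comment): a KDE is fit per floor and the x positions are jittered slightly so overlapping measurements stay visible; the mirrored KDE outlines give a violin-style view of each group.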
basement_floor_data = df[df["floor"] == 0]["log_activity"].values basement_floor_kde = sm.nonparametric.KDEUnivariate(basement_floor_data) basement_floor_kde.fit() ground_floor_data = df[df["floor"] == 1]["log_activity"].values ground_floor_kde = sm.nonparametric.KDEUnivariate(ground_floor_data) ground_floor_kde.fit() radon_floor_source = ColumnDataSource( { "x": (df["floor"].values + np.random.normal(scale=0.02, size=df.shape[0])), "y": df["log_activity"].values, "county": df["county"].values, "color": df["floor"].apply( lambda floor: "orange" if floor == 1 else "steelblue" ), } ) # Bind data to the plot. radon_floor_glyph = radon_floor_plot.circle( x="x", y="y", source=radon_floor_source, size=5, fill_color="color", line_color="white", alpha=0.7, ) radon_floor_tooltips = HoverTool( renderers=[radon_floor_glyph], tooltips=[ ("County", "@county"), ("log(radon)", "@y{0.000}"), ], ) radon_floor_plot.add_tools(radon_floor_tooltips) x = [-0.25, 0.25] y = [basement_floor_kde.support[np.argmax(basement_floor_kde.density)]] * 2 radon_floor_plot.line( y=y, x=x, line_color="steelblue", line_dash="dashed", line_width=4.0, alpha=0.5, ) x = 0.25 * (basement_floor_kde.density / basement_floor_kde.density.max()) radon_floor_plot.line( y=basement_floor_kde.support, x=x, line_color="steelblue", alpha=0.7, ) radon_floor_plot.line( y=basement_floor_kde.support, x=-x, line_color="steelblue", alpha=0.7, ) x = 0.25 * (ground_floor_kde.density / ground_floor_kde.density.max()) radon_floor_plot.line( y=ground_floor_kde.support, x=1 + x, line_color="orange", alpha=0.7, ) radon_floor_plot.line( y=ground_floor_kde.support, x=1 - x, line_color="orange", alpha=0.7, ) radon_floor_plot.line( y=[ground_floor_kde.support[np.argmax(ground_floor_kde.density)]] * 2, x=[0.75, 1.25], line_color="orange", line_dash="dashed", line_width=4.0, alpha=0.5, ) # Style the plot. 
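# NOTE (added comment): the two numeric floor codes are relabeled "Basement" and "First", and the SVG backend is selected for crisp export.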
radon_floor_plot.xaxis.ticker = FixedTicker(ticks=[0, 1]) radon_floor_plot.xaxis.major_label_overrides = { 0: "Basement", 1: "First", } radon_floor_plot.grid.grid_line_color = None radon_floor_plot.outline_line_color = "black" radon_floor_plot.output_backend = "svg" return radon_floor_plot def sample_of_priors(): half_cauchys = [] normals = [] x = np.linspace(0, 100, 10000) X = np.linspace(-100, 100, 10000) for i in range(1, 6): half_cauchys.append(stats.halfcauchy(loc=0, scale=i).pdf(x)) normals.append(stats.norm(0, i).pdf(X)) cauchy_plot = figure( plot_width=400, plot_height=400, title="Half Cauchy priors", x_range=[1e-2, 100], x_axis_type="log", ) normal_plot = figure( plot_width=400, plot_height=400, title="Normal priors", x_range=[-10, 10], ) colors = ["steelblue", "magenta", "black", "orange", "brown"] for i, half_cauchy in enumerate(half_cauchys): cauchy_source = ColumnDataSource({"x": x, "y": half_cauchy}) cauchy_plot.line( x="x", y="y", source=cauchy_source, line_width=2, color=colors[i], legend_label=f"μ = 0; γ = {i + 1}", ) normal_source = ColumnDataSource({"x": X, "y": normals[i]}) normal_plot.line( x="x", y="y", source=normal_source, line_width=2, color=colors[i], legend_label=f"μ = 0; σ = {i + 1}", ) cauchy_plot.outline_line_color = normal_plot.outline_line_color = "black" cauchy_plot.grid.grid_line_alpha = normal_plot.grid.grid_line_alpha = 0.2 cauchy_plot.grid.grid_line_color = normal_plot.grid.grid_line_color = "grey" cauchy_plot.grid.grid_line_width = normal_plot.grid.grid_line_width = 0.2 priors_plot = gridplot([[cauchy_plot, normal_plot]]) return priors_plot def plot_trace_ranks(keys, values, samples): parameters = dict(zip(keys, values)) plots = [] colors = ["#2a2eec", "#fa7c17", "#328c06", "#c10c90"] for title, parameter in parameters.items(): data = {title: samples.get(parameter).numpy()} trace = az.plot_trace(data, show=False, kind="rank_bars").reshape(-1) for i, p in enumerate(trace): if i == 0: p.plot_width = 250 for j, renderer in enumerate(p.renderers): renderer._property_values["glyph"].line_color = colors[j] renderer._property_values["glyph"].line_dash = "solid" renderer._property_values["glyph"].line_width = 2 renderer._property_values["glyph"].line_alpha = 0.6 else: p.plot_width = 550 p.plot_height = 250 p.outline_line_color = "black" p.grid.grid_line_alpha = 0.2 p.grid.grid_line_color = "grey" p.grid.grid_line_width = 0.2 plots.append([layout([[trace[0], trace[1]]])]) return gridplot([*plots]) def sample_county_trace_ranks(sample_counties: Dict[int, str], alphas) -> Figure: colors = ["#2a2eec", "#fa7c17", "#328c06", "#c10c90"] plots = [] for index, county in sample_counties.items(): data = {f"α[{county}]": alphas[:, :, index]} trace = az.plot_trace(data, show=False, kind="rank_bars").reshape(-1) for i, p in enumerate(trace): if i == 0: p.plot_width = 300 for j, renderer in enumerate(p.renderers): renderer._property_values["glyph"].line_color = colors[j] renderer._property_values["glyph"].line_dash = "solid" renderer._property_values["glyph"].line_width = 2 renderer._property_values["glyph"].line_alpha = 0.6 else: p.plot_width = 600 p.plot_height = 300 p.outline_line_color = "black" p.grid.grid_line_alpha = 0.2 p.grid.grid_line_color = "grey" p.grid.grid_line_width = 0.2 plots.append(layout([[trace[0], trace[1]]])) return gridplot([plots]) def uranium(summary_df: DataFrame, df: DataFrame) -> Figure: """ Plot uranium linear regression. :param summary_df: The dataframe output from arviz. :param df: The original dataframe data. 
:returns plot: A bokeh Figure object. """ alpha_hat_df = ( summary_df[["mean", "sd"]] .loc[summary_df.index.astype(str).str.startswith("alpha_hat"), :] .reset_index(drop=True) .copy() ) alpha_hat_df["log_Uppm"] = df["log_Uppm"].values alpha_hat_df["county"] = df["county"].values alpha_hat_df = alpha_hat_df.drop_duplicates().reset_index(drop=True) alpha_hat_df["lower"] = alpha_hat_df["mean"] - alpha_hat_df["sd"] alpha_hat_df["upper"] = alpha_hat_df["mean"] + alpha_hat_df["sd"] intercepts_source = ColumnDataSource( { "x": alpha_hat_df["log_Uppm"].values, "y": alpha_hat_df["mean"].values, "lower": alpha_hat_df["lower"].values, "upper": alpha_hat_df["upper"].values, "county": alpha_hat_df["county"].values, } ) plot = figure( plot_width=800, plot_height=500, title="Partial-pooling with individual and group level predictors", x_axis_label="log(uranium)", y_axis_label="Intercept estimate (log(radon activity))", y_range=[0.5, 2.2], x_range=[-1, 0.6], ) markers = plot.circle( x="x", y="y", source=intercepts_source, size=10, fill_color="steelblue", line_color="white", fill_alpha=0.7, line_alpha=0.7, hover_fill_color="orange", hover_line_color="black", hover_fill_alpha=1.0, legend_label="County", ) tooltips = HoverTool( renderers=[markers], tooltips=[ ("County", "@county"), ("Estimated α", "@y{0.000}"), ], ) plot.add_tools(tooltips) whiskers = Whisker( base="x", upper="upper", lower="lower", source=intercepts_source, line_color="steelblue", ) whiskers.upper_head.line_color = "steelblue" whiskers.lower_head.line_color = "steelblue" plot.add_layout(whiskers) x = np.array([-1, 1]) a = summary_df.loc[ summary_df.index.astype(str).str.startswith("mu_alpha"), "mean" ].values g = summary_df.loc[ summary_df.index.astype(str).str.startswith("gamma"), "mean" ].values y = a + g * x plot.line( x=x, y=y, line_color="black", line_alpha=0.3, line_width=3, legend_label="Estimated linear regression", level="underlay", ) plot.outline_line_color = "black" plot.grid.grid_line_alpha = 0.2 plot.grid.grid_line_color = "grey" plot.grid.grid_line_width = 0.2 plot.legend.location = "top_left" plot.output_backend = "svg" return plot
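# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module; it assumes
# network access so the extract step can download the srrs2.dat and cty.dat
# files on first use.
if __name__ == "__main__":
    from bokeh.io import show

    df = load_data()  # runs the extract -> transform -> load pipeline
    show(log_plot_comparison(df["activity"]))  # raw- vs. log-scale histograms
    show(floor_plot(df))  # jittered log(radon) measurements by floor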
beanmachine-main
src/beanmachine/tutorials/utils/radon.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Data ETL for the NBA item response tutorial.""" import numpy as np import pandas as pd from bokeh.models import ColumnDataSource # usort: skip from beanmachine.tutorials.utils import etl, plots class ExtractNBATutorialData(etl.Extract): """Extraction process for the NBA data.""" _SCHEME = "https" _NETLOC = "raw.githubusercontent.com" _PATH = ( "polygraph-cool/last-two-minute-report/" + "32f1c43dfa06c2e7652cc51ea65758007f2a1a01/output" ) _FILENAME = "all_games.csv" def __init__(self) -> None: self.data_url = self._build_url(self._FILENAME) self.extracted_data = self._extract() def _build_url(self, filename: str) -> str: return self._SCHEME + "://" + self._NETLOC + "/" + self._PATH + "/" + filename def _extract(self) -> pd.DataFrame: return pd.read_csv(self.data_url) class TransformNBATutorialData(etl.Transform): """Transform NBA data for the tutorial.""" extractor = ExtractNBATutorialData def _season_name(self, df: pd.DataFrame) -> None: # From https://en.wikipedia.org/wiki/2015%E2%80%9316_NBA_season season2015_16_start_date = pd.to_datetime( "October 27, 2015", format="%B %d, %Y", ) season2015_16_stop_date = pd.to_datetime( "April 13, 2016", format="%B %d, %Y", ) # From https://en.wikipedia.org/wiki/2016%E2%80%9317_NBA_season season2016_17_start_date = pd.to_datetime( "October 25, 2016", format="%B %d, %Y", ) season2016_17_stop_date = pd.to_datetime( "April 12, 2017", format="%B %d, %Y", ) # Create the mask and the choices. choices = ["2015-2016", "2016-2017"] conditions = [ np.logical_and( df["date"] >= season2015_16_start_date, df["date"] <= season2015_16_stop_date, ), np.logical_and( df["date"] >= season2016_17_start_date, df["date"] <= season2016_17_stop_date, ), ] # Apply the mask. df["season"] = np.select(conditions, choices, default=None) def _transform(self) -> pd.DataFrame: # Copy the data so we can manipulate it. df = self.extracted_data.copy() # Ensure the date column is a date object. df["date"] = pd.to_datetime(df["date"].values, format="%Y%m%d") # Append the season name. self._season_name(df) # Fix spelling errors. teams = { "NKY": "NYK", "COS": "BOS", "SAT": "SAS", "CHi": "CHI", "LA)": "LAC", "AT)": "ATL", "ARL": "ATL", } columns = ["away", "home", "committing_team", "disadvantaged_team"] for column in columns: df[column] = df[column].rename(teams) # Fill in NaN review_decision values with INC. df["review_decision"] = df["review_decision"].fillna("INC") # Filter the data for specific foul call_types and keep only the # descriptors (word after the :). These types of fouls generally # involve two players. See # https://austinrochford.com/posts/2018-02-04-nba-irt-2.html for more # info. fouls = [ "Foul: Personal", "Foul: Shooting", "Foul: Offensive", "Foul: Loose Ball", "Foul: Away from Play", ] df = df[df["call_type"].isin(fouls)] df["call_type"] = df["call_type"].str.split(": ", expand=True)[1].values # Filter the data on fourth quarters only. Then remove that column. df = df[df["period"] == "Q4"] df = df.drop("period", axis=1) # Only keep records that have a named season value. df = df.dropna(subset=["season"]) # Remove any NaN values that may be in the players columns. df = df.dropna(subset=["committing_player", "disadvantaged_player"]) # Create IDs for the players. 
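# NOTE (added comment): the ID index is built over the union of committing and disadvantaged players, so both columns share a single player ID space.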
committing_players = df["committing_player"].tolist() disadvantaged_players = df["disadvantaged_player"].tolist() players = sorted(set(committing_players + disadvantaged_players)) players = {player: i for i, player in enumerate(players)} df["committing_player_id"] = df["committing_player"].map(players) df["disadvantaged_player_id"] = df["disadvantaged_player"].map(players) # Create IDs for the foul type. fouls = {name: i for i, name in enumerate(sorted(df["call_type"].unique()))} df["call_type_id"] = df["call_type"].map(fouls) # Create IDs for the season. seasons = {name: i for i, name in enumerate(sorted(df["season"].unique()))} df["season_id"] = df["season"].map(seasons) # New score columns. df["score_committing"] = ( df["score_home"] .where(df["committing_team"] == df["home"], df["score_away"]) .astype(int) ) df["score_disadvantaged"] = ( df["score_home"] .where( df["disadvantaged_team"] == df["home"], df["score_away"], ) .astype(int) ) # Round the seconds left in the game. df["seconds_left"] = df["seconds_left"].round(0).astype(int) # Foul called ID. df["foul_called"] = 1 * df["review_decision"].isin(["CC", "INC"]) # Trailing flag df["trailing_committing"] = ( df["score_committing"] < df["score_disadvantaged"] ).astype(int) # Calculate the difference between the teams scores. df["score_diff"] = df["score_disadvantaged"] - df["score_committing"] # Calculate the trailing possessions needed. df["trailing_poss"] = np.ceil(df["score_diff"].values / 3).astype(int) # Possessions needed ID. df["trailing_poss_id"] = df["trailing_poss"].map( {poss: i for i, poss in enumerate(sorted(df["trailing_poss"].unique()))} ) # Remaining possessions. df["remaining_poss"] = df["seconds_left"].floordiv(25).add(1).astype(int) # Remaining possessions ID. df["remaining_poss_id"] = df["remaining_poss"].map( {poss: i for i, poss in enumerate(sorted(df["remaining_poss"].unique()))} ) # Keep only a few columns. columns = [ "seconds_left", "call_type", "call_type_id", "foul_called", "committing_player", "committing_player_id", "disadvantaged_player", "disadvantaged_player_id", "score_committing", "score_disadvantaged", "season", "season_id", "trailing_committing", "score_diff", "trailing_poss", "trailing_poss_id", "remaining_poss", "remaining_poss_id", ] df = df[columns] # Drop any duplicates. 
df = df.drop_duplicates().reset_index(drop=True) return df class LoadNBATutorialData(etl.Load): """Load the transformed data.""" transformer = TransformNBATutorialData filename = "nba.csv" def _load(self) -> pd.DataFrame: """Load transformed data.""" return self.transformed_data def load_data() -> pd.DataFrame: """Load the data.""" loader = LoadNBATutorialData() return loader.load() def plot_foul_types(series): tick_labels = series.index.values x = np.arange(len(tick_labels)) left = x + 0.5 y = top = series.values right = x - 0.5 bottom = [0.1] * len(tick_labels) source = ColumnDataSource( { "left": left, "top": top, "right": right, "bottom": bottom, "tick_labels": tick_labels, "x": x, "y": y, } ) tooltips = [("Foul", "@tick_labels"), ("Count", "@top{0,0}")] plot = plots.bar_plot( plot_source=source, figure_kwargs={ "title": "Foul types", "y_axis_label": "Counts", "y_axis_type": "log", }, orientation="vertical", tooltips=tooltips, ) return plot def plot_foul_frequency(series): tick_labels = series.index.values x = np.arange(len(tick_labels)) left = x + 0.5 y = top = series.values right = x - 0.5 bottom = np.zeros(len(tick_labels)) source = ColumnDataSource( { "left": left, "top": top, "right": right, "bottom": bottom, "tick_labels": tick_labels, "x": x, "y": y, } ) tooltips = [("Season", "@tick_labels"), ("Frequency", "@top{0.000}")] plot = plots.bar_plot( plot_source=source, figure_kwargs={ "title": "Foul frequency", "y_axis_label": "Frequency", }, orientation="vertical", tooltips=tooltips, ) plot.y_range.start = 0 return plot def plot_basic_model_residuals(residual_df): temp_df = residual_df.groupby("seconds_left").mean() plot_source = ColumnDataSource( { "x": temp_df.index.values, "y": temp_df["resid"].values, } ) tooltips = [ ("Residual", "@y{0.000}"), ("Seconds remaining", "@x"), ] return plots.scatter_plot( plot_sources=[plot_source], figure_kwargs={ "x_axis_label": "Seconds remaining in game", "y_axis_label": "Residual", "x_range": [125, -5], }, tooltips=[tooltips], ) def plot_call_type_means(series): tick_labels = series.index.values x = np.arange(len(tick_labels)) left = x + 0.5 y = top = series.values right = x - 0.5 bottom = np.zeros(len(tick_labels)) plot_source = ColumnDataSource( { "x": y, "y": x, "left": bottom, "top": left, "right": top, "bottom": right, "tick_labels": tick_labels, } ) tooltips = [ ("Call type", "@tick_labels"), ("Rate", "@x{0.000}"), ] return plots.bar_plot( plot_source=plot_source, orientation="horizontal", figure_kwargs={ "x_axis_label": "Observed foul call rate", "y_axis_label": "Call type", }, tooltips=tooltips, ) def plot_trailing_team_committing(df): plot_data = ( df.pivot_table("foul_called", "seconds_left", "trailing_committing") .rolling(20) .mean() .rename(columns={0: "No", 1: "Yes"}) .rename_axis( "Committing team is trailing", axis=1, ) ) x = plot_data.index.values plot_sources = [ ColumnDataSource({"x": x, "y": plot_data["No"].values}), ColumnDataSource({"x": x, "y": plot_data["Yes"].values}), ] labels = plot_data.columns.values tooltips = [ [("Rate", "@y{0.000}"), ("Time", "@x")], [("Rate", "@y{0.000}"), ("Time", "@x")], ] p = plots.line_plot( plot_sources, labels=labels, tooltips=tooltips, figure_kwargs={ "title": "Committing team is trailing", "x_axis_label": "Seconds remaining", "y_axis_label": "Observed foul rate", "x_range": [125, 15], }, ) p.legend.location = "top_left" return p def plot_trailing_possessions(df): plot_data = ( df.pivot_table("foul_called", "seconds_left", "trailing_poss") .loc[:, 1:3] .rolling(20) .mean() .rename_axis( 
"Trailing possessions (committing team)", axis=1, ) ) x = plot_data.index.values plot_sources = [ ColumnDataSource({"x": x, "y": plot_data[1].values}), ColumnDataSource({"x": x, "y": plot_data[2].values}), ColumnDataSource({"x": x, "y": plot_data[3].values}), ] labels = plot_data.columns.astype(str).values tooltips = [ [("Rate", "@y{0.000}"), ("Time left", "@x")], [("Rate", "@y{0.000}"), ("Time left", "@x")], [("Rate", "@y{0.000}"), ("Time left", "@x")], ] p = plots.line_plot( plot_sources, labels=labels, tooltips=tooltips, figure_kwargs={ "title": "Trailing possessions (committing team)", "x_axis_label": "Seconds remaining", "y_axis_label": "Observed foul rate", "x_range": [125, 15], }, ) p.legend.location = "top_left" return p def plot_possession_model_residuals(residual_df): temp_df = residual_df.groupby("seconds_left").mean() plot_source = ColumnDataSource( { "x": temp_df.index.values[::-1], "y": temp_df["resid"].values, } ) tooltips = [ ("Residual", "@y{0.000}"), ("Seconds remaining", "@x"), ] return plots.scatter_plot( plot_sources=[plot_source], figure_kwargs={ "x_axis_label": "Seconds remaining in game", "y_axis_label": "Residual", }, tooltips=[tooltips], ) def plot_irt_residuals(residual_df): temp_df = residual_df.groupby("seconds_left").mean() plot_source = ColumnDataSource( { "x": temp_df.index.values[::-1], "y": temp_df["resid"].values, } ) tooltips = [ ("Residual", "@y{0.000}"), ("Seconds remaining", "@x"), ] return plots.scatter_plot( plot_sources=[plot_source], figure_kwargs={ "x_axis_label": "Seconds remaining in game", "y_axis_label": "Residual", }, tooltips=[tooltips], )
beanmachine-main
src/beanmachine/tutorials/utils/nba.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Module for the ``Mixture model using count data`` tutorial.""" from numbers import Number from typing import Any, Dict, List, Union import arviz as az import numpy as np import pandas as pd import torch import torch.distributions as dist from beanmachine.ppl import RVIdentifier from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples from bokeh.models import Band, ColumnDataSource, HoverTool from bokeh.plotting import figure from bokeh.plotting.figure import Figure from torch import Tensor COLORS = ["#2a2eec", "#fa7c17", "#328c06", "#c10c90"] def plot_value_counts(df: pd.DataFrame) -> List[List[Figure]]: """ Plot the pre-drug and post-drug value counts as a bar plot. :param df: Pandas dataframe object of the model data. :type df: pd.DataFrame :return: A list of figures to display in the notebook. :rtype: List[List[Figure]] """ predrug_data = df["predrug"].value_counts().sort_index() predrug_index = predrug_data.index.values postdrug_data = df["postdrug"].value_counts().sort_index() postdrug_index = postdrug_data.index.values PADDING = 2 x = np.arange(0, max(predrug_data.index.max(), postdrug_data.index.max()) + PADDING) top_predrug = np.zeros(len(x)) top_predrug[predrug_index] = predrug_data top_postdrug = np.zeros(len(x)) top_postdrug[postdrug_index] = postdrug_data OFFSET = 0.5 left = x - OFFSET right = x + OFFSET bottom = np.zeros(len(x)) figs = [] for i, column in enumerate(["predrug", "postdrug"]): # Create the figure. p = figure( plot_width=700, plot_height=300, y_axis_label="Counts", x_axis_label="PVC events", title=f'PVC events "{column}"', y_range=[0, 8], x_range=[-2, 52], outline_line_color="black", ) # Prepare data for the figure. source_data = { "x": x, "left": left, "top": None, "right": right, "bottom": bottom, } if i == 0: source_data["top"] = top_predrug else: source_data["top"] = top_postdrug source = ColumnDataSource(source_data) # Add data to the figure. glyph = p.quad( left="left", top="top", right="right", bottom="bottom", source=source, fill_color="steelblue", line_color="white", fill_alpha=0.7, hover_fill_color="orange", hover_line_color="black", hover_alpha=1, ) # Add tooltips to the figure. tips = HoverTool( renderers=[glyph], tooltips=[("Count", "@top"), ("PVC events", "@x")], ) p.add_tools(tips) # Style the figure p.grid.grid_line_alpha = 0.2 p.grid.grid_line_color = "gray" p.grid.grid_line_width = 0.3 p.yaxis.minor_tick_line_color = None figs.append(p) return [[figs[0]], [figs[1]]] class SimulatedPredictiveChecks: """Simulated predictive checks base class.""" def __init__( self, data: pd.DataFrame, samples_with_observations: Union[None, MonteCarloSamples] = None, samples_without_observations: Union[None, MonteCarloSamples] = None, *args, **kwargs, ) -> None: """ Plot prior/posterior predictive checks. :param data: Pandas dataframe object of the model data. :type data: pd.DataFrame :param samples_with_observations: Bean Machine inference object. :type samples_with_observations: MonteCarloSamples :param samples_without_observations: Bean Machine inference object. :type samples_without_observations: MonteCarloSamples """ self.data = data self.n_records = self.data.shape[0] if samples_with_observations is None: if samples_without_observations is None: msg = ( 'Either one of "samples_with_observations" or ' '"samples_without_observations" must be supplied. 
' ) raise TypeError(msg) if samples_with_observations is not None: self.samples_with_observations_xr = samples_with_observations.to_xarray() coords = self.samples_with_observations_xr.coords self.n_chains_with_observations = coords.get("chain").values.shape[0] self.n_samples_with_observations = coords.get("draw").values.shape[0] self.queries_with_observations = self.samples_with_observations_xr.data_vars else: self.samples_with_observations_xr = None self.n_chains_with_observations = None self.n_samples_with_observations = None self.queries_with_observations = None if samples_without_observations is not None: self.samples_without_observations_xr = ( samples_without_observations.to_xarray() ) coords = self.samples_without_observations_xr.coords self.n_chains_without_observations = coords.get("chain").values.shape[0] self.n_samples_without_observations = coords.get("draw").values.shape[0] self.queries_without_observations = ( self.samples_without_observations_xr.data_vars ) else: self.samples_without_observations_xr = None self.n_chains_without_observations = None self.n_samples_without_observations = None self.queries_without_observations = None def _simulate_data(self, *args, **kwargs) -> Any: msg = "To be implemented by the inheriting class." raise NotImplementedError(msg) def simulate_data(self, *args, **kwargs) -> Any: """Simulate data for the predictive plots.""" return self._simulate_data(*args, **kwargs) def _generate_plot_data( self, simulated_data: List[Tensor], ) -> Dict[str, List[Number]]: xs = [] ys = [] lower_hdis = [] upper_hdis = [] mins = [] maxs = [] histograms = [] bins = [] for i in range(self.n_records): n_bins = int(simulated_data[i].max()) + 1 hist, bins_ = np.histogram(simulated_data[i], bins=n_bins) histograms.append(hist.tolist()) bins.append(bins_.tolist()) simulation_mean = simulated_data[i].mean().item() xs.append(simulation_mean) ys.append(i + 1) lower_hdi, upper_hdi = az.hdi(simulated_data[i].numpy(), hdi_prob=0.89) lower_hdis.append(int(lower_hdi)) upper_hdis.append(int(upper_hdi)) mins.append(int(simulated_data[i].min().item())) maxs.append(int(simulated_data[i].max().item())) return { "x": xs, "y": ys, "lower_hdi": lower_hdis, "upper_hdi": upper_hdis, "minimum": mins, "maximum": maxs, "histogram": histograms, "bins": bins, } def _model(self, *args, **kwargs) -> Any: msg = "To be implemented by the inheriting class." raise NotImplementedError(msg) def model(self, *args, **kwargs) -> Any: """Model definition for data simulation.""" return self._model(*args, **kwargs) def _plot_prior_predictive_checks(self) -> Figure: msg = "To be implemented by the inheriting class." raise NotImplementedError(msg) def plot_prior_predictive_checks(self) -> Figure: """Plot the prior predictive checks.""" return self._plot_prior_predictive_checks() def _plot_posterior_predictive_checks(self) -> Figure: msg = "To be implemented by the inheriting class." raise NotImplementedError(msg) def plot_posterior_predictive_checks(self) -> Figure: """Plot the posterior predictive checks.""" return self._plot_posterior_predictive_checks() class PlotMixin: """Mixin for plotting.""" MODEL_NAME = None def plot_predictive_checks( self, simulated_data: Dict[str, List[Number]], title: str, ) -> Figure: """ Plot of the predictive check. :param simulated_data: Object containing the simulated data to plot. :type simulated_data: Dict[str, List[Number]] :param title: String to differentiate between prior or predictive plots. :type title: str :returns: Bokeh plot of the predictive check. 
:rtype: Figure """ p = figure( title=f"{title} distributions vs. observations", outline_line_color="black", y_axis_label="Patient ID", x_axis_label="PVC events", toolbar_location=None, x_range=[-2, 52], y_range=[0.5, 12.99], ) for i in range(self.n_records): # Prepare data. SCALE = 0.8 lower_hdis = np.array(simulated_data["lower_hdi"]) upper_hdis = np.array(simulated_data["upper_hdi"]) bins = np.array(simulated_data["bins"][i]) histogram = np.array(simulated_data["histogram"][i]) histogram = SCALE * (histogram / histogram.max()) bin_pairs = list(zip(bins[:-1], bins[1:])) step_x = [bin_pairs[0][0]] step_y = [i + 1] for j, bin_pair in enumerate(bin_pairs): step_x.append(bin_pair[0]) step_y.append(histogram[j] + i + 1) step_x.append(bin_pair[1]) step_y.append(histogram[j] + i + 1) step_x.append(bin_pairs[-1][1]) step_y.append(i + 1) # Histogram hist_source = ColumnDataSource({"x": step_x, "y": step_y}) p.step( # Top portion of the histogram. x="x", y="y", source=hist_source, line_color="steelblue", line_width=1, line_alpha=0.7, ) p.line( # Lower portion of the histogram. x=[step_x[0], step_x[-1]], y=[step_y[0], step_y[-1]], line_color="steelblue", line_width=0.5, line_alpha=0.7, ) band_source = ColumnDataSource( # Fill for the histogram. { "base": step_x, "lower": np.linspace(step_y[0], step_y[-1], num=len(step_y)), "upper": step_y, } ) band = Band( base="base", lower="lower", upper="upper", source=band_source, fill_color="steelblue", fill_alpha=0.1, level="underlay", ) p.add_layout(band) p.varea( # Legend label for the histogram. x="base", y1="lower", y2="upper", source=band_source, fill_color="steelblue", fill_alpha=0.1, legend_label="Simulated distribution", ) # HDI p.line( x=[lower_hdis[i], upper_hdis[i]], y=[simulated_data["y"][i] - 0.025] * 2, line_color="steelblue", line_width=2, legend_label="Simulated 89% HDI", ) # Simulated mean source = ColumnDataSource( { "x": [simulated_data["x"][i]], "y": [simulated_data["y"][i]], "hdi": [ f"{simulated_data['lower_hdi'][i]}–" f"{simulated_data['upper_hdi'][i]}" ], "minmax": [ f"{simulated_data['minimum'][i]}–" f"{simulated_data['maximum'][i]}" ], } ) locals()[f"mean_{i}"] = p.circle( x="x", y="y", source=source, size=7, fill_color="white", line_color="steelblue", hover_fill_color="orange", hover_line_color="black", level="overlay", legend_label="Simulated mean", ) locals()[f"mean_tips_{i}"] = HoverTool( renderers=[locals()[f"mean_{i}"]], tooltips=[ ("Simulated mean", "@x{0.00}"), ("Simulated 89% HDI", "@hdi"), ("Simulated min/max", "@minmax"), ], ) p.add_tools(locals()[f"mean_tips_{i}"]) # Observed data source = ColumnDataSource( { "x": [self.observed[i]], "y": [simulated_data["y"][i]], } ) locals()[f"true_{i}"] = p.square( x="x", y="y", size=10, source=source, fill_color="magenta", line_color="white", hover_fill_color="green", legend_label="Postdrug PVC counts", ) locals()[f"true_tips_{i}"] = HoverTool( renderers=[locals()[f"true_{i}"]], tooltips=[ ("Patient ID", "@y"), ("Postdrug PVC counts", "@x"), ], ) p.add_tools(locals()[f"true_tips_{i}"]) # Total data source = ColumnDataSource( { "x": [self.t[i]], "y": [simulated_data["y"][i]], } ) locals()[f"true_{i}"] = p.diamond( x="x", y="y", size=15, source=source, fill_color="brown", line_color="white", hover_fill_color="black", legend_label="Predrug + Postdrug PVC counts", ) locals()[f"true_tips_{i}"] = HoverTool( renderers=[locals()[f"true_{i}"]], tooltips=[ ("Patient ID", "@y"), ("Predrug + Postdrug PVC counts", "@x"), ], ) p.add_tools(locals()[f"true_tips_{i}"]) p.grid.grid_line_alpha = 0.2 
p.grid.grid_line_color = "gray" p.grid.grid_line_width = 0.3 p.yaxis.minor_tick_line_color = None p.yaxis[0].ticker.desired_num_ticks = len(self.observed) p.legend.location = "bottom_right" return p def prior_predictive_plot(self) -> Figure: """Prior predictive plot mixin manager.""" simulated_data = self._simulate_data(using="prior") plot_data = self._generate_plot_data(simulated_data) return self.plot_predictive_checks(plot_data, f"{self.MODEL_NAME} prior") def posterior_predictive_plot(self) -> Figure: """Posterior predictive plot mixin manager.""" simulated_data = self._simulate_data(using="posterior") plot_data = self._generate_plot_data(simulated_data) return self.plot_predictive_checks(plot_data, f"{self.MODEL_NAME} posterior") class Model1PredictiveChecks(SimulatedPredictiveChecks, PlotMixin): """Model 1 predictive checks.""" MODEL_NAME = "Model 1" def __init__(self, p_query: RVIdentifier, *args, **kwargs) -> None: """ Model 1 predictive check initialization. :param p_query: Bean Machine query object. :type p_query: RVIdentifier :returns: None """ super().__init__(*args, **kwargs) self.t = self.data["total"].astype(int).values self.observed = self.data["postdrug"].astype(int).values if self.samples_with_observations_xr is not None: data_vars = self.samples_with_observations_xr.data_vars p_with_observations = data_vars.get(p_query()) p_with_observations = p_with_observations.values.flatten() self.p_with_observations = torch.tensor(p_with_observations) if self.samples_without_observations_xr is not None: data_vars = self.samples_without_observations_xr.data_vars p_without_observations = data_vars.get(p_query()) p_without_observations = p_without_observations.values.flatten() self.p_without_observations = torch.tensor(p_without_observations) def _model(self, i: int, p: Tensor) -> dist.Binomial: return dist.Binomial(torch.tensor(self.t[i]), p) def _simulate_data(self, using: str = "prior", N: int = 1) -> List[Tensor]: p = torch.zeros((0,)) if using == "prior": p = self.p_without_observations elif using == "posterior": p = self.p_with_observations simulated_data = [] for i in range(self.n_records): simulation = self._model(i, p).sample((N,)).flatten() simulated_data.append(simulation) return simulated_data def _plot_prior_predictive_checks(self) -> Figure: return self.prior_predictive_plot() def _plot_posterior_predictive_checks(self) -> Figure: return self.posterior_predictive_plot() class Model2PredictiveChecks(SimulatedPredictiveChecks, PlotMixin): """Model 2 predictive checks.""" MODEL_NAME = "Model 2" def __init__( self, p_query: RVIdentifier, s_query_str: str, *args, **kwargs ) -> None: """ Model 2 predictive checks initialization. :param p_query: Bean Machine query object. :type p_query: RVIdentifier :param s_query_str: Start of the name of the Bean Machine query object. 
:type s_query_str: str :returns: None """ super().__init__(*args, **kwargs) self.t = self.data["total"].astype(int).values self.observed = self.data["postdrug"].astype(int).values if self.samples_with_observations_xr is not None: data_vars = self.samples_with_observations_xr.data_vars p_with_observations = data_vars.get(p_query()) p_with_observations = p_with_observations.values.flatten() self.p_with_observations = torch.tensor(p_with_observations) s_with_observations = { key: data_vars.get(key) for key, _ in self.samples_with_observations_xr.items() if s_query_str in str(key) } s_with_observations = dict( sorted(s_with_observations.items(), key=lambda item: item[0].arguments) ) self.s_with_observations = list(s_with_observations.values()) if self.samples_without_observations_xr is not None: data_vars = self.samples_without_observations_xr.data_vars p_without_observations = data_vars.get(p_query()) p_without_observations = p_without_observations.values.flatten() self.p_without_observations = torch.tensor(p_without_observations) s_without_observations = { key: data_vars.get(key) for key, _ in self.samples_without_observations_xr.items() if s_query_str in str(key) } s_without_observations = dict( sorted( s_without_observations.items(), key=lambda item: item[0].arguments ) ) self.s_without_observations = list(s_without_observations.values()) def _model(self, i: int, p: Tensor, s: Tensor) -> dist.Binomial: return dist.Binomial(torch.tensor(self.t[i]), p * s) def _simulate_data(self, using: str = "prior", N: int = 1) -> List[Tensor]: p = torch.zeros((0,)) s = torch.zeros((self.n_records, 0)) if using == "prior": p = self.p_without_observations s = self.s_without_observations elif using == "posterior": p = self.p_with_observations s = self.s_with_observations simulated_data = [] for i in range(self.n_records): simulation = self._model(i, p, s[i].values.flatten()) simulation = simulation.sample((N,)).flatten() simulated_data.append(simulation) return simulated_data def _plot_prior_predictive_checks(self) -> Figure: return self.prior_predictive_plot() def _plot_posterior_predictive_checks(self) -> Figure: return self.posterior_predictive_plot() class Model3PredictiveChecks(SimulatedPredictiveChecks, PlotMixin): """Model 3 predictive checks.""" MODEL_NAME = "Model 3" def __init__( self, p_query: RVIdentifier, s_query_str: str, *args, **kwargs, ) -> None: """ Model 3 predictive checks initialization. :param p_query: Bean Machine query object. :type p_query: RVIdentifier :param s_query_str: Start of the name of the Bean Machine query object. 
:type s_query_str: str """ super().__init__(*args, **kwargs) self.t = self.data["total"].astype(int).values self.observed = self.data["postdrug"].astype(int).values if self.samples_with_observations_xr is not None: data_vars = self.samples_with_observations_xr.data_vars p_with_observations = data_vars.get(p_query()) p_with_observations = p_with_observations.values.flatten() self.p_with_observations = torch.tensor(p_with_observations) s_with_observations = { key: torch.tensor(data_vars.get(key).values).reshape( self.n_chains_with_observations * self.n_samples_with_observations, 2, ) for key, _ in self.samples_with_observations_xr.items() if s_query_str in str(key) } s_with_observations = dict( sorted(s_with_observations.items(), key=lambda item: item[0].arguments) ) self.s_with_observations = list(s_with_observations.values()) if self.samples_without_observations_xr is not None: data_vars = self.samples_without_observations_xr.data_vars p_without_observations = data_vars.get(p_query()) p_without_observations = p_without_observations.values.flatten() self.p_without_observations = torch.tensor(p_without_observations) s_without_observations = { key: torch.tensor(data_vars.get(key).values).reshape( self.n_chains_without_observations * self.n_samples_without_observations, 2, ) for key, _ in self.samples_without_observations_xr.items() if s_query_str in str(key) } s_without_observations = dict( sorted( s_without_observations.items(), key=lambda item: item[0].arguments ) ) self.s_without_observations = list(s_without_observations.values()) def _model(self, i: int, p: Tensor, s: List[Tensor]) -> dist.Binomial: # Handle the case when both weights are zero. switch = s[i].clone() switch[(switch.sum(dim=1) == 0.0).nonzero()] = torch.tensor( [1.0, 1.0], dtype=torch.float64, ) s = dist.Categorical(probs=switch).sample((1,)).flatten() return dist.Binomial(torch.tensor(self.t[i]), p * s) def _simulate_data(self, using: str = "prior", N: int = 1) -> List[Tensor]: p = torch.zeros((0,)) s = [torch.zeros((self.n_records, 0))] if using == "prior": p = self.p_without_observations s = self.s_without_observations elif using == "posterior": p = self.p_with_observations s = self.s_with_observations simulated_data = [] for i in range(self.n_records): simulation = self._model(i, p, s) simulation = simulation.sample((N,)).flatten() simulated_data.append(simulation) return simulated_data def _plot_prior_predictive_checks(self) -> Figure: return self.prior_predictive_plot() def _plot_posterior_predictive_checks(self) -> Figure: return self.posterior_predictive_plot() def plot_diagnostics( samples: MonteCarloSamples, ordering: Union[None, List[str]] = None, ) -> List[Figure]: """ Plot model diagnostics. :param samples: Bean Machine inference object. :type samples: MonteCarloSamples :param ordering: Define an ordering for how the plots are displayed. :type ordering: List[str] :return: Bokeh figures with visual diagnostics. :rtype: List[Figure] """ # Prepare the data for the figure. samples_xr = samples.to_xarray() data = {str(key): value.values for key, value in samples_xr.data_vars.items()} if ordering is not None: diagnostics_data = {} for key in ordering: key = str(key) diagnostics_data[key] = data[key] else: diagnostics_data = data # Cycle through each query and create the diagnostics plots using arviz. 
diagnostics_plots = [] for key, value in diagnostics_data.items(): ac_plot = az.plot_autocorr({key: value}, show=False)[0].tolist() tr_plot = az.plot_trace( {key: value}, kind="rank_bars", show=False, )[0].tolist() posterior_plot = az.plot_posterior({key: value}, show=False)[0][0] posterior_plot.plot_width = 300 posterior_plot.plot_height = 300 posterior_plot.grid.grid_line_alpha = 0.2 posterior_plot.grid.grid_line_color = "gray" posterior_plot.grid.grid_line_width = 0.3 posterior_plot.yaxis.minor_tick_line_color = None posterior_plot.outline_line_color = "black" for i, p in enumerate(tr_plot): # Style the plots from arviz. if i == 0: p.plot_width = 300 for j, renderer in enumerate(p.renderers): renderer._property_values["glyph"].line_color = COLORS[j] renderer._property_values["glyph"].line_dash = "solid" renderer._property_values["glyph"].line_width = 2 renderer._property_values["glyph"].line_alpha = 0.6 else: p.plot_width = 600 p.plot_height = 300 p.grid.grid_line_alpha = 0.2 p.grid.grid_line_color = "gray" p.grid.grid_line_width = 0.3 p.yaxis.minor_tick_line_color = None p.outline_line_color = "black" for p in ac_plot: p.plot_width = 300 p.plot_height = 300 p.grid.grid_line_alpha = 0.2 p.grid.grid_line_color = "gray" p.grid.grid_line_width = 0.3 p.yaxis.minor_tick_line_color = None p.outline_line_color = "black" ps = [posterior_plot, tr_plot[0], tr_plot[1], *ac_plot] diagnostics_plots.append(ps) return diagnostics_plots
beanmachine-main
src/beanmachine/tutorials/utils/hearts.py
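A minimal, standalone sketch (not one of the corpus files) of the interval computation that `_generate_plot_data` above performs per record: `az.hdi` on a flat array of predictive draws returns the narrowest interval containing the requested probability mass. The binomial draws below are made-up stand-ins for the model's simulated counts.

import arviz as az
import numpy as np

rng = np.random.default_rng(0)
draws = rng.binomial(n=50, p=0.3, size=4_000).astype(float)  # fake predictive counts
lower, upper = az.hdi(draws, hdi_prob=0.89)  # same call pattern as _generate_plot_data
print(f"mean={draws.mean():.2f}, 89% HDI=[{lower:.0f}, {upper:.0f}]")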
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Helper module for the hierarchical model tutorial.""" from typing import Dict, List import arviz as az import numpy as np import pandas as pd from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.tutorials.utils import plots from bokeh.models import Arrow, Band, HoverTool, VeeHead, Whisker from bokeh.models.sources import ColumnDataSource from bokeh.plotting import figure from bokeh.plotting.figure import Figure from scipy.stats import beta, pareto, uniform COLORS = ["#2a2eec", "#fa7c17", "#328c06", "#c10c90"] def plot_current_hits(df: pd.DataFrame) -> Figure: """ Plot ``current hits`` data. :param df: Dataframe of the model data. :type df: pd.DataFrame :return: Bokeh figure of the current hits data. :rtype: Figure """ # Prepare data for the figure. y = df["Current hits"].values[::-1] x = np.linspace(0, max(y) + 1, len(y)) names = df["Name"].values[::-1] # Create the figure data source. source = ColumnDataSource({"x": x, "y": y, "name": names}) # Add labels to the figure. figure_kwargs = { "title": "Current hits", "y_axis_label": "Hits", "x_axis_label": "Player", } # Add tooltips to the figure. tips = [("Name", "@name"), ("Hits", "@y")] # Create the figure. p = plots.scatter_plot( plot_sources=[source], figure_kwargs=figure_kwargs, tooltips=[tips], ) # Style the figure. plots.style(p) p.xaxis.major_tick_line_color = None p.xaxis.minor_tick_line_color = None p.xaxis.major_label_text_font_size = "0pt" return p def plot_complete_pooling_priors() -> Figure: """ Plot a family of priors for the complete-pooling model. :return: Bokeh figure of the priors. :rtype: Figure """ # Support for the priors. N_ = int(1e4) x = np.linspace(0, 1, N_) # Prior PDFs. beta_samples = beta(1, 1).pdf(x) uniform_samples = uniform(0, 1).pdf(x) # Create the figure data sources. beta_source = ColumnDataSource({"x": x, "y": beta_samples}) uniform_source = ColumnDataSource({"x": x, "y": uniform_samples}) # Create the figure. plot = figure( plot_width=400, plot_height=400, title="Beta(1, 1) vs Uniform", x_axis_label="Support", x_range=[0, 1], y_range=[0.8, 1.2], ) # Create glyphs on the figure. plot.line( x="x", y="y", source=beta_source, line_color="steelblue", line_alpha=0.6, line_width=6, legend_label="Beta(1, 1)", ) plot.line( x="x", y="y", source=uniform_source, line_color="orange", line_alpha=1, line_width=2, legend_label="Uniform distribution", ) # Style the figure. plots.style(plot) plot.yaxis.major_tick_line_color = None plot.yaxis.minor_tick_line_color = None return plot def plot_complete_pooling_diagnostics(samples: MonteCarloSamples) -> List[Figure]: """ Plot the complete-pooling diagnostics. :param samples: Bean Machine inference object. :type samples: MonteCarloSamples :return: Bokeh figure of some visual diagnostics. :rtype: List[Figure] """ # Prepare the data for the figure. diagnostics_data = { key.__dict__["wrapper"].__name__: value.values for key, value in samples.to_xarray().data_vars.items() } # Cycle through each query and create the diagnostics plots using arviz. 
diagnostics_plots = [] for key, value in diagnostics_data.items(): ac_plot = az.plot_autocorr({key: value}, show=False)[0].tolist() tr_plot = az.plot_trace( {key: value}, kind="rank_bars", show=False, )[0].tolist() for i, p in enumerate(tr_plot): # Style the plots from arviz. if i == 0: p.plot_width = 300 for j, renderer in enumerate(p.renderers): renderer._property_values["glyph"].line_color = COLORS[j] renderer._property_values["glyph"].line_dash = "solid" renderer._property_values["glyph"].line_width = 2 renderer._property_values["glyph"].line_alpha = 0.6 else: p.plot_width = 600 p.plot_height = 300 plots.style(p) for p in ac_plot: p.plot_width = 300 p.plot_height = 300 plots.style(p) diagnostics_plots = [tr_plot[0], tr_plot[1], *ac_plot] return diagnostics_plots def plot_complete_pooling_model( df: pd.DataFrame, samples: MonteCarloSamples, query: RVIdentifier, ) -> Figure: """ Complete-pooling model plot. :param df: Dataframe of model data. :type df: pd.DataFrame :param samples: Bean Machine inference object. :type samples: MonteCarloSamples :param query: Bean Machine query object. :type query: RVIdentifier :return: Bokeh figure of the model. :rtype: Figure """ # Calculate the HDIs for the complete-pooling model. data = {"φ": samples.get(query).numpy()} hdi_df = az.hdi(data, hdi_prob=0.89).to_dataframe() hdi_df = hdi_df.T.rename(columns={"lower": "hdi_11%", "higher": "hdi_89%"}) # Calculate the summary statistics for the complete-pooling model. summary_df = az.summary(data, round_to=6).join(hdi_df) # Calculate empirical values. population_mean = (df["Current hits"] / df["Current at-bats"]).mean() population_std = (df["Current hits"] / df["Current at-bats"]).std() x = (df["Current hits"] / df["Current at-bats"]).values posterior_upper_hdi = np.array(summary_df["hdi_89%"].tolist() * df.shape[0]) posterior_lower_hdi = np.array(summary_df["hdi_11%"].tolist() * df.shape[0]) # Create the figure data source. source = ColumnDataSource( { "x": x, "y": summary_df["mean"].tolist() * df.shape[0], "upper_hdi": posterior_upper_hdi, "lower_hdi": posterior_lower_hdi, "lower_std": [population_mean - population_std] * df.shape[0], "upper_std": [population_mean + population_std] * df.shape[0], "name": df["Name"].values, } ) # Create the figure. plot = figure( plot_width=500, plot_height=500, title="Complete-pooling", x_axis_label="Observed hits / at-bats", y_axis_label="Predicted chance of a hit", y_range=[0.05, 0.55], x_range=[0.14, 0.41], ) # Create the mean chance for at-bat hits line. plot.line( x=[0, 1], y=[population_mean, population_mean], line_color="orange", line_width=3, level="underlay", legend_label="Population mean", ) # Create a band that contains the standard deviation of the mean chance for # at-bat hits. std_band = Band( base="x", lower="lower_std", upper="upper_std", source=source, level="underlay", fill_alpha=0.2, fill_color="orange", line_width=0.2, line_color="orange", ) plot.add_layout(std_band) # Create the HDI interval whiskers for each player. whiskers = Whisker( base="x", upper="upper_hdi", lower="lower_hdi", source=source, line_color="steelblue", ) whiskers.upper_head.line_color = "steelblue" whiskers.lower_head.line_color = "steelblue" plot.add_layout(whiskers) # Create the player's at-bat hit chance for the complete-pooling model. 
glyph = plot.circle( x="x", y="y", source=source, size=10, line_color="white", fill_color="steelblue", legend_label="Players", ) tooltips = HoverTool( renderers=[glyph], tooltips=[ ("Name", "@name"), ("Posterior Upper HDI", "@upper_hdi{0.000}"), ("Posterior Mean", "@y{0.000}"), ("Posterior Lower HDI", "@lower_hdi{0.000}"), ], ) plot.add_tools(tooltips) # Add a legend to the figure. plot.legend.location = "top_left" plot.legend.click_policy = "mute" # Style the figure. plots.style(plot) return plot def plot_no_pooling_diagnostics( samples: MonteCarloSamples, query: RVIdentifier, df: pd.DataFrame, ) -> List[List[Figure]]: """ Plot the no-pooling model diagnostics. :param samples: Bean Machine inference object. :type samples: MonteCarloSamples :param query: Bean Machine query object. :type query: RVIdentifier :param df: Dataframe of model data. :type df: pd.DataFrame :return: A nested list of Bokeh figures. :rtype: List[List[Figure]] """ # Prepare data for the figures. names = df["Name"].values keys = [f"θ[{name}]" for name in names] values = np.dsplit(samples.get(query).numpy(), 18) values = [value.reshape(samples.num_chains, -1) for value in values] diagnostics_data = dict(zip(keys, values)) # Cycle through each query and create the diagnostics plots using arviz. diag_plots = [] for key, value in diagnostics_data.items(): ac_plot = az.plot_autocorr({key: value}, show=False)[0].tolist() tr_plot = az.plot_trace( {key: value}, kind="rank_bars", show=False, )[0].tolist() ess = az.plot_ess({key: value}, kind="evolution", show=False)[0][0] post = az.plot_posterior({key: value}, show=False)[0][0] # Style the plots from arviz. for i, p in enumerate(tr_plot): if i == 0: p.plot_width = 300 for j, renderer in enumerate(p.renderers): renderer._property_values["glyph"].line_color = COLORS[j] renderer._property_values["glyph"].line_dash = "solid" renderer._property_values["glyph"].line_width = 2 renderer._property_values["glyph"].line_alpha = 0.6 p.x_range.start = 0 p.x_range.end = 1 else: p.plot_width = 600 p.plot_height = 300 plots.style(p) for p in ac_plot: p.plot_width = 300 p.plot_height = 300 plots.style(p) ess.plot_width = 300 ess.plot_height = 300 plots.style(ess) post.plot_width = 300 post.plot_height = 300 plots.style(post) post.x_range.start = 0 post.x_range.end = 1 diag_plots.append([post, ess, tr_plot[0], tr_plot[1], *ac_plot]) return diag_plots def plot_pareto_prior() -> Figure: """ Plot a family of Pareto distributions. :return: Bokeh figure with prior distributions. :rtype: Figure """ plot = figure( plot_width=400, plot_height=400, title="Pareto distribution", x_axis_label="Support", x_range=[1, 3], ) colors = ["steelblue", "orange", "brown", "magenta"] x = np.linspace(1, 3, 1000) for i, alpha in enumerate(np.linspace(start=1.5, stop=3, num=4)): pareto_samples = pareto.pdf(x, alpha) pareto_source = ColumnDataSource({"x": x, "y": pareto_samples}) plot.line( x="x", y="y", source=pareto_source, line_color=colors[i], line_alpha=0.7, line_width=2, legend_label=f"α = {alpha}", ) plots.style(plot) plot.yaxis.major_tick_line_color = None plot.yaxis.minor_tick_line_color = None plot.yaxis.major_label_text_font_size = "0pt" return plot def plot_no_pooling_model( samples: MonteCarloSamples, query: RVIdentifier, df: pd.DataFrame, ) -> Figure: """ Plot the no-pooling model. :param samples: Bean Machine inference object. :type samples: MonteCarloSamples :param query: Bean Machine query object. :type query: RVIdentifier :param df: Pandas dataframe with model data. 
:type df: pd.DataFrame :return: Bokeh figure of the model. :rtype: Figure """ names = df["Name"].values keys = [f"θ[{name}]" for name in names] values = np.dsplit(samples.get(query).numpy(), 18) values = [value.reshape(samples.num_chains, -1) for value in values] data = dict(zip(keys, values)) hdi_df = az.hdi(data, hdi_prob=0.89).to_dataframe() hdi_df = hdi_df.T.rename(columns={"lower": "hdi_11%", "higher": "hdi_89%"}) summary_df = az.summary(data, round_to=4).join(hdi_df) x = (df["Current hits"] / df["Current at-bats"]).values posterior_upper_hdi = summary_df["hdi_89%"] posterior_lower_hdi = summary_df["hdi_11%"] population_mean = (df["Current hits"] / df["Current at-bats"]).mean() # Create the source of data for the figure. source = ColumnDataSource( { "x": x, "y": summary_df["mean"].values, "upper_hdi": posterior_upper_hdi, "lower_hdi": posterior_lower_hdi, "name": df["Name"].values, } ) # Create the figure. plot = figure( plot_width=500, plot_height=500, title="No-pooling", x_axis_label="Observed hits / at-bats", y_axis_label="Predicted chance of a hit", x_range=[0.14, 0.41], y_range=[0.05, 0.55], ) # Add the mean at-bat hit chance to the figure. plot.line( x=[0, 1], y=[population_mean, population_mean], line_color="orange", line_width=3, level="underlay", legend_label="Population mean", ) # Add the standard deviation of the current mean at-bat hit chance to the # figure. std_band = Band( base="x", lower="lower_std", upper="upper_std", source=source, level="underlay", fill_alpha=0.2, fill_color="orange", line_width=0.2, line_color="orange", ) plot.add_layout(std_band) # Add the empirical current at-bat hits to the figure. plot.line( x=x, y=(df["Current hits"] / df["Current at-bats"]).values, line_color="grey", line_alpha=0.7, line_width=2.0, legend_label="Current hits / Current at-bats", ) # Add HDI whiskers to each player in the figure. whiskers = Whisker( base="x", upper="upper_hdi", lower="lower_hdi", source=source, line_color="steelblue", ) whiskers.upper_head.line_color = "steelblue" whiskers.lower_head.line_color = "steelblue" plot.add_layout(whiskers) # Add the modeled player at-bat hit chance to the figure. glyph = plot.circle( x="x", y="y", source=source, size=10, line_color="white", fill_color="steelblue", legend_label="Players", ) tooltips = HoverTool( renderers=[glyph], tooltips=[ ("Name", "@name"), ("Posterior Upper HDI", "@upper_hdi{0.000}"), ("Posterior Mean", "@y{0.000}"), ("Posterior Lower HDI", "@lower_hdi{0.000}"), ], ) plot.add_tools(tooltips) # Add a legend to the figure. plot.legend.location = "top_left" plot.legend.click_policy = "mute" # Style the figure. plots.style(plot) return plot def _sample_data_prep( samples: MonteCarloSamples, df: pd.DataFrame, ) -> Dict[str, np.ndarray]: """ Prepare sample data for plotting. :param samples: Bean Machine inference object. :type samples: MonteCarloSamples :param df: Dataframe of the model data. :type df: pd.DataFrame :return: Dictionary of data for plotting. 
:rtype: Dict[str, np.ndarray] """ keys = [] values = [] samples_xr = samples.to_xarray() n_chains = samples_xr.coords.get("chain").values.shape[0] n_samples = samples_xr.coords.get("draw").values.shape[0] data_vars = samples_xr.data_vars for key in data_vars.keys(): name = key.__dict__["wrapper"].__name__ if "theta" in name: v = np.dsplit(samples.get(key).numpy(), df.shape[0]) v = [value_.reshape(n_chains, n_samples) for value_ in v] values.extend(v) k = [f"θ[{player_name}]" for player_name in df["Name"].values] keys.extend(k) if "kappa" in name: keys.append("κ") v = samples.get(key).numpy() values.append(v) if "phi" in name: keys.append("φ") v = samples.get(key).numpy() values.append(v) return dict(zip(keys, values)) def plot_partial_pooling_diagnostics( samples: MonteCarloSamples, df: pd.DataFrame, ) -> List[List[Figure]]: """ Plot the partial-pooling model diagnostics. :param samples: Bean Machine inference object. :type samples: MonteCarloSamples :param df: Dataframe of model data. :type df: pd.DataFrame :return: A nested list of Bokeh figures. :rtype: List[List[Figure]] """ # Prepare data for the figures. diagnostics_data = _sample_data_prep(samples, df) # Cycle through each query and create the diagnostics plots using arviz. diagnostic_plots = [] for key, value in diagnostics_data.items(): ac_plot = az.plot_autocorr({key: value}, show=False)[0].tolist() tr_plot = az.plot_trace( {key: value}, kind="rank_bars", show=False, )[0].tolist() ess = az.plot_ess({key: value}, kind="evolution", show=False)[0][0] post = az.plot_posterior({key: value}, show=False)[0][0] # Style the plots from arviz. for i, p in enumerate(tr_plot): if i == 0: p.plot_width = 300 for j, renderer in enumerate(p.renderers): renderer._property_values["glyph"].line_color = COLORS[j] renderer._property_values["glyph"].line_dash = "solid" renderer._property_values["glyph"].line_width = 2 renderer._property_values["glyph"].line_alpha = 0.6 else: p.plot_width = 600 p.plot_height = 300 plots.style(p) for p in ac_plot: p.plot_width = 300 p.plot_height = 300 plots.style(p) ess.plot_width = 300 ess.plot_height = 300 plots.style(ess) post.plot_width = 300 post.plot_height = 300 plots.style(post) diagnostic_plots.append([post, ess, tr_plot[0], tr_plot[1], *ac_plot]) return diagnostic_plots def plot_partial_pooling_model(samples: MonteCarloSamples, df: pd.DataFrame) -> Figure: """ Partial-pooling model plot. :param samples: Bean Machine inference object. :type samples: MonteCarloSamples :param df: Dataframe of the model data. :type df: pd.DataFrame :return: Bokeh figure of the partial-pooling model. :rtype: Figure """ # Prepare data for the figure. diagnostics_data = _sample_data_prep(samples, df) hdi_df = az.hdi(diagnostics_data, hdi_prob=0.89).to_dataframe() hdi_df = hdi_df.T.rename(columns={"lower": "hdi_11%", "higher": "hdi_89%"}) summary_df = az.summary(diagnostics_data, round_to=4).join(hdi_df) theta_index = summary_df[ summary_df.index.astype(str).str.contains("θ") ].index.values x = (df["Current hits"] / df["Current at-bats"]).values y = summary_df.loc[theta_index, "mean"] upper_hdi = summary_df.loc[theta_index, "hdi_89%"] lower_hdi = summary_df.loc[theta_index, "hdi_11%"] population_mean = (df["Current hits"] / df["Current at-bats"]).mean() # Create the figure data source. source = ColumnDataSource( { "x": x, "y": y, "upper_hdi": upper_hdi, "lower_hdi": lower_hdi, "name": df["Name"].values, } ) # Create the figure. 
plot = figure( plot_width=500, plot_height=500, title="Partial pooling", x_axis_label="Observed hits / at-bats", y_axis_label="Predicted chance of a hit", x_range=[0.14, 0.41], y_range=[0.05, 0.55], ) # Add the empirical mean at-bat hit chance to the figure. plot.line( x=[0, 1], y=[population_mean, population_mean], line_color="orange", line_width=3, level="underlay", legend_label="Population mean", ) # Add the standard deviation of the mean at-bat hit chance to the figure. std_band = Band( base="x", lower="lower_std", upper="upper_std", source=source, level="underlay", fill_alpha=0.2, fill_color="orange", line_width=0.2, line_color="orange", ) plot.add_layout(std_band) # Add the empirical at-bat hit chance to the figure. plot.line( x=x, y=(df["Current hits"] / df["Current at-bats"]).values, line_color="grey", line_alpha=0.7, line_width=2.0, legend_label="Current hits / Current at-bats", ) # Add the HDI whiskers to the figure. whiskers = Whisker( base="x", upper="upper_hdi", lower="lower_hdi", source=source, line_color="steelblue", ) whiskers.upper_head.line_color = "steelblue" whiskers.lower_head.line_color = "steelblue" plot.add_layout(whiskers) # Add the partial-pooling model data to the figure. glyph = plot.circle( x="x", y="y", source=source, size=10, line_color="white", fill_color="steelblue", legend_label="Players", ) tooltips = HoverTool( renderers=[glyph], tooltips=[ ("Name", "@name"), ("Posterior Upper HDI", "@upper_hdi{0.000}"), ("Posterior Mode", "@mode{0.000}"), ("Posterior Lower HDI", "@lower_hdi{0.000}"), ], ) plot.add_tools(tooltips) # Add a legend to the figure. plot.legend.location = "top_left" plot.legend.click_policy = "mute" # Style the figure. plots.style(plot) return plot def plot_shrinkage( no_pooling_samples: MonteCarloSamples, partial_pooling_samples: MonteCarloSamples, df: pd.DataFrame, ) -> Figure: """ Plot shrinkage due to partial-pooling model. :param no_pooling_samples: Bean Machine inference object for no-pooling. :type no_pooling_samples: MonteCarloSamples :param partial_pooling_samples: BM inference object for partial-pooling. :type partial_pooling_samples: MonteCarloSamples :param df: Dataframe with model data in it. :type df: pd.DataFrame :return: Bokeh plot showing shrinkage. :rtype: Figure """ # Prepare data for the figure. population_mean = (df["Current hits"] / df["Current at-bats"]).mean() population_std = (df["Current hits"] / df["Current at-bats"]).std() lower_std = [population_mean - population_std] * df.shape[0] upper_std = [population_mean + population_std] * df.shape[0] x = (df["Current hits"] / df["Current at-bats"]).values names = df["Name"].values pp_data = _sample_data_prep(partial_pooling_samples, df) pp_summary_df = az.summary(pp_data, round_to=4) pp_theta_index = pp_summary_df.index.astype(str).str.contains("θ") pp_y = pp_summary_df.loc[pp_theta_index, "mean"].values pp_source = ColumnDataSource({"x": x, "y": pp_y, "name": names}) no_pooling_data = _sample_data_prep(no_pooling_samples, df) np_summary_df = az.summary(no_pooling_data, round_to=4) np_theta_index = np_summary_df.index.astype(str).str.contains("θ") np_y = np_summary_df.loc[np_theta_index, "mean"].values np_source = ColumnDataSource({"x": x, "y": np_y, "name": names}) # Create the figure. plot = figure( plot_width=500, plot_height=500, title="Partial pooling shift", x_axis_label="Observed hits / at-bats", y_axis_label="Predicted chance of a hit", x_range=[0.14, 0.41], y_range=[0.05, 0.55], ) # Create the mean chance for at-bat hits line. 
plot.line( x=[0, 1], y=[population_mean, population_mean], line_color="orange", line_width=3, level="underlay", legend_label="Population mean", ) # Create a band that contains the standard deviation of the mean chance for # at-bat hits. source = ColumnDataSource( { "x": x, "lower_std": lower_std, "upper_std": upper_std, } ) std_band = Band( base="x", lower="lower_std", upper="upper_std", source=source, level="underlay", fill_alpha=0.2, fill_color="orange", line_width=0.2, line_color="orange", ) plot.add_layout(std_band) # Add the empirical current chances to the figure. plot.line( x=x, y=x, line_color="grey", line_alpha=0.7, line_width=2.0, legend_label="Current hits / Current at-bats", ) # Add the partial-pooling model chances. plot.circle( x="x", y="y", source=pp_source, size=10, line_color="white", fill_color="steelblue", legend_label="Partial-pooling", ) # Add the no-pooling model chances. plot.circle( x="x", y="y", source=np_source, size=10, line_color="steelblue", fill_color="white", legend_label="No-pooling", ) # Add arrows to show the shrinkage. for i in range(len(x)): plot.add_layout( Arrow( end=VeeHead(size=10), x_start=np_source.data["x"][i], y_start=np_source.data["y"][i], x_end=pp_source.data["x"][i], y_end=pp_source.data["y"][i], ) ) # Add a legend to the figure. plot.legend.location = "top_left" plot.legend.click_policy = "mute" # Style the figure. plots.style(plot) return plot
beanmachine-main
src/beanmachine/tutorials/utils/baseball.py
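A stripped-down sketch of the bokeh Whisker pattern that the pooling plots above all share: the per-point interval bounds live as columns in a ColumnDataSource, and a single Whisker annotation draws every interval. The three points below are invented for illustration.

import numpy as np
from bokeh.models import Whisker
from bokeh.models.sources import ColumnDataSource
from bokeh.plotting import figure, show

x = np.array([0.20, 0.25, 0.30])
source = ColumnDataSource(
    {"x": x, "y": x, "lower_hdi": x - 0.05, "upper_hdi": x + 0.05}
)
p = figure(plot_width=300, plot_height=300)
whiskers = Whisker(base="x", lower="lower_hdi", upper="upper_hdi", source=source)
whiskers.upper_head.line_color = "steelblue"
whiskers.lower_head.line_color = "steelblue"
p.add_layout(whiskers)  # one annotation renders every interval
p.circle(x="x", y="y", source=source, size=8)  # point estimates on top
show(p)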
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ import matplotlib.pyplot as plt from datasets import transformations import torch import numpy as np def plot_x2_reconstructions( pairs, model, indices, train_set, save_name, ): """ Plots sample x2 reconstructions based on indices Args: pairs (datasets.Pairs): contains x1, x2, and params. model (function): callable f(x1) = x1_reconstruction indices (list of ints): indices for samples to plot train_set (bool): if true title is plotted with train otherwise test. save_name (str): indicates path where images should be saved. """ title = "Training Reconstruction" if train_set else "Test Reconstruction" fig, axs = plt.subplots(len(indices), 3, figsize=(6, 12)) fig.suptitle(title, fontsize=16) for i, sample_idx in enumerate(indices): x1, x2, params = pairs[sample_idx] n_pixels = x1.shape[1] try: # for weakly supervised autoencoder x2_reconstruction = model(x1.unsqueeze(0), x2.unsqueeze(0), params) except TypeError: # for real autoencoder x2_reconstruction = model(x1.unsqueeze(0), params) axs[i][0].imshow(x1.squeeze()) axs[i][0].set_title("x1") axs[i][1].imshow(x2.squeeze()) axs[i][1].set_title("x2") axs[i][2].imshow( x2_reconstruction.cpu().detach().numpy().reshape(n_pixels, n_pixels) ) axs[i][2].set_title("x2 from tranformed z1") if save_name: plt.savefig(f"{save_name}.png", dpi=300, bbox_inches="tight") plt.close() else: plt.show() def plot_x1_reconstructions(pairs, model, indices, train_set, save_name): """ Plots sample x2 reconstructions based on indices Args: pairs (datasets.Pairs): contains x1, x2, and params. model (function): callable f(x1) = x1_reconstruction indices (list of ints): indices for samples to plot train_set (bool): if true title is plotted with train otherwise test. save_name (str): indicates path where images should be saved. 
""" title = "Training Reconstructions" if train_set else "Test Reconstructions" fig, axs = plt.subplots(len(indices), 2, figsize=(5, 12)) fig.suptitle(title, fontsize=16) for i, sample_idx in enumerate(indices): x1, x2, params = pairs[sample_idx] n_pixels = x1.shape[1] x1_reconstruction = model(x1.unsqueeze(0)).cpu().detach().numpy() axs[i][0].imshow(x1.squeeze()) axs[i][0].set_title("x1") axs[i][1].imshow(x1_reconstruction.reshape(n_pixels, n_pixels)) axs[i][1].set_title("x1 reconstruction") if save_name: plt.savefig(f"{save_name}.png", dpi=300, bbox_inches="tight") plt.close() else: plt.show() def plot_rotations( X, model, n_transformations, title, save_name=None, param_name="angle", use_latent_op=True, ): """Plots all rotated reconstructions for given samples""" font_size = 18 degree_sign = "\N{DEGREE SIGN}" n_samples = X.shape[0] fig, axs = plt.subplots(n_samples, n_transformations + 2, figsize=(16, 12)) fig.suptitle(title, fontsize=16) for sample_i, x1 in enumerate(X): axs[sample_i, 0].imshow(x1.squeeze()) axs[sample_i, 0].set_title("original", fontsize=font_size) axs[sample_i, 0].set_xticks([]) axs[sample_i, 0].set_yticks([]) transformation_params = get_all_transformations(param_name, n_transformations) for i, param in enumerate(transformation_params): if use_latent_op: x2_reconstruction = model.reconstruct_x2(x1.unsqueeze(1), param) else: x2_reconstruction = model.reconstruct_transformed_x1( x1.unsqueeze(1), param ) axs[sample_i, i + 1].imshow(x2_reconstruction.squeeze()) if param_name == "angle": axs[sample_i, i + 1].set_title( f"{param.angle:0.0f}{degree_sign}", fontsize=font_size ) axs[sample_i, i + 1].set_xticks([]) axs[sample_i, i + 1].set_yticks([]) if save_name: plt.savefig(save_name, bbox_inches="tight", dpi=300) plt.close() else: plt.show() def plot_transformations_complex( X, model, title, save_name=None, param_name="angle", supervised=False, ): """Plots all rotated reconstructions for given samples""" font_size = 18 degree_sign = "\N{DEGREE SIGN}" n_samples = X.shape[0] transformation_params = transformations.get_transform_params(model.data.n_rotations, model.data.n_x_translations, model.data.n_y_translations, (1.0, )) n_transformations = len([i for i in transformation_params]) fig, axs = plt.subplots(n_samples, n_transformations + 1, figsize=(16, int(12/5.*len(X)))) for sample_i, x1 in enumerate(X): axs[sample_i, 0].imshow(x1.squeeze()) axs[sample_i, 0].set_title("original", fontsize=font_size) axs[sample_i, 0].set_xticks([]) axs[sample_i, 0].set_yticks([]) x1 = x1.to(model.device) z1 = model.encoder(x1) transformation_params = transformations.get_transform_params(model.data.n_rotations, model.data.n_x_translations, model.data.n_y_translations, (1.0, )) for i, param in enumerate(transformation_params): shifts = torch.LongTensor([[i]]) if supervised: z_transformed = model.transform(z1, [shifts]) else: z_transformed = model.transform(z1, torch.LongTensor([[i]])) x2_reconstruction = model.decoder(z_transformed).detach().cpu().numpy() axs[sample_i, i + 1].imshow(x2_reconstruction.squeeze()) if param_name == "angle": axs[sample_i, i + 1].set_title( f"{param.angle:0.0f}{degree_sign}", fontsize=font_size ) elif param_name == "tx": axs[sample_i, i + 1].set_title(f"{param.shift_x:0.0f}", fontsize=font_size) elif param_name == 'ty': axs[sample_i, i + 1].set_title(f"{param.shift_y:0.0f}", fontsize=font_size) else: axs[sample_i, i + 1].set_title(f"{param.shift_x:0.0f},{param.shift_y:0.0f}", fontsize=font_size) axs[sample_i, i + 1].set_xticks([]) axs[sample_i, i + 1].set_yticks([]) 
if save_name: plt.savefig(save_name, bbox_inches="tight", dpi=300) plt.close() else: plt.show() def get_all_transformations(param_name, n_transformations): if param_name == "angle": return transformations.get_transform_params(n_transformations, 0, 0, (1.0,)) elif param_name == "shift_x": return transformations.get_transform_params(0, n_transformations, 0, (1.0,)) elif param_name == "shift_y": return transformations.get_transform_params(0, 0, n_transformations, (1.0,)) def plot_rotations_translations(X, model, n_transformations, n_rot, n_x, n_y, save_name=None): degree_sign = "\N{DEGREE SIGN}" n_samples = X.shape[0] fig, axs = plt.subplots(n_samples, n_transformations + 2, figsize=(16, int(12/5.*len(X)))) for sample_i, x1 in enumerate(X): axs[sample_i, 0].imshow(x1.squeeze()) axs[sample_i, 0].set_title("original", fontsize=16) axs[sample_i, 0].set_xticks([]) axs[sample_i, 0].set_yticks([]) x1 = x1.to(model.device) transformation_params = [t for t in transformations.get_transform_params(n_rot, n_x, n_y, (1.0, ))] z = model.encoder(x1) angle = None shift_x = None shift_y = None t_list = [] i = 0 for _, t in enumerate(range(n_transformations+1)): j = np.random.randint(len(transformation_params)) param = transformation_params[j] if not t in t_list: shifts = model.return_shifts([param]) z_transformed = model.transform(z, shifts) x2_reconstruction = model.decoder(z_transformed).detach().cpu().numpy() axs[sample_i, i + 1].imshow(x2_reconstruction.squeeze()) axs[sample_i, i + 1].set_title(f"{param.angle:0.0f}{degree_sign}\n{param.shift_x:0.0f},{param.shift_y:0.0f}", fontsize=16) axs[sample_i, i + 1].set_xticks([]) axs[sample_i, i + 1].set_yticks([]) angle = param.angle shift_x = param.shift_x shift_y = param.shift_y i += 1 if i+1 >= n_transformations + 2: break if save_name: plt.savefig(save_name, bbox_inches="tight", dpi=300) plt.close() else: plt.show()
Addressing-the-Topological-Defects-of-Disentanglement-main
plot.py
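The plotting helpers above only assume that `transformations.get_transform_params` yields one record per discrete transformation, with `angle`, `shift_x`, and `shift_y` fields. Below is a hypothetical minimal stand-in consistent with that usage; the real implementation lives in the repo's `datasets.transformations` module and may differ, and the `n + 1` grid-point convention is an assumption inferred from the ShiftOperator code later in this dump.

from collections import namedtuple
from itertools import product

Params = namedtuple("Params", ["angle", "shift_x", "shift_y", "scale"])

def get_transform_params_sketch(n_rotations, n_x_translations, n_y_translations, scales):
    # n discrete steps are assumed to give n + 1 grid points, identity included
    angles = [360.0 * i / (n_rotations + 1) for i in range(n_rotations + 1)]
    xs = range(n_x_translations + 1)
    ys = range(n_y_translations + 1)
    for angle, sx, sy, scale in product(angles, xs, ys, scales):
        yield Params(angle, sx, sy, scale)

# e.g. nine rotation steps: angles 0, 36, ..., 324 and no translation
print(len(list(get_transform_params_sketch(9, 0, 0, (1.0,)))))  # 10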
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ """ Launches experiments locally or on the cluster python run_experiments.py [name] --cluster OPTIONS: python run_experiments.py linear-mnist-test --data mnist python run_experiments.py cci-autoencoder-shapes --architecture CCI """ import argparse import autoencoder import cci_variational_autoencoder import os import itertools from datasets import datasets from functools import partial import torch import shutil import submitit BASE_PARAMS = { "seed": [0, 10, 20, 30, 40], "n_epochs": [30], "learning_rate": [0.001, 0.0005], } device = "cuda" if torch.cuda.is_available() else "cpu" print(f"running on {device}") def run_cci_vae_shapes( beta=1000.0, c_max=36.0, z_dim=30, batch_size=16, n_epochs=10, learning_rate=0.0005, seed=0, folder=None, n_classes=300, architecture=None, n_rotations=9, n_x_translations=0, n_y_translations=0, distribution="gaussian", ): """Runs CCI VAE and variants on Simple Shapes. Note architecture kwarg is not used""" if folder is None: raise ValueError("Please provide an experiment folder") print("saving results to ", folder) shapes = datasets.SimpleShapes( batch_size, n_rotations=n_rotations, n_x_translations=n_x_translations, n_y_translations=n_y_translations, n_classes=n_classes, seed=seed, pairs=False, ) train_cci_vae_variants( shapes, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, folder ) def run_cci_vae_mnist( beta=1000.0, c_max=36.0, z_dim=30, batch_size=16, n_epochs=10, learning_rate=0.0005, seed=0, folder=None, n_classes=300, proportion=0.01, architecture=None, n_rotations=9, n_x_translations=0, n_y_translations=0, distribution="gaussian", ): """Runs CCI VAE and variants on MNIST. Note architecture kwarg is not used""" if folder is None: raise ValueError("Please provide an experiment folder") print("saving results to ", folder) mnist = datasets.ProjectiveMNIST( batch_size, seed=seed, train_set_proportion=proportion, test_set_proportion=1.0, valid_set_proportion=proportion, n_rotations=n_rotations, n_x_translations=n_x_translations, n_y_translations=n_y_translations, pairs=False, ) train_cci_vae_variants( mnist, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, folder ) def run_cci_vae_single_digit_mnist( beta=1000.0, c_max=36.0, z_dim=30, batch_size=16, n_epochs=10, learning_rate=0.0005, seed=0, folder=None, n_classes=300, proportion=0.01, architecture=None, n_rotations=9, n_x_translations=0, n_y_translations=0, distribution="gaussian", ): """Runs CCI VAE and variants on MNIST. 
Note architecture kwarg is not used""" if folder is None: raise ValueError("Please provide an experiment folder") print("saving results to ", folder) mnist = datasets.ProjectiveSingleDigitMNIST( batch_size, seed=seed, train_set_proportion=proportion, test_set_proportion=1.0, valid_set_proportion=proportion, n_rotations=n_rotations, n_x_translations=n_x_translations, n_y_translations=n_y_translations, pairs=False, ) train_cci_vae_variants( mnist, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, folder ) def train_cci_vae_variants( data, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, folder ): """Trains CCI, Beta, and standard VAE""" print("Training CCI VAE") cci_vae_folder = os.path.join(folder, "cci_vae") train_cci_vae( data, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, cci_vae_folder, ) print("Training Beta VAE") beta_vae_folder = os.path.join(folder, "beta_vae") train_cci_vae( data, beta, 0.0, z_dim, n_epochs, learning_rate, distribution, seed, beta_vae_folder, ) print("Training VAE") vae_folder = os.path.join(folder, "vae") train_cci_vae( data, 1.0, 0.0, z_dim, n_epochs, learning_rate, distribution, seed, vae_folder ) def run_autoencoder_shapes( z_dim=1000, batch_size=16, n_epochs=30, learning_rate=0.0005, seed=0, folder=None, architecture="Linear", n_classes=300, n_rotations=9, n_x_translations=0, n_y_translations=0, distribution=None, use_latent_op=True, ): if folder is None: raise ValueError("Please provide an experiment folder") print("saving results to ", folder) shapes = datasets.SimpleShapes( batch_size, n_classes=n_classes, seed=seed, n_rotations=n_rotations, n_x_translations=n_x_translations, n_y_translations=n_y_translations, ) if use_latent_op: train_autoencoder( shapes, z_dim, n_epochs, learning_rate, seed, folder, architecture ) else: train_standard_autoencoder( shapes, z_dim, n_epochs, learning_rate, seed, folder, architecture ) def run_autoencoder_mnist( z_dim=1000, batch_size=16, n_epochs=2, learning_rate=0.0005, seed=0, folder=None, architecture="Linear", proportion=0.01, n_rotations=9, n_x_translations=0, n_y_translations=0, distribution=None, use_latent_op=True, ): if folder is None: raise ValueError("Please provide an experiment folder") print("saving results to ", folder) mnist = datasets.ProjectiveMNIST( batch_size, seed=seed, train_set_proportion=proportion, test_set_proportion=1.0, valid_set_proportion=proportion, n_rotations=n_rotations, n_x_translations=n_x_translations, n_y_translations=n_y_translations, ) if use_latent_op: print("using latent_op") train_autoencoder( mnist, z_dim, n_epochs, learning_rate, seed, folder, architecture ) else: train_standard_autoencoder( mnist, z_dim, n_epochs, learning_rate, seed, folder, architecture ) def train_standard_autoencoder( data, z_dim, n_epochs, learning_rate, seed, folder, architecture ): model = autoencoder.AutoEncoder( data, z_dim=z_dim, n_epochs=n_epochs, learning_rate=learning_rate, encoder_type=architecture, decoder_type=architecture, device=device, seed=seed, ) model.run() model.save_best_validation(os.path.join(folder, "standard-autoencoder")) def train_autoencoder(data, z_dim, n_epochs, learning_rate, seed, folder, architecture): model_disentangled_rotation = autoencoder.AutoEncoder( data, z_dim=z_dim, n_epochs=n_epochs, learning_rate=learning_rate, latent_operator_name="DisentangledRotation", encoder_type=architecture, decoder_type=architecture, device=device, seed=seed, ) model_disentangled_rotation.run() model_disentangled_rotation.save_best_validation( 
os.path.join(folder, "disentangled-operator") ) model_shift_operator = autoencoder.AutoEncoder( data, z_dim=z_dim, n_epochs=n_epochs, learning_rate=learning_rate, latent_operator_name="ShiftOperator", encoder_type=architecture, decoder_type=architecture, device=device, seed=seed, ) model_shift_operator.run() model_shift_operator.save_best_validation(os.path.join(folder, "shift-operator")) def train_cci_vae( data, beta, c_max, z_dim, n_epochs, learning_rate, distribution, seed, folder ): cci_vae = cci_variational_autoencoder.CCIVariationalAutoEncoder( data, beta=beta, c_max=c_max, z_dim=z_dim, seed=seed, learning_rate=learning_rate, n_epochs=n_epochs, distribution=distribution, ) cci_vae.train() cci_vae.save_best_validation(folder) def launch_single_job(experiment, base_dir, results_dir, **kwargs): log_folder = base_dir + "%j" executor = submitit.AutoExecutor(folder=log_folder) # executor.update_parameters(timeout_min=600, gpus_per_node=1) executor.update_parameters( timeout_min=240, gpus_per_node=1, ) job = executor.submit(experiment, folder=results_dir, **kwargs) print("job id", job.job_id) print(f"logging to: {base_dir + job.job_id}") print(f"results stored at: {results_dir}") result = job.result() print(f"job result: {result}") def launch_sweep(experiment, params, base_dir, experiment_dir): log_folder = base_dir + "%j" executor = submitit.AutoExecutor(folder=log_folder) # executor.update_parameters(timeout_min=600, gpus_per_node=1) executor.update_parameters( timeout_min=600, gpus_per_node=1, ) jobs = [] with executor.batch(): for i, param in enumerate(params): print("running with param ", param) param["folder"] = os.path.join(experiment_dir, f"{i}") job = executor.submit(experiment, **param) jobs.append(job) print(f"launched {len(params)} jobs") print("sweep id", jobs[0].job_id) print(f"logging to: {base_dir}{jobs[0].job_id}") results = [job.result() for job in jobs] print(f"job results: {results}") def get_params(args): params = BASE_PARAMS if args.data == "mnist": params["batch_size"] = [8, 16, 32, 64] elif args.data == "shapes": params["batch_size"] = [4, 8, 16, 32] if args.model == "cci_vae": params["n_epochs"] = [10, 20, 50] params["beta"] = [4.0, 10.0, 100.0, 1000.0] params["z_dim"] = [10, 30] return params def get_param_combinations(params): """Returns a list of dictionaries with all combinations""" keys, values = zip(*params.items()) params_combinations = [dict(zip(keys, v)) for v in itertools.product(*values)] return params_combinations def get_directories(args, cluster=False): user = os.environ["USER"] if cluster: RESULTS_DIR = f"/checkpoint/{user}/Equivariance/" base_dir = f"/checkpoint/{user}/jobs/{args.name}/" else: RESULTS_DIR = os.path.expanduser( "~/Dropbox/FAIR/Projects/Equivariance/experiments/results" ) base_dir = os.path.expanduser( "~/Dropbox/FAIR/Projects/Equivariance/experiments/jobs/{args.name}/" ) experiment_dir = os.path.join(RESULTS_DIR, args.name) # clean experimental directory if os.path.exists(experiment_dir): shutil.rmtree(experiment_dir) return base_dir, experiment_dir def get_experiment_function(args): experiments = { "run_autoencoder_shapes": run_autoencoder_shapes, "run_autoencoder_mnist": run_autoencoder_mnist, "run_cci_vae_shapes": run_cci_vae_shapes, "run_cci_vae_mnist": run_cci_vae_mnist, "run_cci_vae_single_digit_mnist": run_cci_vae_mnist, } experiment = experiments[f"run_{args.model}_{args.data}"] print(f"run_{args.model}_{args.data}") if args.data == "shapes": experiment = partial(experiment, n_classes=args.n_classes) elif args.data in 
{"mnist", "single_digit_mnist"}: experiment = partial(experiment, proportion=args.mnist_proportion) else: raise ValueError(f"dataset {args.data} not supported") # standard autoencoder if "autoencoder" == args.model and args.no_latent_op: experiment = partial(experiment, use_latent_op=False) n_rotations, n_x_translations, n_y_translations = get_n_transformations(args) experiment = partial( experiment, n_rotations=n_rotations, n_x_translations=n_x_translations, n_y_translations=n_y_translations, architecture=args.architecture, z_dim=args.z_dim, distribution=args.distribution, ) return experiment def get_n_transformations(args): n_rotations, n_x_translations, n_y_translations = 0, 0, 0 n_transformations = 9 if args.transformation == "rotation": n_rotations = n_transformations if args.transformation == "shift_x": n_x_translations = n_transformations if args.transformation == "shift_y": n_y_translations = n_transformations return n_rotations, n_x_translations, n_y_translations def init_argparse() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( usage="python run_experiments --cluster", description="runs experiments with specified parameters", ) parser.add_argument("name", help="name of experiment") parser.add_argument( "--model", help="model for experiments. Example: autoencoder, cci_vae", default="autoencoder", ) parser.add_argument( "--architecture", help="name of autoencoder architecture", default="Linear", ) parser.add_argument( "--data", help="dataset used for training: mnist, single_digit_mnist", default="shapes", ) parser.add_argument( "--mnist_proportion", help="proportion of mnist to use", default=0.01, type=float, ) parser.add_argument( "--n_classes", help="number of classes to use for simple shapes", default=300, type=int, ) parser.add_argument( "--z_dim", help="dataset used for training", default=1000, type=int ) parser.add_argument( "--transformation", choices=["rotation", "shift_x", "shift_y"], type=str.lower, default="rotation", ) parser.add_argument( "--distribution", help="likelihood distribution used for computing loss in CCI VAE", choices=["gaussian", "bernoulli"], type=str.lower, default="gaussian", ) parser.add_argument("--beta", help="beta used for CCI VAE", default=1000, type=int) parser.add_argument( "--no_latent_op", help="use standard autoencoder without latent operators", action="store_true", ) parser.add_argument("--cluster", action="store_true") parser.add_argument("--sweep", action="store_true") return parser if __name__ == "__main__": parser = init_argparse() args = parser.parse_args() experiment = get_experiment_function(args) base_dir, experiment_dir = get_directories(args, cluster=args.cluster) if args.cluster and args.sweep: params = get_params(args) params_combinations = get_param_combinations(params) launch_sweep(experiment, params_combinations, base_dir, experiment_dir) elif args.cluster: launch_single_job( experiment, base_dir, experiment_dir, ) else: print("running single local job") experiment(folder=experiment_dir)
Addressing-the-Topological-Defects-of-Disentanglement-main
run_experiments_real.py
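A usage sketch of the grid expansion that `get_param_combinations` above performs: a dict of value lists becomes one kwargs dict per point of the Cartesian product, which `launch_sweep` then submits as separate jobs.

import itertools

params = {"seed": [0, 10], "learning_rate": [0.001, 0.0005], "n_epochs": [30]}
keys, values = zip(*params.items())
combos = [dict(zip(keys, v)) for v in itertools.product(*values)]
print(len(combos))  # 2 * 2 * 1 = 4 jobs
print(combos[0])    # {'seed': 0, 'learning_rate': 0.001, 'n_epochs': 30}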
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ import torch from torch import nn from collections import OrderedDict from abc import ABC class ResNetExplorer(nn.Module): """ Loads a pre-trained model and hook on one of its layer """ def __init__(self, path_to_model="pytorch/vision:v0.6.0", model="resnet152"): super().__init__() self.pretrained_model = torch.hub.load(path_to_model, model, pretrained=True) def create_full_model(self, layer_to_explore, layer_to_explore_size, image_size): all_layers = dict(list(self.pretrained_model.named_children())) all_keys = list( all_layers.keys() ) # TODO: I am not sure the order is preserved ? max_index = all_keys.index(layer_to_explore) ##### ENCODER # take all layers up to the one we want to explore for the encoder encoder_layers = [ (all_keys[i], all_layers[layer]) for i, layer in enumerate(all_layers) if i <= max_index ] layers = OrderedDict() for layer in encoder_layers: name = layer[0] layers[name] = layer[1] # create a new model with it (saves time during feed-forward if we take other layers than the last one) self.fixed_encoder = nn.Sequential(layers) ##### Linear layer to learn the mapping self.linear = nn.Linear(layer_to_explore_size, layer_to_explore_size) ##### DECODER self.decoder = nn.Linear(layer_to_explore_size, image_size) def forward(self, x): z = self.fixed_encoder(x) # feed flattened z to linear z_prime = self.linear(z.view(x.size(0), -1)) x_dec = self.decoder(z_prime) # sigmoid to have something between 0 and 1 x_dec = torch.sigmoid(x_dec) # map to image shape return x_dec.view(x.size()) class LinearEncoder(nn.Module): def __init__(self, n_pixels, n_channels, z_dim): super().__init__() self.fc1 = nn.Linear(n_pixels ** 2 * n_channels, z_dim, bias=False) def forward(self, x): out = x.flatten(start_dim=1) out = self.fc1(out) return out class LinearDecoder(nn.Module): def __init__(self, n_pixels, n_channels, z_dim): super().__init__() self.n_pixels = n_pixels self.n_channels = n_channels self.fc1 = nn.Linear(z_dim, n_pixels ** 2 * n_channels, bias=False) def forward(self, x): out = self.fc1(x) out = out.reshape(-1, self.n_channels, self.n_pixels, self.n_pixels) return out class ComplexLinearEncoder(nn.Module): def __init__(self, n_pixels, n_channels, z_dim): super().__init__() self.fc1r = torch.nn.Linear(n_pixels ** 2 * n_channels, z_dim, bias=False) self.fc1i = torch.nn.Linear(n_pixels ** 2 * n_channels, z_dim, bias=False) def forward(self, x): out = x.flatten(start_dim=1) outr = self.fc1r(out) outi = self.fc1i(out) return (outr, outi) class ComplexLinearDecoder(nn.Module): def __init__(self, n_pixels, n_channels, z_dim): super().__init__() self.n_pixels = n_pixels self.n_channels = n_channels self.fc1r = nn.Linear(z_dim, n_pixels ** 2 * n_channels, bias=False) self.fc1i = nn.Linear(z_dim, n_pixels ** 2 * n_channels, bias=False) def forward(self, x): r1 = self.fc1r(x[0]) r2 = -self.fc1i(x[1]) out_r = r1 + r2 out_r = out_r.reshape(-1, self.n_channels, self.n_pixels, self.n_pixels) return out_r class CCIEncoder(nn.Module): def __init__(self, n_pixels, n_channels, z_dim): super().__init__() self.encoder = nn.Sequential( nn.Conv2d(n_channels, n_pixels, kernel_size=4, stride=2), nn.ReLU(), nn.Conv2d(n_pixels, n_pixels, kernel_size=4, stride=2), nn.ReLU(), nn.Conv2d(n_pixels, n_pixels, kernel_size=4, stride=2), nn.ReLU(), nn.Conv2d(n_pixels, 256, kernel_size=1, stride=1), Lambda(lambda x: 
x.view(x.size(0), -1)), nn.Linear(256, z_dim), ) def forward(self, x): out = self.encoder(x) return out class CCIDecoder(nn.Module): def __init__(self, n_pixels, n_channels, z_dim): super().__init__() self.decoder = nn.Sequential( nn.Linear(z_dim, 256), nn.ReLU(), Lambda(lambda x: x.view(-1, 256, 1, 1)), nn.ConvTranspose2d(256, 64, 4), nn.ReLU(), nn.ConvTranspose2d(64, 64, 4, 2, 1), nn.ReLU(), nn.ConvTranspose2d(64, n_pixels, 4, 2, 1), nn.ReLU(), nn.ConvTranspose2d(n_pixels, n_channels, 4, 2, 1), Lambda(lambda x: x.view(x.size(0), -1)), nn.Linear(32 * 32, n_pixels * n_pixels), Lambda(lambda x: x.view(x.size(0), 1, n_pixels, n_pixels)), ) def forward(self, x): out = self.decoder(x) return out class NonLinearEncoder(nn.Module): def __init__(self, n_pixels, n_chanels, z_dim): super().__init__() self.fc1 = nn.Linear(n_pixels ** 2, n_pixels // 2) self.batch_norm = nn.BatchNorm1d(n_pixels // 2) self.fc2 = nn.Linear(n_pixels // 2, z_dim) def forward(self, x): out = x.flatten(start_dim=1) out = self.fc1(out) out = self.batch_norm(out) out = torch.relu(out) out = self.fc2(out) out = torch.relu(out) return out class NonLinearDecoder(nn.Module): def __init__(self, n_pixels, n_channels, z_dim): super().__init__() self.n_channels = n_channels self.n_pixels = n_pixels self.fc1 = nn.Linear(z_dim, (n_pixels ** 2) // 2) self.batch_norm = nn.BatchNorm1d((n_pixels ** 2) // 2) self.fc2 = nn.Linear((n_pixels ** 2) // 2, n_pixels ** 2) def forward(self, x): out = self.fc1(x) out = self.batch_norm(out) out = torch.relu(out) # reshape out = self.fc2(out) out = torch.relu(out) out = out.reshape(-1, self.n_channels, self.n_pixels, self.n_pixels) return out class VAEBase(ABC): @staticmethod def reparameterize(mu, log_var): """Returns z_sample from mu, var""" std = torch.exp(log_var / 2) # z_sample = torch.normal(mu, std) # eps = Variable(torch.randn_like(std)) eps = torch.randn_like(std) z_sample = mu + eps.mul(std) return z_sample @staticmethod def latent_sample(mu, log_var, num_std): """Generates sample based on mu, var that's num_std away from mean""" std = torch.exp(log_var / 2) z_sample = (num_std * std).add(mu) return z_sample class LinearCCIVAE(nn.Module, VAEBase): def __init__(self, n_pixels, n_channels, z_dim): super().__init__() self.z_dim = z_dim self.encoder = LinearEncoder(n_pixels, n_channels, 2 * z_dim) self.decoder = LinearDecoder(n_pixels, n_channels, z_dim) def forward(self, x): z_dist = self.encoder(x) mu, log_var = z_dist[:, : self.z_dim], z_dist[:, self.z_dim :] # reparameterize z_sample = LinearCCIVAE.reparameterize(mu, log_var) out = self.decoder(z_sample) return out, mu, log_var class Lambda(nn.Module): def __init__(self, func): super().__init__() self.func = func def forward(self, x): return self.func(x) class CCIVAE(nn.Module, VAEBase): """Model Architecture from CCI-VAE paper https://arxiv.org/abs/1804.03599 Encoder: 4 convolutional layers, each with 32 channels, 4x4 kernels, and a stride of 2. 
Followed by 2 fully connected layers, each of 256 units Latent Space: 20 units (10 for mean, 10 for variance) Decoder: transpose of encoder with ReLU activations """ def __init__(self, n_pixels, n_channels, z_dim, distribution="gaussian"): super().__init__() self.z_dim = z_dim self.n_channels = n_channels self.distribution = distribution self.encoder = nn.Sequential( nn.Conv2d(n_channels, n_pixels, kernel_size=4, stride=2), nn.ReLU(), nn.Conv2d(n_pixels, n_pixels, kernel_size=4, stride=2), nn.ReLU(), nn.Conv2d(n_pixels, n_pixels, kernel_size=4, stride=2), nn.ReLU(), nn.Conv2d(n_pixels, 256, kernel_size=1, stride=1), nn.ReLU(), Lambda(lambda x: x.view(x.size(0), -1)), nn.Linear(256, 2 * z_dim), ) self.decoder = nn.Sequential( nn.Linear(z_dim, 256), nn.ReLU(), Lambda(lambda x: x.view(-1, 256, 1, 1)), nn.ConvTranspose2d(256, 64, 4), nn.ReLU(), nn.ConvTranspose2d(64, 64, 4, 2, 1), nn.ReLU(), nn.ConvTranspose2d(64, n_pixels, 4, 2, 1), nn.ReLU(), nn.ConvTranspose2d(n_pixels, n_channels, 4, 2, 1), Lambda(lambda x: x.view(x.size(0), -1)), nn.ReLU(), nn.Linear(32 * 32, n_pixels * n_pixels), Lambda(lambda x: x.view(x.size(0), 1, n_pixels, n_pixels)), nn.Sigmoid(), ) def forward(self, x): z_dist = self.encoder(x) mu, log_var = z_dist[:, : self.z_dim], z_dist[:, self.z_dim :] # tanh log_var didn't seem to help # log_var = torch.tanh(log_var) z_sample = CCIVAE.reparameterize(mu, log_var) out = self.decoder(z_sample) return out, mu, log_var
Addressing-the-Topological-Defects-of-Disentanglement-main
models.py
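# --- Illustrative usage sketch (not part of the original file) ---
# A minimal check of the shape contract of the LinearEncoder/LinearDecoder
# pair defined in models.py above. Assumes the file is importable as
# `models`; the batch size, image size, and z_dim are arbitrary values for
# illustration, not settings from the repo's experiments.
import torch
import models

n_pixels, n_channels, z_dim = 32, 1, 16
encoder = models.LinearEncoder(n_pixels, n_channels, z_dim)
decoder = models.LinearDecoder(n_pixels, n_channels, z_dim)

x = torch.randn(8, n_channels, n_pixels, n_pixels)  # batch of 8 images
z = encoder(x)      # flattened to (8, 1024), mapped to (8, 16)
x_hat = decoder(z)  # mapped back to (8, 1024), reshaped to (8, 1, 32, 32)
assert z.shape == (8, z_dim) and x_hat.shape == x.shape

# VAEBase.reparameterize draws z = mu + eps * sigma with eps ~ N(0, I);
# with zero mean and zero log-variance this is a standard-normal sample.
z_sample = models.VAEBase.reparameterize(torch.zeros(8, z_dim), torch.zeros(8, z_dim))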
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ import torch import numpy as np import functools import pdb class ShiftOperator: """Performs discrete shift based on n_rotations.""" def __init__(self, n_rotations, device): self.n_rotations = n_rotations self.device = device self.translation_matrices = self.generate_shift_operator_matrices( n_rotations + 1 ) def __call__(self, z_batch, angles): """Interface for Autoencoder""" return self.translate_batch(z_batch, angles) def translate_batch(self, z_batch, angles): """Applies shift operator to batch Args: angles (array of floats): counter-clockwise rotation in degrees. """ smallest_angle = 360 / (self.n_rotations + 1) if angles.dim() > 1: shifts = angles[:, 0] / smallest_angle else: shifts = angles / smallest_angle try: translated_batch = [ self.translate(z, shifts[i].long()) for i, z in enumerate(z_batch) ] except IndexError as e: print("===ANGLES ARE", angles) raise e return torch.stack(translated_batch) def translate(self, z, shift): """Translate latent Args: z (1-dim tensor): latent vector shift (int): amount by which to shift. shift of 0 corresponds to the identity. """ # reshape into 2D tensor z_2d = z.reshape(self.n_rotations + 1, -1) translation_matrix = self.translation_matrices[shift] # move to cpu if tensor is cpu. Used for eval if not z_2d.is_cuda: translation_matrix = translation_matrix.cpu() # translation z_2d_shifted = translation_matrix.matmul(z_2d) # reshape back z_shifted = z_2d_shifted.reshape(z.shape) return z_shifted def generate_shift_operator_matrices(self, n_rotations): """Generates family of shift operator matrices""" translation_matrix = np.zeros((n_rotations, n_rotations)) for i in range(n_rotations): translation_matrix[i, (i + 1) % n_rotations] = 1 translation_matrices = [np.eye(n_rotations, n_rotations)] T = np.eye(n_rotations, n_rotations) for i in range(n_rotations - 1): T = np.dot(translation_matrix, T) translation_matrices.append(T) translation_matrices = np.array(translation_matrices) _translation_matrices = torch.tensor( translation_matrices, dtype=torch.float32, device=self.device, ) return _translation_matrices class ComplexShiftOperator: """Performs discrete shift based on n_rotations""" def __init__(self, cardinals, z_dim, device, unique_transfo=False, index=None): self.cardinals = cardinals self.z_dim = z_dim self.device = device self.translation_matrices = self.generate_translation_matrices( self.cardinals, self.z_dim ) if unique_transfo: if (np.array(cardinals)>1).sum()==1: self.index = int((np.array(cardinals)>1).nonzero()[0]) elif (np.array(cardinals)>1).sum()>1: if index is None: print("Must provide the index of the operator !") else: self.index = index self.translate_batch = self.translate_batch_unique else: self.translate_batch = self.translate_batch_multiple def __call__(self, z_batch, shifts): """Interface for Autoencoder""" z_batch_r, z_batch_i = z_batch return self.translate_batch(z_batch_r, z_batch_i, shifts) def translate_batch_unique(self, z_batch_r, z_batch_i, shifts): """Translates batch in the case of a unique transformations (Faster)""" tr = self.translation_matrices[self.index][0][shifts[:, 0]] ti = self.translation_matrices[self.index][1][shifts[:, 0]] z_batch_r_shifted = tr * z_batch_r - ti * z_batch_i z_batch_i_shifted = tr * z_batch_i + ti * z_batch_r return ( z_batch_r_shifted, z_batch_i_shifted, ) def translate_batch_multiple(self, 
z_batch_r, z_batch_i, shifts): """Translates batch in the case of multiple transformations""" (Mr, Mi) = self.build_multipliers(shifts) z_batch_r_shifted = Mr * z_batch_r - Mi * z_batch_i z_batch_i_shifted = Mr * z_batch_i + Mi * z_batch_r return ( z_batch_r_shifted, z_batch_i_shifted, ) def build_multipliers(self, shifts): size_batch, n_transfo = shifts.shape Mr = torch.ones((size_batch, self.z_dim), device=self.device) Mi = torch.zeros((size_batch, self.z_dim), device=self.device) for i in range(n_transfo): tr = self.translation_matrices[i][0][shifts[:, i]] ti = self.translation_matrices[i][1][shifts[:, i]] # complex multiply: both parts must be computed from the pre-update values Mr, Mi = Mr * tr - Mi * ti, Mr * ti + Mi * tr return (Mr, Mi) def translate(self, zr, zi, shift): """Translate latent Args: zr, zi (1-dim tensors): real and imaginary parts of the latent vector shift (int): amount by which to shift """ for i in range(len(shift)): tr = self.translation_matrices[i][0][shift[i]] ti = self.translation_matrices[i][1][shift[i]] # complex multiply: both parts must be computed from the pre-update values zr, zi = zr * tr - zi * ti, zi * tr + zr * ti return (zr, zi) def generate_translation_matrices(self, cardinals, z_dim): """Generates family of translation matrices""" def DFT_matrix(cardinal, z_dim): i, j = np.meshgrid(np.arange(cardinal), np.arange(cardinal)) omega = np.exp(2 * np.pi * 1j / cardinal) W = np.power(omega, i * j) return W # Loop over all transformations that can happen to the sample XYZ = [] for i, t in enumerate(cardinals): K = self.cardinals[i] X_i = np.arange(K) if z_dim % K: # creates in shift operator an unfinished cycle second_dim = ( int(np.floor(z_dim / K)) + 1 ) # TODO: not sure this is the right way else: # creates in shift operator a finished cycle second_dim = int(z_dim / K) X_i = np.tile(X_i.flatten(), (second_dim))[:z_dim] XYZ.append(X_i) _all_translation_matrices = list() for i in range(len(cardinals)): translation_matrices = DFT_matrix(cardinals[i], z_dim) translation_matrices = translation_matrices[:, XYZ[i]] translation_matrices_r = np.real(translation_matrices) translation_matrices_i = np.imag(translation_matrices) _translation_matrices_r = torch.tensor( translation_matrices_r, dtype=torch.float32, device=self.device, ) _translation_matrices_i = torch.tensor( translation_matrices_i, dtype=torch.float32, device=self.device, ) _all_translation_matrices.append( (_translation_matrices_r, _translation_matrices_i,) ) return _all_translation_matrices class DisentangledRotation: """Performs rotation using rotation matrix of the form: [cos, -sin], [sin, cos] Args: n_rotations (int): discrete rotations needed before identity is reached """ def __init__(self, n_rotations, device): self.n_rotations = n_rotations self.device = device def __call__(self, z, angles): """Interface for Autoencoder""" return self.rotate_batch(z, angles) def rotate_batch(self, x_batch, angles): """Rotates batch""" rotated_batch = [] if angles.dim() > 1: angles = angles[:, 0] for i, x in enumerate(x_batch): x_rotated = self.rotate(x, angles[i]) rotated_batch.append(x_rotated) return torch.stack(rotated_batch) def rotate(self, x, angle): """Clockwise rotation or translation Args: x (1D tensor): representing latent vector angle (float): rotation angle in degrees Returns: rotated tensor of same shape as x """ if x.dim() != 1: raise ValueError(f"x must be a flattened 1D vector.
Got shape {x.shape}") rotation_matrix = self.get_rotation_matrix(angle, x.shape[0]) if not x.is_cuda: rotation_matrix = rotation_matrix.cpu() x_rotated = rotation_matrix.matmul(x) return x_rotated @functools.lru_cache() def get_rotation_matrix(self, angle, dim): """Angle is the rotation angle in degrees. Returns rotation matrix that operates on first two dimensions """ rotation_matrix = torch.diag(torch.ones(dim, device=self.device)) if angle == 0.0: return rotation_matrix radians = (angle / 360) * torch.tensor(2 * np.pi) matrix_2d = torch.tensor( [ [torch.cos(radians), -torch.sin(radians)], [torch.sin(radians), torch.cos(radians)], ] ) rotation_matrix[0][0] = matrix_2d[0][0] rotation_matrix[0][1] = matrix_2d[0][1] rotation_matrix[1][0] = matrix_2d[1][0] rotation_matrix[1][1] = matrix_2d[1][1] return rotation_matrix.to(device=self.device)
Addressing-the-Topological-Defects-of-Disentanglement-main
latent_operators.py
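# --- Illustrative usage sketch (not part of the original file) ---
# ShiftOperator.translate above views a latent of size z_dim as an
# (n_rotations + 1) x (z_dim / (n_rotations + 1)) grid and cyclically
# permutes its rows; shift k corresponds to an angle of
# k * 360 / (n_rotations + 1) degrees. Assumes this file is importable as
# `latent_operators`; the sizes below are arbitrary illustrations.
import torch
import latent_operators

op = latent_operators.ShiftOperator(n_rotations=3, device="cpu")
z = torch.arange(8.0)                 # z_dim = 8, viewed internally as 4 x 2
z_shifted = op.translate(z, shift=1)  # one discrete step of 90 degrees
print(z.view(4, 2))                   # rows [0 1], [2 3], [4 5], [6 7]
print(z_shifted.view(4, 2))           # same rows, cyclically permuted by one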
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """
Addressing-the-Topological-Defects-of-Disentanglement-main
__init__.py
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ import torch import numpy as np import random import matplotlib import matplotlib.pyplot as plt import models import latent_operators from datasets import datasets from datasets.data_utils import x_to_image import plot import pdb import os import shutil eps = 1e-20 class WeaklyComplexAutoEncoder: """Trains a weakly supervised shift operator. Args: data (AbstractDataset): contains train and test loaders with angles z_dim (int): dimension of latent space seed (int): for random number generation translation (bool): if true, uses an offset identity matrix for rotation """ def __init__( self, data, z_dim=405, seed=0, encoder_type="ComplexLinear", decoder_type="ComplexLinear", transformation_type=None, device="cpu", temperature=1.0, output_directory="output", save_name="", use_softmax=1, n_rotations = 0, n_x_translations = 0, n_y_translations = 0, scaling_factors = (1, ) ): self.z_dim = z_dim self.seed = seed self.set_seed() self.data = data self.device = device self.encoder = getattr(models, encoder_type + "Encoder")( self.data.n_pixels, self.data.n_channels, z_dim ).to(self.device) self.decoder = getattr(models, decoder_type + "Decoder")( self.data.n_pixels, self.data.n_channels, z_dim ).to(self.device) cardinals = [ n_rotations + 1, n_x_translations + 1, n_y_translations + 1, len(scaling_factors), ] self.cardinals = cardinals # SO FAR, THIS MODEL ONLY WORKS FOR 1 TRANSFO # We grab which one with the following line assert (np.array(cardinals) > 1).sum()==1 for i, cardinal in enumerate(cardinals): if cardinal > 1: self.K = cardinal self.transfo_index = i # function used for transformation self.use_softmax = use_softmax self.transform = self.get_transformation(transformation_type) self.temperature = temperature self.output_dir = output_directory self.save_name = save_name self.best_epoch = 0 self.best_mse = 0 def set_seed(self): """Sets seed for random number generation""" torch.manual_seed(self.seed) np.random.seed(self.seed) random.seed(self.seed) # Generate Dataset torch.autograd.set_detect_anomaly(True) def get_transformation(self, name): """Returns function to performance transformation based name""" if name is None: return None transformation = getattr(latent_operators, name) return transformation(self.cardinals, self.z_dim, self.device, unique_transfo = True) def train(self, loss_func, learning_rate, n_epochs, log_frequency): self.encoder.train() self.decoder.train() params = list(self.encoder.parameters()) + list(self.decoder.parameters()) optimizer = torch.optim.Adam(params, lr=learning_rate) train_losses = torch.FloatTensor(n_epochs) valid_losses = torch.FloatTensor(n_epochs) best_mse = np.inf N_pairs = len(self.data.train_loader.dataset) for epoch in range(n_epochs): epoch_loss = 0 for i, (x1, x2, angles) in enumerate(self.data.train_loader): x1 = x1.to(device=self.device) x2 = x2.to(device=self.device) optimizer.zero_grad() loss = loss_func(x1, x2, angles) loss.backward() optimizer.step() epoch_loss += loss.item() * x1.size(0) epoch_loss = epoch_loss / N_pairs print(f"Epoch {epoch} Train loss: {epoch_loss:0.3e}") valid_mse = ( self.compute_mean_loss(loss_func, self.data.valid_loader) .detach() .item() ) # train_mse = ( # self.compute_mean_loss(loss_func, self.data.train_loader) # .detach() # .item() # ) # train_losses[epoch] = train_mse train_losses[epoch] = epoch_loss if valid_mse < best_mse: 
self.update_state(mse=valid_mse, epoch=epoch) best_mse = valid_mse file_name = "checkpoint_{}.pth.tar".format(self.save_name) self.save_best_checkpoint( out_dir=self.output_dir, file_name=file_name, optimizer_state_dict=optimizer.state_dict(), ) print(f"Epoch {epoch} validation loss: {valid_mse:0.3e}") valid_losses[epoch] = valid_mse return train_losses.detach().numpy(), valid_losses.detach().numpy() def ifft(self, cps): second_dim = cps.size(1) K = len(self.transform.translation_matrices[self.transfo_index][0]) cps_r = cps[..., 0].to(device=self.device) cps_i = cps[..., 1].to(device=self.device) tr_r = self.transform.translation_matrices[self.transfo_index][0] tr_i = self.transform.translation_matrices[self.transfo_index][1] alternative = cps_r[:, None, ...] * tr_r - cps_i[:, None, ...] * tr_i alternative = alternative.mean(2) # mean over frequencies return alternative def reconstruct_x1(self, x1): """Reconstructs x1 using model""" self.encoder.eval() self.decoder.eval() x1 = x1.to(device=self.device) with torch.no_grad(): z1 = self.encoder(x1) x1_reconstruction_r = self.decoder(z1) return x1_reconstruction_r def reconstruct_x2(self, x1, x2, param=None): """Reconstructs x2 using model and latent transformation""" self.encoder.eval() self.decoder.eval() x1 = x1.to(device=self.device) x2 = x2.to(device=self.device) batch_size = x1.size(0) with torch.no_grad(): z1 = self.encoder(x1) z2 = self.encoder(x2) angles_probas = self.compute_angles_probas(x1, x2, z1, z2) predicted_angle = angles_probas.detach().argmax(-1, keepdim=True) z_transformed = self.transform(z1, predicted_angle) x2_reconstruction_r = self.decoder(z_transformed) return x2_reconstruction_r def plot_multiple_transformations(self, param_name='angle', indices=None, train_set=False, save_name=None): """Plots all transformed reconstructions for given samples""" if indices is None: n_samples = min(len(self.data.X_orig_train), len(self.data.X_orig_test)) indices = np.random.randint(low=0, high=n_samples, size=5) X = ( self.data.X_orig_train[indices] if train_set else self.data.X_orig_test[indices] ).float() title = ( "Translations" if param_name != "angle" else "Rotations" ) plot.plot_transformations_complex( X, self, title, save_name=save_name, param_name=param_name, supervised=False, ) def plot_x1_reconstructions( self, indices=[10, 2092, 10299, 13290], train_set=False, save_name=None ): """Plots x1 autoencoder reconstruction from z1. Args: pairs (datasets.Pairs): contains x1, x2, and params. model (function): callable f(x1) = x1_reconstruction indices (list of ints): indices for samples to plot train_set (bool): if true title is plotted with train otherwise test. save_name (str): indicates path where images should be saved. """ pairs = self.data.X_train if train_set else self.data.X_test plot.plot_x1_reconstructions( pairs, self.reconstruct_x1, indices, train_set, save_name ) def plot_x2_reconstructions( self, indices=[10, 2092, 10299, 13290], train_set=False, save_name=None ): """Plots x1, x2 and x2 autoencoder reconstruction from z1 rotated. Args: pairs (datasets.Pairs): contains x1, x2, and params. model (function): callable f(x1) = x1_reconstruction indices (list of ints): indices for samples to plot train_set (bool): if true title is plotted with train otherwise test. save_name (str): indicates path where images should be saved.
""" pairs = self.data.X_train if train_set else self.data.X_test plot.plot_x2_reconstructions( pairs, self.reconstruct_x2, indices, train_set, save_name ) def compute_angles_probas(self, x1, x2, z1, z2): cps = self.computes_cross_power_spectrum(z1[0], z1[1], z2[0], z2[1]) invfs_alter = self.ifft(cps) angles_probas = invfs_alter return angles_probas def reconstruction_mse_transformed_z1_weak(self, x1, x2, angles, use_argmax=False): """Computes reconstruction MSE of x1 from z1 + x2 from transformed(z1), not using ground-truth angles""" criterion = torch.nn.MSELoss(reduction="none") batch_size = x1.size(0) z1 = self.encoder(x1) z2 = self.encoder(x2) prod_size = np.prod(x1.size()) x1_reconstruction_r = self.decoder(z1) x1_reconstruction_loss = criterion(x1_reconstruction_r, x1) x1_reconstruction_loss = x1_reconstruction_loss.mean() # TODO this is not adapted to product of shift operators, it's looking only at the 1st cardinal # Transform according to all possible angles, weighted angles_probas = self.compute_angles_probas(x1, x2, z1, z2) if use_argmax: predicted_angle = angles_probas.detach().argmax( -1, keepdims=True ) z_transformed = self.transform(z1, predicted_angle) x2_reconstruction_r = self.decoder(z_transformed) x2_reconstruction_loss = criterion(x2_reconstruction_r, x2) x2_reconstruction_loss = x2_reconstruction_loss.mean() else: all_angles = torch.arange(self.K).repeat(1, batch_size).view(-1, 1) temp = self.temperature mask = torch.softmax(angles_probas / temp, dim=-1) repeat_z1 = ( z1[0][:, None, :].repeat(1, self.K, 1).view(batch_size * self.K, -1), z1[1][:, None, :].repeat(1, self.K, 1).view(batch_size * self.K, -1), ) x2_repeat = ( x2[:, None, ...] .repeat(1, self.K, 1, 1, 1) .view(batch_size * self.K, x2.size(1), x2.size(2), x2.size(3)) ) z_transformed = self.transform(repeat_z1, all_angles) x2_reconstruction_r = self.decoder(z_transformed) x2_reconstruction_transformed_loss = ( criterion(x2_reconstruction_r, x2_repeat) .sum((1, 2, 3)) # sums over image dim .view(batch_size, -1) ) x2_reconstruction_loss = (mask * x2_reconstruction_transformed_loss).sum() / prod_size loss = x1_reconstruction_loss + x2_reconstruction_loss return loss def computes_cross_power_spectrum( self, z_batch_r1, z_batch_i1, z_batch_r2, z_batch_i2 ): """Computes Cross Power spectrum (no FFT) """ batch_size = z_batch_r1.size(0) z1z2_batch_r = ( z_batch_r1 * z_batch_r2 + z_batch_i1 * z_batch_i2 ) # recall we use the conjugate of z_batch_2, hence the + here z1z2_batch_i = ( -z_batch_r1 * z_batch_i2 + z_batch_i1 * z_batch_r2 ) # recall we use the conjugate of z_batch_2, hence the - in front here norm_z1z2_batch = ((z1z2_batch_r ** 2 + z1z2_batch_i ** 2)) ** 0.5 cps_r = z1z2_batch_r / norm_z1z2_batch cps_i = z1z2_batch_i / norm_z1z2_batch cps = torch.cat([cps_r[..., None], cps_i[..., None]], -1) return cps def compute_test_loss(self, loss_func, data_loader): """Computes RMSE based on given loss function.""" self.encoder.eval() self.decoder.eval() losses = [] N = 0 with torch.no_grad(): for i, (x1, x2, angles) in enumerate(data_loader): x1 = x1.to(device=self.device) x2 = x2.to(device=self.device) bs = x1.size(0) loss_batch = loss_func(x1, x2, angles, True)*bs N += bs losses.append(loss_batch) test_loss = torch.stack(losses).sum() / float(N) self.encoder.train() self.decoder.train() return test_loss def compute_mean_loss(self, loss_func, data_loader): """Computes RMSE based on given loss function.""" self.encoder.eval() self.decoder.eval() losses = [] with torch.no_grad(): for i, (x1, x2, angles) in 
enumerate(data_loader): x1 = x1.to(device=self.device) x2 = x2.to(device=self.device) loss_batch = loss_func(x1, x2, angles, True) losses.append(loss_batch) mean_loss = torch.stack(losses).mean() self.encoder.train() self.decoder.train() return mean_loss def run( self, learning_rate=0.0005, n_epochs=10, log_frequency=50 ): """Runs experiment for autoencoder reconstruction.""" loss_func = self.reconstruction_mse_transformed_z1_weak train_loss, valid_loss = self.train( loss_func, learning_rate, n_epochs, log_frequency ) train_mse = self.compute_mean_loss(loss_func, self.data.train_loader) print(f"Train MSE: {train_mse}") valid_mse = self.compute_mean_loss(loss_func, self.data.valid_loader) print(f"Valid MSE: {valid_mse}") test_mse = self.compute_test_loss(loss_func, self.data.test_loader_batch_100) print(f"Test MSE: {test_mse}") return train_loss, valid_loss, train_mse, valid_mse, test_mse def update_state(self, mse, epoch): self.best_mse = mse self.best_epoch = epoch def load_model(self, path_to_checkpoint): checkpoint = torch.load(path_to_checkpoint) self.best_epoch = checkpoint["best_epoch"] self.encoder.load_state_dict(checkpoint["encoder_state_dict"]) self.decoder.load_state_dict(checkpoint["decoder_state_dict"]) self.best_mse = checkpoint["best_mse"] return checkpoint["best_mse"], checkpoint["best_epoch"] def get_current_state(self): return { "encoder_state_dict": self.encoder.state_dict(), "decoder_state_dict": self.decoder.state_dict(), "best_epoch": self.best_epoch, "best_mse": self.best_mse, } def save_best_checkpoint(self, out_dir, file_name, optimizer_state_dict): """ :param file_name: filename to save checkpoint in. :param optimizer_state_dict: state of the optimizer. :return: str to path where the model is saved. """ state = self.get_current_state() state["optimizer_state_dict"] = optimizer_state_dict best_path = os.path.join(out_dir, "best_" + file_name) torch.save(state, best_path)
Addressing-the-Topological-Defects-of-Disentanglement-main
weakly_complex_shift_autoencoder.py
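# --- Illustrative sketch (not part of the original file) ---
# The weakly supervised model above scores candidate shifts with a
# normalized cross-power spectrum (computes_cross_power_spectrum + ifft).
# The classic phase-correlation fact it relies on, shown self-contained
# with torch.fft instead of the learned complex encoder: for circularly
# shifted signals, the inverse FFT of the normalized cross-power spectrum
# peaks at the true shift.
import torch

x = torch.randn(64)
shift = 5
y = torch.roll(x, shifts=shift)   # y[n] = x[(n - 5) mod 64]

X, Y = torch.fft.fft(x), torch.fft.fft(y)
cps = Y * torch.conj(X)           # cross-power spectrum
cps = cps / (cps.abs() + 1e-20)   # keep phase only
scores = torch.fft.ifft(cps).real # one score per candidate shift
assert scores.argmax().item() == shift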
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ import copy import torch import json import os import random import numpy as np import models import latent_operators import plot from datasets import datasets, transformations class AutoEncoder: """Trains an autoencoder on rotated shapes. Args: data (AbstractDataset): contains train and test loaders with transformation params z_dim (int): dimension of latent space seed (int): for random number generation translation (bool): if true, uses an offset identity matrix for rotation shift_x (bool): use shift values instead of angles in supervision. """ def __init__( self, data, z_dim=700, seed=0, encoder_type="Linear", decoder_type="Linear", latent_operator_name=None, device="cpu", learning_rate=0.0005, n_epochs=5, ): self.z_dim = z_dim self.seed = seed self.set_seed() self.data = data self.device = device self.encoder_type = encoder_type self.decoder_type = decoder_type self.encoder = getattr(models, encoder_type + "Encoder")( self.data.n_pixels, self.data.n_channels, z_dim ).to(self.device) self.decoder = getattr(models, decoder_type + "Decoder")( self.data.n_pixels, self.data.n_channels, z_dim ).to(self.device) self.encoder_best_valid = self.encoder self.decoder_best_valid = self.decoder self.learning_rate = learning_rate self.n_epochs = n_epochs self.transformation_param_name = self.get_transformation_param_name() # function used for latent transformation self.use_latent_op = False if latent_operator_name is None else True self.latent_operator_name = latent_operator_name self.latent_operator = self.get_latent_operator(latent_operator_name) self.train_losses = [] self.valid_losses = [] self.final_test_loss = None def __repr__(self): model = { "encoder_type": self.encoder_type, "decoder_type": self.decoder_type, "z_dim": self.z_dim, "latent_operator": self.latent_operator_name, "batch_size": self.data.batch_size, "learning_rate": self.learning_rate, "n_epochs": self.n_epochs, "data": str(self.data), } return json.dumps(model) def save(self, path, indices=None): os.makedirs(path, exist_ok=True) self.save_model_configs(path) self.save_models(path) self.save_losses(path) self.save_plots(path) def save_model_configs(self, path): model_configs_str = self.__repr__() model_configs = json.loads(model_configs_str) file_path = os.path.join(path, "model_configs.json") with open(file_path, "w") as outfile: json.dump(model_configs, outfile) def save_models(self, path): encoder_path = os.path.join(path, "encoder.pt") torch.save(self.encoder.state_dict(), encoder_path) decoder_path = os.path.join(path, "decoder.pt") torch.save(self.decoder.state_dict(), decoder_path) def load_models(self, path, device="cpu"): self.encoder.load_state_dict( torch.load(os.path.join(path, "encoder.pt"), map_location=device) ) self.decoder.load_state_dict( torch.load(os.path.join(path, "decoder.pt"), map_location=device) ) def save_losses(self, path): file_path = os.path.join(path, "train_losses.npy") np.save(file_path, self.train_losses) file_path = os.path.join(path, "valid_losses.npy") np.save(file_path, self.valid_losses) file_path = os.path.join(path, "test_loss.npy") np.save(file_path, self.final_test_loss) def save_plots(self, path): for train_set in [True, False]: set_name = "train" if train_set else "test" x1_plot_path = os.path.join(path, f"x1_{set_name}_reconstructions") self.plot_x1_reconstructions(save_name=x1_plot_path, 
train_set=train_set) # store x2 reconstructions only when using supervised latent operator if self.use_latent_op: x2_plot_path = os.path.join(path, f"x2_{set_name}_reconstructions") self.plot_x2_reconstructions( save_name=x2_plot_path, train_set=train_set ) transformation_name = ( "translations" if self.transformation_param_name != "angle" else "rotations" ) multiple_rotations_path = os.path.join( path, f"x_{set_name}_{transformation_name}" ) self.plot_multiple_rotations( save_name=multiple_rotations_path, train_set=train_set ) def save_best_validation(self, path, indices=None): self.encoder = self.encoder_best_valid self.decoder = self.decoder_best_valid self.save(path, indices=indices) def set_seed(self): """Sets seed for random number generation""" torch.manual_seed(self.seed) np.random.seed(self.seed) random.seed(self.seed) # Generate Dataset torch.autograd.set_detect_anomaly(True) def get_transformation_param_name(self): """Returns the parameter used for transformation""" if self.data.n_rotations > 1: return "angle" elif self.data.n_x_translations > 1: return "shift_x" elif self.data.n_y_translations > 1: return "shift_y" else: raise ValueError("No transformation found") def get_latent_operator(self, name): """Returns function to performance transformation based name""" if name is None: return None latent_operator = getattr(latent_operators, name) return latent_operator(self.n_transformations, self.device) @property def n_transformations(self): if self.data.n_rotations > 1: return self.data.n_rotations elif self.data.n_x_translations > 1: return self.data.n_x_translations elif self.data.n_y_translations > 1: return self.data.n_y_translations else: raise ValueError("No transformation found") def train(self, loss_func, stop_early=False, log_frequency=None): self.encoder.train().to(self.device) self.decoder.train().to(self.device) params = list(self.encoder.parameters()) + list(self.decoder.parameters()) optimizer = torch.optim.Adam(params, lr=self.learning_rate) if log_frequency is None: log_frequency = self.set_log_frequency() for epoch in range(self.n_epochs): running_loss = 0.0 print(f"Epoch {epoch}") self.log_train_val_loss(loss_func) for i, (x1, x2, params) in enumerate(self.data.train_loader): print(f"Training batch {i}", end="\r") x1 = x1.to(device=self.device) x2 = x2.to(device=self.device) angles = self.get_angles(params) angles = angles.to(device=self.device) optimizer.zero_grad() loss = loss_func(x1, x2, angles) loss.backward() optimizer.step() running_loss += loss.item() if i % log_frequency == (log_frequency - 1): print(f"Running loss: {running_loss / log_frequency:0.3e}") running_loss = 0.0 if stop_early: return None train_loss, valid_loss = self.log_train_val_loss(loss_func) self.copy_models_validation(valid_loss) # test loss per sample (using batch size 1) self.final_test_loss = self.compute_total_loss( self.data.test_loader_batch_1, loss_func ) print(f"Test Loss: {self.final_test_loss:0.3e}") def set_log_frequency(self): frequency = len(self.data.train_loader) // 10 return frequency def copy_models_validation(self, valid_loss): """Copies models with best validation""" if valid_loss < np.min(self.valid_losses): self.encoder_best_valid = copy.deepcopy(self.encoder) self.decoder_best_valid = copy.deepcopy(self.decoder) def log_train_val_loss(self, loss_func, show_print=True): train_loss = self.compute_total_loss(self.data.train_loader, loss_func) valid_loss = self.compute_total_loss(self.data.valid_loader, loss_func) self.train_losses.append(train_loss) 
self.valid_losses.append(valid_loss) if show_print: print(f"Total loss train: {train_loss:0.3e} validation: {valid_loss:0.3e}") return train_loss, valid_loss def compute_total_loss(self, loader, loss_func): self.encoder.eval() self.decoder.eval() losses = [] with torch.no_grad(): for x1, x2, params in loader: x1 = x1.to(device=self.device) x2 = x2.to(device=self.device) angles = self.get_angles(params) angles = angles.to(device=self.device) losses.append(loss_func(x1, x2, angles).cpu()) mean_loss = torch.stack(losses).mean() self.encoder.train() self.decoder.train() return mean_loss def reconstruction_mse_x1(self, x1, x2, angles): """Computes MSE x1 reconstruction loss""" criterion = torch.nn.MSELoss() z = self.encoder(x1) x1_reconstruction = self.decoder(z) loss = criterion(x1_reconstruction, x1) return loss def reconstruction_mse_transformed_z1(self, x1, x2, angles): """Computes reconstruction MSE of x1 from z1 + x2 from transformed(z1)""" criterion = torch.nn.MSELoss() z = self.encoder(x1) x1_reconstruction = self.decoder(z) x1_reconstruction_loss = criterion(x1_reconstruction, x1) z_transformed = self.latent_operator(z, angles) x2_reconstruction_loss = criterion(self.decoder(z_transformed), x2) loss = x1_reconstruction_loss + x2_reconstruction_loss return loss def reconstruction_mse_frozen_z1(self, x1, x2, angles): """Reconstruction loss of x2 from x1 without transformations""" criterion = torch.nn.MSELoss() z = self.encoder(x1) x2_reconstruction = self.decoder(z) loss = criterion(x2_reconstruction, x2) return loss def compute_mean_loss(self, loss_func, data_loader): """Computes RMSE based on given loss function.""" self.encoder.eval().cpu() self.decoder.eval().cpu() losses = [] for x1, x2, params in data_loader: angles = self.get_angles(params) losses.append(loss_func(x1, x2, angles).cpu()) mean_loss = torch.stack(losses).mean() return mean_loss def get_angles(self, params): """Returns tensor of angles for translations in x or rotations.""" param_name = self.transformation_param_name if param_name in ("shift_x", "shift_y"): angles = torch.tensor( [ transformations.shift_to_angle( getattr(p, param_name), self.n_transformations, ) for p in params ] ) else: angles = torch.tensor([p.angle for p in params]) return angles def run(self, log_frequency=None, stop_early=False): """Runs experiment for autoencoder reconstruction. Args: log_frequency (int): number of batches after which to print loss stop_early (bool): stop after a single log_frequency number of batches. Useful for testing without waiting for long training. """ if self.latent_operator_name is None: loss_func = self.reconstruction_mse_x1 elif self.latent_operator_name in ["ShiftOperator", "DisentangledRotation"]: loss_func = self.reconstruction_mse_transformed_z1 # TODO: what is frozen_rotation? 
elif self.latent_operator_name == "frozen_rotation": loss_func = self.reconstruction_mse_frozen_z1 else: raise ValueError( f"transformation type {self.latent_operator_name} not supported" ) self.train( loss_func, log_frequency=log_frequency, stop_early=stop_early, ) def reconstruct_x1(self, x1): """Reconstructs x1 using model""" self.encoder.eval().cpu() self.decoder.eval().cpu() with torch.no_grad(): z = self.encoder(x1) y = self.decoder(z) return y def reconstruct_transformed_x1(self, x1, param): """Reconstructs x1 transformed using model""" self.encoder.eval().cpu() self.decoder.eval().cpu() with torch.no_grad(): x_transformed = transformations.transform(x1.squeeze(0), param) z = self.encoder(x_transformed.unsqueeze(0)) y = self.decoder(z) return y def reconstruct_x2(self, x1, param): """Reconstructs x2 using model and latent transformation""" self.encoder.eval().cpu() self.decoder.eval().cpu() with torch.no_grad(): z = self.encoder(x1) angle = self.get_angles([param]).unsqueeze(0) z_transformed = self.latent_operator(z, angle) x2 = self.decoder(z_transformed) return x2 def plot_x1_reconstructions(self, indices=None, train_set=False, save_name=None): """Plots x1 autoencoder reconstruction from z1. Args: pairs (datasets.Pairs): contains x1, x2, and params. model (function): callable f(x1) = x1_reconstruction indices (list of ints): indices for samples to plot train_set (bool): if true title is plotted with train otherwise test. save_name (str): indicates path where images should be saved. """ pairs = self.data.X_train if train_set else self.data.X_test if indices is None: indices = random.sample(range(len(pairs)), k=4) plot.plot_x1_reconstructions( pairs, self.reconstruct_x1, indices, train_set, save_name ) def plot_x2_reconstructions(self, indices=None, train_set=False, save_name=None): """Plots x1, x2 and x2 autoencoder reconstruction from z1 rotated. Args: pairs (datasets.Pairs): contains x1, x2, and params. model (function): callable f(x1) = x1_reconstruction indices (list of ints): indices for samples to plot train_set (bool): if true title is plotted with train otherwise test. save_name (str): indicates path where images should be saved.
""" pairs = self.data.X_train if train_set else self.data.X_test if indices is None: indices = random.sample(range(len(pairs)), k=4) plot.plot_x2_reconstructions( pairs, self.reconstruct_x2, indices, train_set, save_name ) def plot_multiple_rotations(self, indices=None, train_set=False, save_name=None): """Plots all rotated reconstructions for given samples""" if indices is None: n_samples = min(len(self.data.X_orig_train), len(self.data.X_orig_test)) indices = np.random.randint(low=0, high=n_samples, size=5) X = ( self.data.X_orig_train[indices] if train_set else self.data.X_orig_test[indices] ).float() title = ( "Translations" if self.transformation_param_name != "angle" else "Rotations" ) plot.plot_rotations( X, self, self.n_transformations, title, save_name=save_name, param_name=self.transformation_param_name, use_latent_op=self.use_latent_op, ) def load_data(configs, path): data_configs = json.loads(configs["data"]) if "shapes" and "2k-classes" in path: data = datasets.SimpleShapes( configs["batch_size"], n_rotations=data_configs["n_rotations"], n_x_translations=data_configs["n_x_translations"], n_y_translations=data_configs["n_y_translations"], n_classes=2000, seed=0, ) elif "mnist" in path: data = datasets.ProjectiveMNIST( configs["batch_size"], n_rotations=data_configs["n_rotations"], n_x_translations=data_configs["n_x_translations"], n_y_translations=data_configs["n_y_translations"], train_set_proportion=0.01, valid_set_proportion=0.01, test_set_proportion=1.0, seed=0, ) else: raise ValueError("data not found") return data def load(path): with open(os.path.join(path, "model_configs.json")) as f: configs = json.load(f) data = load_data(configs, path) model_type = "CCI" if "cci" in path else "Linear" model = AutoEncoder( data, z_dim=configs["z_dim"], latent_operator_name=configs["latent_operator"], encoder_type=model_type, decoder_type=model_type, ) model.load_models(path) return model if __name__ == "__main__": device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(f"running on {device}") n_epochs = 2 simple_shapes = datasets.SimpleShapes(16) print("Training Autoencder") model = AutoEncoder(simple_shapes, device=device, n_epochs=n_epochs) model.run() print("Training Autoencder with Latent Translation") model_with_rotation = AutoEncoder( simple_shapes, latent_operator_name="ShiftOperator", device=device, n_epochs=n_epochs, ) model_with_rotation.run()
Addressing-the-Topological-Defects-of-Disentanglement-main
autoencoder.py
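# --- Illustrative sketch (not part of the original file) ---
# The supervised objective reconstruction_mse_transformed_z1 above is a
# two-term loss: reconstruct x1 from z1, and reconstruct x2 from a latent
# operator applied to z1. A toy stand-in (plain linear maps and a roll as
# the "operator"; none of these are the repo's actual modules):
import torch

encoder = torch.nn.Linear(16, 8)
decoder = torch.nn.Linear(8, 16)
latent_operator = lambda z, angles: torch.roll(z, 1, dims=1)  # toy operator

criterion = torch.nn.MSELoss()
x1, x2 = torch.randn(4, 16), torch.randn(4, 16)
z = encoder(x1)
loss = criterion(decoder(z), x1) + criterion(decoder(latent_operator(z, None)), x2)
loss.backward()  # trains encoder and decoder jointly on both terms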
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ """Implements CCI VAE https://arxiv.org/abs/1804.03599 """ import torch import os import numpy as np import models import json import plot import copy import random from datasets import datasets, transformations from datasets.data_utils import x_to_image from sklearn.decomposition import PCA import matplotlib import matplotlib.pyplot as plt class CCIVariationalAutoEncoder: """Trains an autoencoder on rotated shapes. Args: data (AbstractDataset): contains train and test loaders with angles model (CCIVAE model): contains forward funtion with encoder / decoder beta (float): beta in beta-VAE model c_max (float): maximum value for controlled capacity parameter in CCI VAE. z_dim (int): dimension of latent space seed (int): for random number generation translation (bool): if true, uses an offset identity matrix for rotation """ def __init__( self, data, model=models.CCIVAE, beta=1000.0, c_max=36.0, z_dim=30, seed=0, device="cpu", learning_rate=0.0005, n_epochs=5, distribution="gaussian", ): self.beta, self.c_max = beta, c_max self.c = 0.0 self.z_dim = z_dim self.data = data self.device = device self.model_cls = model self.model = model( self.data.n_pixels, self.data.n_channels, z_dim, distribution=distribution ) self.model.to(device=device) self.model_best_valid = self.model self.learning_rate = learning_rate self.n_epochs = n_epochs self.distribution = distribution self.seed = seed self.set_seed() self.train_losses = [] self.kl_losses = [] self.reconstruction_losses = [] self.valid_losses = [] self.final_test_loss = None def __repr__(self): model = { "model_class": str(self.model_cls), "beta": self.beta, "c_max": self.c_max, "distribution": self.distribution, "z_dim": self.z_dim, "batch_size": self.data.batch_size, "learning_rate": self.learning_rate, "n_epochs": self.n_epochs, "data": str(self.data), } return json.dumps(model) def set_seed(self): """Sets seed for random number generation""" torch.manual_seed(self.seed) np.random.seed(self.seed) random.seed(self.seed) # Generate Dataset torch.autograd.set_detect_anomaly(True) def compute_loss(self, x1): """Loss for controlled capacity beta vae (CCI VAE) https://arxiv.org/abs/1804.03599 """ if self.distribution == "gaussian": criterion = torch.nn.MSELoss(reduction="sum") elif self.distribution == "bernoulli": criterion = torch.nn.BCELoss(reduction="sum") else: raise ValueError(f"distribution {self.distribution} not supported") # assuming a Gaussian Distribution out, mu, log_var = self.model(x1) reconstruction_loss = criterion(out, x1) # https://arxiv.org/abs/1312.6114 # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) kl_divergence = ( -0.5 * (1 + log_var - mu.pow(2) - log_var.exp()).mean(dim=0) ).sum() return reconstruction_loss, kl_divergence def train(self, stop_early=False, log_frequency=None, track_losses=True): """Trains controlled capacity beta vae (CCI VAE) https://arxiv.org/abs/1804.03599 Learning rate used in the paper is 5e-4 If verbose is False, previous loss print is overridden If stop_early is True, training stops after first logged loss. This is useful for testing. 
""" self.model.train().to(self.device) optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate) c_step_size = (self.c_max - self.c) / self.n_epochs if log_frequency is None: log_frequency = self.set_log_frequency() for epoch in range(self.n_epochs): running_loss = 0.0 print(f"Epoch {epoch}") if track_losses: self.log_train_val_loss() running_loss = 0.0 running_reconstruction_loss, running_kl_divergence = 0.0, 0.0 # update controlled capacity parameter self.c += c_step_size for i, (x1, _, _) in enumerate(self.data.train_loader): x1 = x1.to(device=self.device) optimizer.zero_grad() reconstruction_loss, kl_divergence = self.compute_loss(x1) loss = reconstruction_loss + self.beta * (kl_divergence - self.c).abs() loss.backward() optimizer.step() running_loss += loss.item() running_reconstruction_loss += ( reconstruction_loss.cpu().detach().numpy() ) running_kl_divergence += kl_divergence.cpu().detach().numpy() if i % log_frequency == (log_frequency - 1): normalized_loss = running_loss / log_frequency normalized_reconstruction_loss = ( running_reconstruction_loss / log_frequency ) normalized_kl_divergence = running_kl_divergence / log_frequency print(f"Running Total Loss: {normalized_loss:0.3e}") print( f"Running Reconstruction Loss: {normalized_reconstruction_loss:0.3e}" f" KL Divergence: {normalized_kl_divergence:0.3e}" ) self.kl_losses.append(normalized_kl_divergence) self.reconstruction_losses.append(normalized_reconstruction_loss) running_loss = 0.0 running_reconstruction_loss = 0.0 running_kl_divergence = 0.0 if stop_early: return None if track_losses: train_loss, valid_loss = self.log_train_val_loss() self.copy_models_validation(valid_loss) # compute test loss per sample self.final_test_loss = self.compute_total_loss( self.data.test_loader_batch_1 ) print(f"Test Loss: {self.final_test_loss:0.3e}") def set_log_frequency(self): frequency = len(self.data.train_loader) // 10 return frequency def copy_models_validation(self, valid_loss): """Copies models with best validation""" if valid_loss < np.min(self.valid_losses): self.model_vest_valid = copy.deepcopy(self.model) def log_train_val_loss(self, show_print=True): train_loss = self.compute_total_loss(self.data.train_loader) valid_loss = self.compute_total_loss(self.data.valid_loader) self.train_losses.append(train_loss) self.valid_losses.append(valid_loss) if show_print: print(f"Total loss train: {train_loss:0.3e} validation: {valid_loss:0.3e}") return train_loss, valid_loss def compute_total_loss(self, loader): """Computes total average loss on given loader""" self.model.eval() losses = [] with torch.no_grad(): for x1, x2, params in loader: x1 = x1.to(device=self.device) reconstruction_loss, kl_divergence = self.compute_loss(x1) loss = reconstruction_loss + self.beta * (kl_divergence - self.c).abs() losses.append(loss.item()) mean_loss = np.mean(losses) self.model.train() return mean_loss def reconstruct_x1(self, x1): """Reconstructs x1 using model""" self.model.eval().cpu() with torch.no_grad(): y, _, _ = self.model(x1) return y def reconstruct_mean(self, x1): self.model.eval().cpu() with torch.no_grad(): _, mu, _ = self.model(x1) out = self.model.decoder(mu) return out def save_best_validation(self, path, indices=None): """Saves results best for model with best validation loss""" self.model = self.model_best_valid self.save(path, indices=indices) def save(self, path, indices=None): os.makedirs(path, exist_ok=True) self.save_model_configs(path) self.save_model(path) self.save_losses(path) self.save_plots(path) def 
save_model_configs(self, path): model_configs_str = self.__repr__() model_configs = json.loads(model_configs_str) file_path = os.path.join(path, "model_configs.json") with open(file_path, "w") as outfile: json.dump(model_configs, outfile) def load_model(self, path): device = torch.device("cpu") model = self.model_cls(self.data.n_pixels, self.data.n_channels, self.z_dim) model.load_state_dict(torch.load(path, map_location=device)) self.model = model self.model.to(device=device) def save_model(self, path): full_path = os.path.join(path, "model.pt") torch.save(self.model.state_dict(), full_path) def save_losses(self, path): file_path = os.path.join(path, "kl_divergence.npy") np.save(file_path, self.kl_losses) file_path = os.path.join(path, "reconstruction_losses.npy") np.save(file_path, self.reconstruction_losses) file_path = os.path.join(path, "train_losses.npy") np.save(file_path, self.train_losses) file_path = os.path.join(path, "valid_losses.npy") np.save(file_path, self.valid_losses) file_path = os.path.join(path, "test_loss.npy") np.save(file_path, self.final_test_loss) def save_plots(self, path): matplotlib.use("Agg") for train_set in [True, False]: set_name = "train" if train_set else "test" x1_plot_path = os.path.join(path, f"x1_{set_name}_reconstructions") self.plot_x1_reconstructions(save_name=x1_plot_path, train_set=train_set) latent_traversal_path = os.path.join(path, f"x_{set_name}_latent_traversal") self.plot_latent_traversal( save_name=latent_traversal_path, train_set=train_set ) def plot_x1_reconstructions(self, indices=None, train_set=False, save_name=None): """Plots x1 autoencoder reconstruction from z1. Args: pairs (datasets.Pairs): contains x1, x2, and params. model (function): callable f(x1) = x1_reconstruction indices (list of ints): indices for samples to plot train_set (bool): if true title is plotted with train otherwise test. save_name (str): indicates path where images should be saved. """ pairs = self.data.X_train if train_set else self.data.X_test if indices is None: indices = random.sample(range(len(pairs)), k=4) plot.plot_x1_reconstructions( pairs, self.reconstruct_mean, indices, train_set, save_name ) def plot_latent_traversal( self, indices=None, num_std=6.0, train_set=True, save_name=None, fixed_range=True, ): """Traverses latent space from [mu - 3 * std, mu + 3 * std] for given indices. If fixed_range is True, then [-num_std, num_std] is the interval. 
""" self.model.eval().cpu() pairs = self.data.X_train if train_set else self.data.X_test if indices is None: indices = random.sample(range(len(pairs)), k=3) for index in indices: sample_save_name = save_name if save_name is not None: sample_save_name = save_name + "_sample_" + str(index) self._plot_latent_traversal_helper( pairs, index, num_std, train_set, sample_save_name, fixed_range ) def plot_single_latent_traversal( self, index=3, train_set=True, latent_dim=0, save_name=None, num_std=6.0, ): self.model.eval().cpu() pairs = self.data.X_train if train_set else self.data.X_test sample_save_name = save_name if save_name is not None: sample_save_name = save_name + "_sample_" + str(index) x1, x2, p = pairs[index] title = "Training" if train_set else "Test" traversal_path = CCIVariationalAutoEncoder.get_std_path(num_std) num_subplots = len(traversal_path) + 1 fig, axs = plt.subplots(1, num_subplots, figsize=(12, 16)) axs[0].imshow(x1.squeeze()) axs[0].set_title(f"{title}: x1, latent {latent_dim}") axs[0].set_xticks([]) axs[0].set_yticks([]) with torch.no_grad(): _, mu, log_var = self.model(x1.unsqueeze(0)) z = mu for i, step in enumerate(traversal_path): z_shifted = z.clone().cpu().detach() z_shifted[0][latent_dim] = step with torch.no_grad(): reconstruction = self.model.decoder(z_shifted) axs[i + 1].imshow(reconstruction.squeeze().detach().numpy()) axs[i + 1].set_xticks([]) axs[i + 1].set_yticks([]) fig.tight_layout() if save_name: # close figure to speed up saving plt.savefig(sample_save_name, bbox_inches="tight", dpi=100) plt.close(fig) @staticmethod def get_std_path(num_std): """Returns list of std steps. [-3, -2, -1, 0, 1, 2, 3] """ step_size = num_std / 3.0 positive_steps = [i * step_size for i in range(1, 4)] negative_steps = sorted(list(-1 * np.array(positive_steps))) path = negative_steps + [0] + positive_steps return path def _plot_latent_traversal_helper( self, X, index, num_std, train_set, save_name, fixed_range ): title = "Training" if train_set else "Test" traversal_path = CCIVariationalAutoEncoder.get_std_path(num_std) num_subplots = len(traversal_path) + 1 x1, x2, p = X[index] fig, axs = plt.subplots(self.z_dim, num_subplots, figsize=(20, 60)) for dim in range(self.z_dim): axs[dim, 0].imshow(x1.squeeze()) axs[dim, 0].set_title(f"{title}: x1, latent {dim}") axs[dim, 0].set_xticks([]) axs[dim, 0].set_yticks([]) with torch.no_grad(): _, mu, log_var = self.model(x1.unsqueeze(0)) z = mu for i, step in enumerate(traversal_path): if not fixed_range: z_shifted = CCIVariationalAutoEncoder.shift_latent( z, dim, step, log_var ) else: z_shifted = z.clone().cpu().detach() z_shifted[0][dim] = step with torch.no_grad(): reconstruction = self.model.decoder(z_shifted) axs[dim, i + 1].imshow(reconstruction.squeeze().detach().numpy()) if not fixed_range: axs[dim, i + 1].set_title(f"std {step:.1f}") else: axs[dim, i + 1].set_title(f"{step:.1f}") axs[dim, i + 1].set_xticks([]) axs[dim, i + 1].set_yticks([]) fig.tight_layout() if save_name: # close figure to speed up saving plt.savefig(save_name, bbox_inches="tight", dpi=100) plt.close(fig) @staticmethod def shift_latent(z, dim, num_std, log_var): """Shifts latent by num_std along index of latent dimension""" std = torch.exp(log_var / 2.0) z_shifted = z.clone().cpu().detach() z_shifted[0][dim] += num_std * std[0][dim] return z_shifted def get_latents(self, train_set=False, num_batches=1000): """Returns latent representation for random indices""" self.model.eval().cpu() loader = self.data.train_loader if train_set else self.data.test_loader Z = [] 
for i, (x1, x2, p) in enumerate(loader): z = self.get_latent(x1) Z.append(z) if i == num_batches: break Z = torch.cat(Z) return Z def get_latent(self, x): with torch.no_grad(): _, mu, var = self.model(x) z = self.model.reparameterize(mu, var) return z def compute_latent_variances(self, n_samples=None): """Computes variance of latents across transformations of a sample""" if n_samples is None: n_samples = len(self.data.X_orig_test) variances = [] for i in range(n_samples): x1 = self.data.X_orig_test[i] self.model.eval().cpu() with torch.no_grad(): sample_latents = [] for param in self.data.transform_params: x_transformed = transformations.transform(x1, param) _, mu, log_var = self.model(x_transformed.unsqueeze(0)) # use mean of latent z = mu sample_latents.append(z) sample_latents = torch.cat(sample_latents) sample_var = sample_latents.var(dim=0) variances.append(sample_var) variances = torch.stack(variances).numpy() return variances def compute_latents_per_shape(self, n_samples=None): """Computes latents across transformations of a sample""" if n_samples is None: n_samples = len(self.data.X_orig_test) latents = [] for i in range(n_samples): x1 = self.data.X_orig_test[i] self.model.eval().cpu() with torch.no_grad(): sample_latents = [] for param in self.data.transform_params: x_transformed = transformations.transform(x1, param) _, mu, log_var = self.model(x_transformed.unsqueeze(0)) # use mean of latent z = mu sample_latents.append(z) sample_latents = torch.cat(sample_latents) latents.append(sample_latents) latents = torch.stack(latents).numpy() return latents def pca_ranked_eigenvalues(self, n_samples=None): """Returns average of ranked normalized eigenvalues for latents""" latents = self.compute_latents_per_shape(n_samples=n_samples) n_components = self.data.n_rotations + 1 aggregate_ranked_normalized_eigenvalues = [] for latent in latents: pca = PCA(n_components=n_components) pca.fit(latent) ranked_normalized_eigenvalues = np.sort(pca.explained_variance_ratio_)[::-1] aggregate_ranked_normalized_eigenvalues.append( ranked_normalized_eigenvalues ) aggregate_ranked_normalized_eigenvalues = np.stack( aggregate_ranked_normalized_eigenvalues ) average_var_explained = np.mean(aggregate_ranked_normalized_eigenvalues, axis=0) return average_var_explained def compute_mutual_info(variances): """Variances is a numpy array with shape (n_samples, z_dim)""" n = variances.shape[0] m_info = np.log(2 * np.pi * variances).sum(0) / (2.0 * n) return m_info def load_data(configs, path): data_configs = json.loads(configs["data"]) if "shapes" in path and "2k-classes" in path: data = datasets.SimpleShapes( configs["batch_size"], n_rotations=data_configs["n_rotations"], n_x_translations=data_configs["n_x_translations"], n_y_translations=data_configs["n_y_translations"], n_classes=2000, seed=0, ) elif "mnist" in path: data = datasets.ProjectiveSingleDigitMNIST( configs["batch_size"], n_rotations=data_configs["n_rotations"], n_x_translations=data_configs["n_x_translations"], n_y_translations=data_configs["n_y_translations"], train_set_proportion=0.1, valid_set_proportion=0.1, test_set_proportion=1.0, seed=0, ) else: raise ValueError("data not found") return data def load(path): with open(os.path.join(path, "model_configs.json")) as f: configs = json.load(f) data = load_data(configs, path) model = CCIVariationalAutoEncoder( data, z_dim=configs["z_dim"], beta=configs["beta"], c_max=configs["c_max"], distribution=configs["distribution"], learning_rate=configs["learning_rate"], n_epochs=configs["n_epochs"], )
model.load_model(os.path.join(path, "model.pt")) return model if __name__ == "__main__": device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(f"running on {device}") n_epochs = 2 batch_size = 16 simple_shapes = datasets.SimpleShapes(batch_size) vae = CCIVariationalAutoEncoder( simple_shapes, beta=0.0, c_max=0.0, device=device, n_epochs=n_epochs ) vae.train()
Addressing-the-Topological-Defects-of-Disentanglement-main
cci_variational_autoencoder.py
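# --- Illustrative sketch (not part of the original file) ---
# The objective assembled in CCIVariationalAutoEncoder.compute_loss/train
# above (https://arxiv.org/abs/1804.03599): a beta-VAE loss whose KL term is
# pulled toward a capacity c that grows linearly to c_max over training.
# beta, c_max, n_epochs mirror the class defaults; mu/log_var stand in for
# encoder outputs.
import torch

beta, c_max, n_epochs = 1000.0, 36.0, 5
mu, log_var = torch.randn(8, 30), torch.randn(8, 30)

# KL(q(z|x) || N(0, I)) = -0.5 * sum(1 + log sigma^2 - mu^2 - sigma^2),
# averaged over the batch and summed over latent dimensions
kl_divergence = (-0.5 * (1 + log_var - mu.pow(2) - log_var.exp()).mean(dim=0)).sum()
reconstruction_loss = torch.tensor(100.0)  # placeholder for the MSE/BCE term

c = 0.0
for epoch in range(n_epochs):
    c += c_max / n_epochs  # controlled capacity schedule
    loss = reconstruction_loss + beta * (kl_divergence - c).abs()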
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ import torch import numpy as np import random import matplotlib import matplotlib.pyplot as plt import models import latent_operators from datasets import datasets from datasets.data_utils import x_to_image import plot import pdb import os import shutil import numpy as np eps = 1e-20 class ComplexAutoEncoder: """Trains a shift operator. Args: data (AbstractDataset): contains train and test loaders with angles z_dim (int): dimension of latent space seed (int): for random number generation translation (bool): if true, uses an offset identity matrix for rotation """ def __init__( self, data, z_dim=405, seed=0, encoder_type="ComplexLinear", decoder_type="ComplexLinear", transformation_types=None, indexes=None, device="cpu", output_directory="output", save_name="", n_rotations = 0, n_x_translations = 0, n_y_translations = 0, scaling_factors = (1, ) ): self.z_dim = z_dim self.seed = seed self.set_seed() self.data = data self.device = device self.encoder = getattr(models, encoder_type + "Encoder")( self.data.n_pixels, self.data.n_channels, z_dim ).to(self.device) self.decoder = getattr(models, decoder_type + "Decoder")( self.data.n_pixels, self.data.n_channels, z_dim ).to(self.device) self.transformation_types = transformation_types self.W_r = torch.nn.ModuleList() self.W_i = torch.nn.ModuleList() for i in range(len(self.transformation_types)-1): self.W_r.append(torch.nn.Linear(z_dim, z_dim, bias=False).to(self.device)) self.W_i.append(torch.nn.Linear(z_dim, z_dim, bias=False).to(self.device)) cardinals = [ n_rotations + 1, n_x_translations + 1, n_y_translations + 1, len(scaling_factors), ] self.cardinals = cardinals # function used for transformation # indexes 0, 1, 2 self.transforms = [] for i in range(len(transformation_types)): self.transforms.append(self.get_transformation(transformation_types[i], indexes[i])) self.output_dir = output_directory self.save_name = save_name self.best_epoch = 0 self.best_mse = 0 def set_seed(self): """Sets seed for random number generation""" torch.manual_seed(self.seed) np.random.seed(self.seed) random.seed(self.seed) # Generate Dataset torch.autograd.set_detect_anomaly(True) def get_transformation(self, name, index): """Returns function to performance transformation based name""" if name is None: return None transformation = getattr(latent_operators, name) return transformation(self.cardinals, self.z_dim, self.device, unique_transfo = True, index=index) def return_shifts(self, params): smallest_angle = 360 / (self.data.n_rotations + 1) int_x = round(self.data.n_pixels / (self.data.n_x_translations + 1)) int_y = round(self.data.n_pixels / (self.data.n_y_translations + 1)) shifts_x = torch.LongTensor([[param.shift_x/int_x for param in params]]).t() shifts_y = torch.LongTensor([[param.shift_y/int_y for param in params]]).t() shifts_r = torch.LongTensor([[int(param.angle/smallest_angle) for param in params]]).t() shifts = [] if self.data.n_rotations > 0: shifts.append(shifts_r) if self.data.n_x_translations > 0: shifts.append(shifts_x) if self.data.n_y_translations > 0: shifts.append(shifts_y) return shifts def transform(self, z1, shifts): N_transfo = len(self.transforms) # shifts is now a tuple z_r = z1[0] z_i = z1[1] for i in range(0,N_transfo-1,1): z_transformed = self.transforms[i]((z_r,z_i), shifts[i]) z_r = z_transformed[0] z_i = z_transformed[1] z_r = 
self.W_r[i](z_r) - self.W_i[i](z_i) z_i= self.W_r[i](z_i) + self.W_i[i](z_r) z_transformed = self.transforms[N_transfo-1]((z_r,z_i), shifts[N_transfo-1]) return z_transformed def train(self, loss_func, learning_rate, n_epochs, log_frequency): self.encoder.train() self.decoder.train() params = list(self.encoder.parameters()) + list(self.decoder.parameters()) + \ list(self.W_r.parameters()) + list(self.W_i.parameters()) optimizer = torch.optim.Adam(params, lr=learning_rate) train_losses = torch.FloatTensor(n_epochs) valid_losses = torch.FloatTensor(n_epochs) best_mse = np.inf N_pairs = len(self.data.train_loader.dataset) for epoch in range(n_epochs): epoch_loss = 0 for i, (x1, x2, angles) in enumerate(self.data.train_loader): x1 = x1.to(device=self.device) x2 = x2.to(device=self.device) optimizer.zero_grad() loss = loss_func(x1, x2, angles) loss.backward() optimizer.step() epoch_loss += loss.item() * x1.size(0) epoch_loss = epoch_loss / N_pairs print(f"Epoch {epoch} Train loss: {epoch_loss:0.3e}") valid_mse = ( self.compute_mean_loss(loss_func, self.data.valid_loader) .detach() .item() ) train_losses[epoch] = epoch_loss if valid_mse < best_mse: self.update_state(mse=valid_mse, epoch=epoch) best_mse = valid_mse file_name = "checkpoint_{}.pth.tar".format(self.save_name) self.save_best_checkpoint( out_dir=self.output_dir, file_name=file_name, optimizer_state_dict=optimizer.state_dict(), ) print(f"Epoch {epoch} validation loss: {valid_mse:0.3e}") valid_losses[epoch] = valid_mse return train_losses.detach().numpy(), valid_losses.detach().numpy() def reconstruct_x1(self, x1): """Reconstructs x1 using model""" self.encoder.eval() self.decoder.eval() x1 = x1.to(device=self.device) with torch.no_grad(): z1 = self.encoder(x1) x1_reconstruction_r = self.decoder(z1) return x1_reconstruction_r def reconstruct_x2(self, x1, param): """Reconstructs x2 using model and latent transformation""" self.encoder.eval() self.decoder.eval() x1 = x1.to(device=self.device) batch_size = x1.size(0) with torch.no_grad(): z1 = self.encoder(x1) shifts = self.return_shifts([param]) z_transformed = self.transform(z1, shifts) x2_reconstruction_r = self.decoder(z_transformed) return x2_reconstruction_r def plot_x1_reconstructions( self, indices=[10, 2092, 10299, 13290], train_set=False, save_name=None ): """Plots x1 autoencoder reconstruction from z1. Args: pairs (datasets.Pairs): contains x1, x2, and params. model (function): callable f(x1) = x1_reconstruction indices (list of ints): indices for samples to plot train_set (bool): if true title is plotted with train otherwise test. save_name (str): indicates path where images should be saved. """ pairs = self.data.X_train if train_set else self.data.X_test plot.plot_x1_reconstructions( pairs, self.reconstruct_x1, indices, train_set, save_name ) def plot_x2_reconstructions( self, indices=[10, 2092, 10299, 13290], train_set=False, save_name=None ): """Plots x1, x2 and x2 autoencoder reconstruction from z1 rotated. Args: pairs (datasets.Pairs): contains x1, x2, and params. model (function): callable f(x1) = x1_reconstruction indices (list of ints): indices for samples to plot train_set (bool): if true title is plotted with train otherwise test. save_name (str): indicates path where images should be saved. 
""" pairs = self.data.X_train if train_set else self.data.X_test plot.plot_x2_reconstructions( pairs, self.reconstruct_x2, indices, train_set, save_name ) def reconstruction_mse_transformed_z1(self, x1, x2, params): """Computes reconstruction MSE of x1 from z1 + x2 from transformed(z1), not using ground-truth angles""" criterion = torch.nn.MSELoss(reduction="none") batch_size = x1.size(0) z1 = self.encoder(x1) x1_reconstruction_r = self.decoder(z1) x1_reconstruction_loss = criterion(x1_reconstruction_r, x1) x1_reconstruction_loss = x1_reconstruction_loss.mean() shifts = self.return_shifts(params) z_transformed = self.transform(z1, shifts) x2_reconstruction_r = self.decoder(z_transformed) x2_reconstruction_loss = criterion(x2_reconstruction_r, x2) x2_reconstruction_loss = x2_reconstruction_loss.mean() loss = x1_reconstruction_loss + x2_reconstruction_loss return loss def compute_test_loss(self, loss_func, data_loader): """Computes RMSE based on given loss function.""" self.encoder.eval() self.decoder.eval() losses = [] N = 0 with torch.no_grad(): for i, (x1, x2, angles) in enumerate(data_loader): x1 = x1.to(device=self.device) x2 = x2.to(device=self.device) bs = x1.size(0) loss_batch = loss_func(x1, x2, angles)*bs N += bs losses.append(loss_batch) test_loss = torch.stack(losses).sum() / float(N) self.encoder.train() self.decoder.train() return test_loss def compute_mean_loss(self, loss_func, data_loader): """Computes RMSE based on given loss function.""" self.encoder.eval() self.decoder.eval() losses = [] with torch.no_grad(): for i, (x1, x2, angles) in enumerate(data_loader): x1 = x1.to(device=self.device) x2 = x2.to(device=self.device) loss_batch = loss_func(x1, x2, angles) losses.append(loss_batch) mean_loss = torch.stack(losses).mean() self.encoder.train() self.decoder.train() return mean_loss def run( self, learning_rate=0.0005, n_epochs=10, log_frequency=50 ): """Runs experiment for autoencoder reconstruction.""" loss_func = self.reconstruction_mse_transformed_z1 train_loss, valid_loss = self.train( loss_func, learning_rate, n_epochs, log_frequency ) train_mse = self.compute_mean_loss(loss_func, self.data.train_loader) print(f"Train MSE: {train_mse}") valid_mse = self.compute_mean_loss(loss_func, self.data.valid_loader) print(f"Valid MSE: {valid_mse}") test_mse = self.compute_test_loss(loss_func, self.data.test_loader_batch_100) print(f"Test MSE: {test_mse}") return train_loss, valid_loss, train_mse, valid_mse, test_mse def update_state(self, mse, epoch): self.best_mse = mse self.best_epoch = epoch def load_model(self, path_to_checkpoint): checkpoint = torch.load(path_to_checkpoint) self.best_epoch = checkpoint["best_epoch"] self.encoder.load_state_dict(checkpoint["encoder_state_dict"]) self.decoder.load_state_dict(checkpoint["decoder_state_dict"]) for t in range(len(self.transformation_types) - 1): self.W_r[t].load_state_dict(checkpoint["W_r"][t]) self.W_i[t].load_state_dict(checkpoint["W_i"][t]) self.best_mse = checkpoint["best_mse"] return checkpoint["best_mse"], checkpoint["best_epoch"] def get_current_state(self): W_r = {} W_i = {} for t in range(len(self.transformation_types)-1): W_r[t] = self.W_r[t].state_dict() W_i[t] = self.W_i[t].state_dict() return { "encoder_state_dict": self.encoder.state_dict(), "decoder_state_dict": self.decoder.state_dict(), "W_r": W_r, "W_i": W_i, "best_epoch": self.best_epoch, "best_mse": self.best_mse, } def save_best_checkpoint(self, out_dir, file_name, optimizer_state_dict): """ :param file_name: filename to save checkpoint in. 
:param optimizer_state_dict: state of the optimizer. :return: str to path where the model is saved. """ state = self.get_current_state() state["optimizer_state_dict"] = optimizer_state_dict best_path = os.path.join(out_dir, "best_" + file_name) torch.save(state, best_path) def plot_multiple_transformations_stacked(self, indices, n_plots, train_set=False, save_name=None): degree_sign = "\N{DEGREE SIGN}" if indices is None: n_samples = min(len(self.data.X_orig_train), len(self.data.X_orig_test)) indices = np.random.randint(low=0, high=n_samples, size=5) X = ( self.data.X_orig_train[indices] if train_set else self.data.X_orig_test[indices] ).float() plot.plot_rotations_translations( X, self, n_plots, self.data.n_rotations, self.data.n_x_translations, self.data.n_y_translations, save_name=save_name ) def plot_multiple_transformations(self, param_name='angle', indices=None, train_set=False, save_name=None): """Plots all rotated reconstructions for given samples""" if indices is None: n_samples = min(len(self.data.X_orig_train), len(self.data.X_orig_test)) indices = np.random.randint(low=0, high=n_samples, size=5) X = ( self.data.X_orig_train[indices] if train_set else self.data.X_orig_test[indices] ).float() title = ( "Translations" if param_name=='angle' != "angle" else "Rotations" ) plot.plot_transformations_complex( X, self, title, save_name=save_name, param_name=param_name, supervised=True, )
Addressing-the-Topological-Defects-of-Disentanglement-main
complex_shift_autoencoder.py
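The latent update in ComplexAutoEncoder.transform is complex multiplication by a learned map (W_r + i W_i), written out over real and imaginary parts. A minimal self-contained sketch of that arithmetic, with a toy z_dim and randomly initialized W_r/W_i standing in for the learned layers (not the repo's actual training path):

import torch

z_dim = 4
W_r = torch.nn.Linear(z_dim, z_dim, bias=False)
W_i = torch.nn.Linear(z_dim, z_dim, bias=False)
z_r, z_i = torch.randn(1, z_dim), torch.randn(1, z_dim)

# (W_r + i W_i)(z_r + i z_i) = (W_r z_r - W_i z_i) + i (W_r z_i + W_i z_r)
new_r = W_r(z_r) - W_i(z_i)
new_i = W_r(z_i) + W_i(z_r)  # note: uses the pre-update z_r

# cross-check against torch's native complex matmul
W = torch.complex(W_r.weight, W_i.weight)
z = torch.complex(z_r, z_i)
ref = z @ W.T
assert torch.allclose(torch.complex(new_r, new_i), ref, atol=1e-6)

The cross-check is why the temporary in transform matters: reusing an already-updated z_r when computing z_i would no longer agree with the complex product.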
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. --- Saves model/plots for best validation MSE """ import math import numpy as np import os from distutils.dir_util import copy_tree def save_best_validation_helper(folder, operator): min_valid_loss = math.inf for sweep in os.listdir(folder): if sweep.startswith("best") or sweep.startswith(".DS_Store"): continue path = os.path.join(folder, sweep, operator) try: valid_loss = np.min(np.load(os.path.join(path, "valid_losses.npy"))) except FileNotFoundError: print(f"run {sweep} missing for {operator}") continue if min_valid_loss >= valid_loss: min_valid_loss = valid_loss destination = os.path.join(folder, "best-validation", operator) copy_tree(path, destination) def save_all_best_validation(parent_folder): for experiment in os.listdir(parent_folder): experiment_path = os.path.join(parent_folder, experiment) if experiment.endswith("-sweep") and "autoencoder" in experiment and "standard" not in experiment: save_best_validation_helper(experiment_path, "disentangled-operator") save_best_validation_helper(experiment_path, "shift-operator") elif experiment.endswith("-sweep") and "standard-autoencoder" in experiment: save_best_validation_helper(experiment_path, "standard-autoencoder") elif experiment.endswith("-sweep") and "cci-vae" in experiment: save_best_validation_helper(experiment_path, "cci_vae") save_best_validation_helper(experiment_path, "beta_vae") save_best_validation_helper(experiment_path, "vae") if __name__ == "__main__": user = os.environ["USER"] parent_folder = f"/checkpoint/{user}/Equivariance/" save_all_best_validation(parent_folder)
Addressing-the-Topological-Defects-of-Disentanglement-main
save_best_validation.py
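For reference, a small self-contained sketch of the sweep layout that save_best_validation_helper walks; the sweep and operator names here are hypothetical:

import os
import tempfile
import numpy as np

root = tempfile.mkdtemp()
for sweep, losses in [("sweep0", [0.9, 0.5]), ("sweep1", [0.8, 0.3])]:
    path = os.path.join(root, sweep, "shift-operator")
    os.makedirs(path)
    np.save(os.path.join(path, "valid_losses.npy"), np.array(losses))
# save_best_validation_helper(root, "shift-operator") would end with
# root/sweep1/shift-operator (min valid loss 0.3) copied into
# root/best-validation/shift-operator via copy_tree.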
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ """ Transformations applied to the input images """ import torch import itertools import numpy as np import skimage.transform from dataclasses import dataclass # TODO: set automaticlaly based on n_pixels TRANSLATION_INTERVAL = [0, 28] @dataclass class Params: """ angle (float): counter-clockwise rotation angle in degrees shift_x (float): shift value to the right shift_y (float): shift value to upwards scale (float): scaling factor """ angle: float = 0.0 shift_x: float = 0.0 shift_y: float = 0.0 scale: float = 1.0 def transform(image, params): """ Applies transformations on a single image based on params. Order of transformation is: rotate, translate, scale Args: image (np.array or torch.tensor): of shape [n_pixels, n_pixels] params (Params): contains parameters for rotations, scaling etc. Returns: image with transformations applied """ assert ( image.ndim == 3 ), f"image must be of shape [n_channels, n_pixels, n_pixels] not {image.shape}" image_transformed = image.squeeze() # Rotate if params.angle not in (0.0, 360.0): # cval is the fill value. image_transformed = skimage.transform.rotate( image_transformed, params.angle, cval=image_transformed.min() ) # Translate # if edge is reached cut-off portion appears on other side if params.shift_x != 0.0: image_transformed = np.roll(image_transformed, int(params.shift_x), axis=1) if params.shift_y != 0.0: image_transformed = np.roll(image_transformed, -int(params.shift_y), axis=0) # Scale if params.scale != 1.0: image_transformed = rescale(image_transformed, params.scale) image_transformed = to_torch(image, image_transformed) return image_transformed def rescale(image, scale): """Rescales images based on given scale factor""" scale_transform = skimage.transform.SimilarityTransform(scale=scale) image = skimage.transform.warp( image, scale_transform.inverse, mode="constant", cval=image.min(), ) return image def to_torch(image, image_transformed): """Converts numpy matrix to torch tensor with correct shape""" image_transformed = image_transformed.reshape(image.shape) if torch.is_tensor(image_transformed): return image_transformed.float() if torch.is_tensor(image): image_transformed = torch.from_numpy(image_transformed).float() return image_transformed def get_transform_params( n_rotations, n_x_translations, n_y_translations, scaling_factors, ): """Returns transform params corresponding given values. Translations subdivide translation interval. Args: n_rotations (int): number of subdivisions of 360 to apply. n_x_translations (int): number of shifts along x-axis n_y_translations (int): number of shifts along y-axis scaling_factors (list or tuple floats): representing the scaling factors to use Returns: Params object """ shifts_x = get_shifts(n_x_translations, TRANSLATION_INTERVAL) shifts_y = get_shifts(n_y_translations, TRANSLATION_INTERVAL) for angle in get_rotation_angles(n_rotations): for shift_x, shift_y in itertools.product(shifts_x, shifts_y): for scale in scaling_factors: params = Params( angle=angle, shift_x=shift_x, shift_y=shift_y, scale=scale ) yield params def get_shifts(n_translations, interval): """Returns shifts along given axis by dividing interval. 
Args: interval (list of ints): [0, n_pixels] n_translations (int): should be divisible by n_pixels """ if n_translations == 0: return [0] elif n_translations == 1: return [0, interval[1] // 2] min_shift = round(interval[1] / (n_translations + 1)) steps = [n * min_shift for n in range(n_translations + 1)] return steps def get_rotation_angles(n_rotations): """Yields rotation angles based on subdivisions given. Example: >>> get_rotation_angles(2) => [0.0, 120.0, 240.0] """ min_angle = 360.0 / (n_rotations + 1) for n in range(n_rotations + 1): yield min_angle * n def shift_to_angle(shift_val, n_transformations): """Returns the angle corresponding to the shift_val. Example: [0, 32], shift_val = 4, we should get 4 / 32 * 360 """ if shift_val == TRANSLATION_INTERVAL[1]: return 0.0 shift_ratio = float(shift_val) / TRANSLATION_INTERVAL[1] angle = 360.0 * shift_ratio return angle
Addressing-the-Topological-Defects-of-Disentanglement-main
datasets/transformations.py
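get_transform_params enumerates the full Cartesian product of rotations, x/y shifts, and scales, so it yields (n_rotations+1) * (n_x_translations+1) * (n_y_translations+1) * len(scaling_factors) Params objects. A quick sanity check, assuming the repo root is on sys.path so the module imports as datasets.transformations:

from datasets import transformations

params = list(
    transformations.get_transform_params(
        n_rotations=2, n_x_translations=1, n_y_translations=0, scaling_factors=(1.0,)
    )
)
# (2 + 1) rotations x (1 + 1) x-shifts x (0 + 1) y-shifts x 1 scale = 6
assert len(params) == (2 + 1) * (1 + 1) * (0 + 1) * 1
print(params[0])  # Params(angle=0.0, shift_x=0, shift_y=0, scale=1.0)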
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ import torch from torch.utils.data import Dataset from torchvision import transforms from sklearn.model_selection import StratifiedShuffleSplit import torchvision from . import data_utils from abc import ABC, abstractmethod from datasets import transformations import numpy as np import random import json class AbstractDataset(ABC): """ Defines common fields needed for datasets Attributes: batch_size (int): batch size used for dataloaders train_load (torch.utils.data.Dataset): X1, X2, Angle(s) test_load (torch.utils.data.Dataset): X1, X2, Angle(s) pairs (bool): indicates whether to use Pairs dataset where both x1 and x2 are transformed. Otherwise, Single dataset is used where only x1 is transformed. """ def __init__( self, batch_size, n_rotations=0, n_x_translations=0, n_y_translations=0, scaling_factors=(1.0,), seed=0, pairs=True, ): AbstractDataset.set_seed(seed) self.batch_size = batch_size self.n_x_translations, self.n_y_translations = ( n_x_translations, n_y_translations, ) self.n_rotations, self.scaling_factors = n_rotations, scaling_factors self.X_orig_train, self.X_orig_valid, self.X_orig_test = self.get_original() self.transform_params = list( transformations.get_transform_params( n_rotations=self.n_rotations, n_x_translations=self.n_x_translations, n_y_translations=self.n_y_translations, scaling_factors=self.scaling_factors, ) ) data_cls = Pairs if pairs else Single self.X_train = data_cls(self.X_orig_train, self.transform_params) self.train_loader = torch.utils.data.DataLoader( self.X_train, batch_size=self.batch_size, shuffle=True, collate_fn=Pairs.collate, ) #  For validation and test, use shuffle = False to have SequentialSampler(dataset) by default # (see https://github.com/pytorch/pytorch/blob/bfa94487b968ccb570ef8cd9547029b967e76ed0/torch/utils/data/dataloader.py#L257) self.X_valid = data_cls(self.X_orig_valid, self.transform_params) self.valid_loader = torch.utils.data.DataLoader( self.X_valid, batch_size=self.batch_size, shuffle=False, collate_fn=Pairs.collate, ) self.X_test = data_cls(self.X_orig_test, self.transform_params) self.test_loader = torch.utils.data.DataLoader( self.X_test, batch_size=self.batch_size, shuffle=False, collate_fn=Pairs.collate, ) self.test_loader_batch_1 = torch.utils.data.DataLoader( self.X_test, batch_size=1, shuffle=False, collate_fn=Pairs.collate, ) self.test_loader_batch_100 = torch.utils.data.DataLoader( self.X_test, batch_size=100, shuffle=False, collate_fn=Pairs.collate, ) def __repr__(self): attributes = { "n_rotations": self.n_rotations, "n_x_translations": self.n_x_translations, "n_y_translations": self.n_y_translations, "scaling_factors": self.scaling_factors, } return json.dumps(attributes) @abstractmethod def get_original(self): """Sets X_train and X_test to images in original dataset""" pass @property def total_n_transformations(self): """Computes the total number of transformations""" n_translations = (1 + self.n_x_translations) * (1 + self.n_y_translations) n = n_translations * (1 + self.n_rotations) * len(self.scaling_factors) return n @staticmethod def set_seed(seed): torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) @classmethod def __subclasshook__(cls, C): """Verifies dataset has loader of correct type""" for loader in ["train_loader", "test_loader"]: is_valid = hasattr(cls, loader) and isinstance( (getattr(cls, loader)), 
Dataset ) if not is_valid: return False return True class ProjectiveMNIST(AbstractDataset): """Builds MNIST dataset with transformations applied lazly. Loader contains: (digit, rotated_digit, angle) Shape of Data: (batch_size, 1, 28, 28) Args: batch_size (int): batch size to user for dataloaders n_rotations (int): number discrete rotations per image train_set_proportion (float): proportion of training set to keep valid_set_proportion (float): proportion of training set to keep test_set_proportion (float): proportion of training set to keep """ def __init__( self, batch_size, n_rotations=4, n_x_translations=0, n_y_translations=0, scaling_factors=(1.0,), train_set_proportion=0.1, valid_set_proportion=1.0, test_set_proportion=1.0, seed=0, pairs=True, ): self.train_set_proportion = train_set_proportion self.valid_set_proportion = valid_set_proportion self.test_set_proportion = test_set_proportion super().__init__( batch_size, n_rotations, n_x_translations, n_y_translations, scaling_factors, seed, pairs, ) self.n_pixels = self.X_orig_train[0].shape[1] self.n_channels = 1 def get_original(self): """Returns original training and test images""" mnist_train, mnist_val, mnist_test = self.download_mnist() # normalize MNIST so values are between [0, 1] x_train = mnist_train.data.unsqueeze(1) / 255.0 x_val = mnist_val.data.unsqueeze(1) / 255.0 x_test = mnist_test.data.unsqueeze(1) / 255.0 return x_train, x_val, x_test @staticmethod def stratified_sample(X, y, size): """Returns a stratified sample""" if size == 1.0: return X test_size = 1 - size sampler = StratifiedShuffleSplit( n_splits=1, test_size=test_size, random_state=0 ) indices, _ = next(sampler.split(X, y)) X_sample = X[indices] return X_sample @staticmethod def split_train_valid(train_set, split=10000): num_train = len(train_set) indices = list(range(num_train)) train_idx, valid_idx = indices[split:], indices[:split] train_data = train_set.data[train_idx] valid_data = train_set.data[valid_idx] train_targets = train_set.targets[train_idx] valid_targets = train_set.targets[valid_idx] return train_data, train_targets, valid_data, valid_targets def download_mnist(self): """Skips download if cache is available""" train_set = torchvision.datasets.MNIST( "/tmp/", train=True, download=True, transform=transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] ), ) test_set = torchvision.datasets.MNIST( "/tmp/", train=False, download=True, transform=transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] ), ) ( train_data, train_targets, valid_data, valid_targets, ) = ProjectiveMNIST.split_train_valid(train_set) # stratified samples train_data = ProjectiveMNIST.stratified_sample( train_data, train_targets, self.train_set_proportion ) valid_data = ProjectiveMNIST.stratified_sample( valid_data, valid_targets, self.valid_set_proportion ) test_data = ProjectiveMNIST.stratified_sample( test_set.data, test_set.targets, self.test_set_proportion ) return train_data, valid_data, test_data class ProjectiveSingleDigitMNIST(AbstractDataset): """Builds MNIST dataset with transformations applied lazly. 
Loader contains: (digit, rotated_digit, angle) Shape of Data: (batch_size, 1, 28, 28) Args: batch_size (int): batch size to user for dataloaders n_rotations (int): number discrete rotations per image train_set_proportion (float): proportion of training set to keep valid_set_proportion (float): proportion of training set to keep test_set_proportion (float): proportion of training set to keep """ def __init__( self, batch_size, n_rotations=4, n_x_translations=0, n_y_translations=0, scaling_factors=(1.0,), train_set_proportion=0.1, valid_set_proportion=1.0, test_set_proportion=1.0, seed=0, pairs=True, digit=4, ): self.train_set_proportion = train_set_proportion self.valid_set_proportion = valid_set_proportion self.test_set_proportion = test_set_proportion self.digit = digit super().__init__( batch_size, n_rotations, n_x_translations, n_y_translations, scaling_factors, seed, pairs, ) self.n_pixels = self.X_orig_train[0].shape[1] self.n_channels = 1 def get_original(self): """Returns original training and test images""" mnist_train, mnist_val, mnist_test = self.download_mnist() # normalize MNIST so values are between [0, 1] x_train = mnist_train.data.unsqueeze(1) / 255.0 x_val = mnist_val.data.unsqueeze(1) / 255.0 x_test = mnist_test.data.unsqueeze(1) / 255.0 return x_train, x_val, x_test @staticmethod def split_train_valid(train_set, split=10000): num_train = len(train_set) indices = list(range(num_train)) train_idx, valid_idx = indices[split:], indices[:split] train_data = train_set.data[train_idx] valid_data = train_set.data[valid_idx] train_targets = train_set.targets[train_idx] valid_targets = train_set.targets[valid_idx] return train_data, train_targets, valid_data, valid_targets def sample_single_digit(self, x, targets, proportion): idx = targets == self.digit x_digit = x[idx] sample_size = int(len(idx) * proportion) return x_digit[:sample_size] def download_mnist(self): """Skips download if cache is available""" train_set = torchvision.datasets.MNIST( "/tmp/", train=True, download=True, transform=transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] ), ) test_set = torchvision.datasets.MNIST( "/tmp/", train=False, download=True, transform=transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] ), ) ( train_data, train_targets, valid_data, valid_targets, ) = ProjectiveMNIST.split_train_valid(train_set) # stratified samples train_data = self.sample_single_digit( train_data, train_targets, self.train_set_proportion ) valid_data = self.sample_single_digit( valid_data, valid_targets, self.valid_set_proportion ) test_data = self.sample_single_digit( test_set.data, test_set.targets, self.test_set_proportion ) return train_data, valid_data, test_data class SimpleShapes(AbstractDataset): def __init__( self, batch_size, n_pixels=28, n_classes=300, n_points=5, n_rotations=9, n_x_translations=0, n_y_translations=0, scaling_factors=(1.0,), n_channels=1, seed=0, pairs=True, ): self.n_pixels, self.n_classes = n_pixels, n_classes self.n_points, self.n_channels = n_points, n_channels super().__init__( batch_size, n_rotations, n_x_translations, n_y_translations, scaling_factors, seed, pairs, ) @staticmethod def normalize(X): return torch.clamp(X + 1, 0.0, 1.0) def get_original(self): np.random.seed(1) # Sets seed data = data_utils.generate_dataset(self.n_pixels, self.n_classes, self.n_points) (X_train, _), (X_test, _) = data X_trainvalid = torch.from_numpy(X_train).unsqueeze(1).float() N = X_trainvalid.size(0) Nvalid = int(N * 0.2) # Keeps 
20% for validation X_valid = SimpleShapes.normalize(X_trainvalid[:Nvalid, ...]) X_train = SimpleShapes.normalize(X_trainvalid[Nvalid:, ...]) X_test = SimpleShapes.normalize(torch.from_numpy(X_test).unsqueeze(1).float()) return X_train, X_valid, X_test class Single(Dataset): """Contains x1 transformed with parameters. Total number of samples == x1 transformed """ def __init__(self, X, params): self.X = X self.params = params def __len__(self): return self.X.shape[0] * len(self.params) @staticmethod def collate(batch): """Used for dataloader""" X1 = torch.stack([item[0] for item in batch]) X2 = torch.stack([item[1] for item in batch]) params = [item[2] for item in batch] return X1, X2, params def get_x_idx(self, idx): """Returns the idx of the original image x.""" return idx // len(self.params) def get_x1(self, idx, x_idx): x = self.X[x_idx] p = len(self.params) x1_params_idx = idx % p x1_params = self.params[x1_params_idx] x1 = transformations.transform(x, x1_params) return x1, x1_params def __getitem__(self, idx): x_idx = self.get_x_idx(idx) x1, x1_params = self.get_x1(idx, x_idx) x2 = self.X[x_idx] return x1, x2, x1_params class Pairs(Dataset): """Contains x1, x2, and transformation params. Total of n_samples * num_params^2 pairs: (x0, t0) => x1 (x1, t0) => x2 (x0, t0) => x1 (x1, t1) => x2 Args: X (original images): [n_samples, n_pixels, n_pixels] params (list of transformations.Params): parameters for transformations """ def __init__(self, X, params): self.X = X self.params = params def __len__(self): return self.X.shape[0] * (len(self.params) ** 2) @staticmethod def collate(batch): """Used for dataloader""" X1 = torch.stack([item[0] for item in batch]) X2 = torch.stack([item[1] for item in batch]) params = [item[2] for item in batch] return X1, X2, params def get_x_idx(self, idx): """Returns the idx of the original image x.""" return idx // (len(self.params) ** 2) def get_x1(self, idx, x_idx): x = self.X[x_idx] p = len(self.params) x1_params_idx = (idx - (x_idx) * p * p) // p x1_params = self.params[x1_params_idx] x1 = transformations.transform(x, x1_params) return x1 def get_x2_params(self, idx, x_idx): p = len(self.params) x1_params_idx = (idx - (x_idx) * p * p) // p x2_params_idx = idx - ((x_idx * p * p) + (x1_params_idx * p)) return self.params[x2_params_idx] def __getitem__(self, idx): x_idx = self.get_x_idx(idx) x1 = self.get_x1(idx, x_idx) x2_params = self.get_x2_params(idx, x_idx) x2 = transformations.transform(x1, x2_params) x1, x2 = x1, x2 return x1, x2, x2_params class ShapeNet(AbstractDataset): pass class ShapeNetIterator(Dataset): """ShapeNet Iterator""" def __init__(self, V, transform=None): self.V = V self.preprocess = transforms.Compose( [ # transforms.Resize(256), # transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ), ] ) def __len__(self): return len(self.V[0]) def __getitem__(self, idx): return tuple([self.preprocess(self.V[v][idx]) for v in range(len(self.V))])
Addressing-the-Topological-Defects-of-Disentanglement-main
datasets/datasets.py
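Pairs flattens a (sample, x1-transform, x2-transform) grid into a single integer index, so each original image yields p^2 pairs for p transformation params. A self-contained sketch of the index arithmetic used by get_x_idx / get_x1 / get_x2_params (pure Python, no data needed):

p = 3          # stands in for len(self.params)
n_samples = 2  # stands in for self.X.shape[0]

decoded = []
for idx in range(n_samples * p * p):
    x_idx = idx // (p * p)
    x1_params_idx = (idx - x_idx * p * p) // p
    x2_params_idx = idx - (x_idx * p * p + x1_params_idx * p)
    decoded.append((x_idx, x1_params_idx, x2_params_idx))

# every (sample, t1, t2) triple appears exactly once
assert len(set(decoded)) == n_samples * p * p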
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """
Addressing-the-Topological-Defects-of-Disentanglement-main
datasets/__init__.py
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ """Script demonstrating drawing of anti-aliased lines using Xiaolin Wu's line algorithm usage: python xiaolinwu.py [output-file] """ from __future__ import division import sys from PIL import Image def _fpart(x): return x - int(x) def _rfpart(x): return 1 - _fpart(x) def putpixel(img, xy, color, alpha=1): """Paints color over the background at the point xy in img. Use alpha for blending. alpha=1 means a completely opaque foreground. """ c = tuple(map(lambda bg, fg: int(round(alpha * fg + (1-alpha) * bg)), img.getpixel(xy), color)) img.putpixel(xy, c) def draw_line(img, p1, p2, color): """Draws an anti-aliased line in img from p1 to p2 with the given color.""" x1, y1 = p1 x2, y2 = p2 dx, dy = x2-x1, y2-y1 steep = abs(dx) < abs(dy) p = lambda px, py: ((px,py), (py,px))[steep] if steep: x1, y1, x2, y2, dx, dy = y1, x1, y2, x2, dy, dx if x2 < x1: x1, x2, y1, y2 = x2, x1, y2, y1 grad = dy/dx intery = y1 + _rfpart(x1) * grad def draw_endpoint(pt): x, y = pt xend = round(x) yend = y + grad * (xend - x) xgap = _rfpart(x + 0.5) px, py = int(xend), int(yend) putpixel(img, p(px, py), color, _rfpart(yend) * xgap) putpixel(img, p(px, py+1), color, _fpart(yend) * xgap) return px xstart = draw_endpoint(p(*p1)) + 1 xend = draw_endpoint(p(*p2)) for x in range(xstart, xend): y = int(intery) putpixel(img, p(x, y), color, _rfpart(intery)) putpixel(img, p(x, y+1), color, _fpart(intery)) intery += grad
Addressing-the-Topological-Defects-of-Disentanglement-main
datasets/xiaolinwu.py
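putpixel's alpha blending is a plain per-channel linear interpolation between foreground and background. A tiny runnable check of that formula, standalone rather than importing the module:

from PIL import Image

img = Image.new("RGB", (4, 4), (255, 255, 255))
fg, alpha = (0, 0, 255), 0.3
bg = img.getpixel((1, 1))
blended = tuple(int(round(alpha * f + (1 - alpha) * b)) for f, b in zip(fg, bg))
img.putpixel((1, 1), blended)
print(blended)  # 30% blue over white: (178, 178, 255)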
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ from torch.utils.data import Dataset, DataLoader import numpy as np from PIL import Image from .xiaolinwu import draw_line blue = (0, 0, 255) yellow = (255, 255, 0) white = (255, 255, 255) black = (0, 0, 0) def generate_images_from_coords(NPX, NP, C, cols): images = list() for c in range(C.shape[2]): img = Image.new("RGB", (NPX, NPX), white) for p in range(NP - 1): if (C[0, p + 1, c] != C[0, p, c]) or (C[1, p + 1, c] != C[1, p, c]): draw_line( img, (C[0, p + 1, c], C[1, p + 1, c]), (C[0, p, c], C[1, p, c]), cols[c], ) draw_line( img, (C[0, p, c], C[1, p, c]), (C[0, p + 1, c], C[1, p + 1, c]), cols[c], ) if (C[0, p + 1, c] != C[0, 0, c]) or (C[1, p + 1, c] != C[1, 0, c]): draw_line( img, (C[0, p + 1, c], C[1, p + 1, c]), (C[0, 0, c], C[1, 0, c]), cols[c] ) draw_line( img, (C[0, 0, c], C[1, 0, c]), (C[0, p + 1, c], C[1, p + 1, c]), cols[c] ) images.append(np.array(img)) return images # Draw images correspoding to different classes def plot_and_save_grid(NPX, images, margin=1, name="FIGS/junk.png"): grid = np.zeros((NPX + 2 * margin, NPX * NC + margin * NC + margin, 3)) pointer = 0 for img in images: grid[ margin : NPX + margin, 0 + pointer + margin : NPX + pointer + margin, : ] = img pointer += NPX + margin im = Image.fromarray(np.uint8((grid))) im.save(name) return im class MyDataset(Dataset): """Face Landmarks dataset.""" def __init__(self, V, transform=None): """ Args: csv_file (string): Path to the csv file with annotations. root_dir (string): Directory with all the images. transform (callable, optional): Optional transform to be applied on a sample. """ # self.root = ts.root # self.transform = transforms.ToTensor() self.V = V def __len__(self): return len(self.V[0]) def __getitem__(self, idx): try: return tuple([self.V[v][idx] for v in range(len(self.V))]) except: pdb.set_trace() # return (self.transform(self.train_data[idx,:,:,:]),self.train_labels[idx]) # return Dataset.__getitem__(self, idx) # super() def pytorch_dataset(V, batch_size): # order = np.random.permutation(NS) ts = MyDataset(V) loader = torch.utils.data.DataLoader(ts, batch_size=batch_size, shuffle=True) return loader def generate_dataset(NPX, NC, NP): NS = NC * 2 # number of samples # coordinates of each classes of objects C = np.random.randint(0 + NPX / 6, NPX - 1 - NPX / 6, (2, NP, NC)) cols = np.zeros((NS, 3)) # Generate images corresponding to different classes using Xiaolin Wu's line algorithm for anti-aliasing cols = np.zeros((NS, 3)) X = np.array( generate_images_from_coords(NPX, NP, C[:, :, :].reshape((2, NP, NC)), cols) ) X = 1 - np.mean(X, axis=3) # normalize (negative sign ensure background is min) X = X / -X.mean() y = np.arange(NC) y = y.flatten() Y = y.astype(int) split = NS // 4 Xtrain = X[:split] Ytrain = Y[:split] Xtest = X[split:] Ytest = Y[split:] return ((Xtrain, Ytrain), (Xtest, Ytest)) def generate_angles(NT1, NT2, NC): # create pairs of shape with all angles NT = NT1 * NT2 ** 2 [ind1, ind2] = np.meshgrid(range(NT), range(NT)) s1 = ind1.flatten() s2 = ind2.flatten() alphas = (s1 - s2) % (NT1) sangle1 = np.floor(s1 / NT2 ** 2) sangle2 = np.floor(s2 / NT2 ** 2) strans1 = s1 % NT2 ** 2 strans2 = s2 % NT2 ** 2 stransx1 = np.floor(strans1 / NT2) stransx2 = np.floor(strans2 / NT2) stransy1 = strans1 % NT2 stransy2 = strans2 % NT2 alphas1 = (sangle1 - sangle2) % (NT1) alphas2 = (stransx1 - stransx2) % (NT2) alphas3 
= (stransy1 - stransy2) % (NT2) s1_all_shapes = ( np.tile(s1, (int(NC / 2))) + NT * np.tile(np.arange(int(NC / 2)).T, (NT * NT, 1)).T.flatten() ) s2_all_shapes = ( np.tile(s2, (int(NC / 2))) + NT * np.tile(np.arange(int(NC / 2)).T, (NT * NT, 1)).T.flatten() ) alphas_all_shapes1 = np.tile(alphas1, int(NC / 2)) alphas_all_shapes2 = np.tile(alphas2, int(NC / 2)) alphas_all_shapes3 = np.tile(alphas3, int(NC / 2)) alphas = (alphas1, alphas2, alphas3) alphas_all_shapes = (alphas_all_shapes1, alphas_all_shapes2, alphas_all_shapes3) return s1, s2, s1_all_shapes, s2_all_shapes, alphas, alphas_all_shapes def x_to_image(x): """Takes a single input x and transforms it into image for im.show""" if x.dim() == 2: n_channels = 1 else: n_channels = x.shape[0] n_pixels = x.shape[1] x_image = x.reshape(n_channels, n_pixels, n_pixels) x_image = x_image.permute(1, 2, 0) # sequeeze to remove in case of a singel channel x_image = x_image.squeeze() return x_image
Addressing-the-Topological-Defects-of-Disentanglement-main
datasets/data_utils.py
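x_to_image just reorders a CHW tensor into HWC (and squeezes a singleton channel) for matplotlib-style display. A quick illustration with toy shapes, assuming the repo root is on sys.path so the module imports as datasets.data_utils:

import torch
from datasets.data_utils import x_to_image

gray = torch.rand(1, 28, 28)   # single channel
rgb = torch.rand(3, 28, 28)    # three channels

print(x_to_image(gray).shape)  # torch.Size([28, 28])
print(x_to_image(rgb).shape)   # torch.Size([28, 28, 3])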
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ import pytest from datasets import datasets from cci_variational_autoencoder import CCIVariationalAutoEncoder BATCH_SIZE = 16 @pytest.fixture(scope="module") def rotated_mnist(): rotated_mnist = datasets.ProjectiveMNIST( BATCH_SIZE, n_rotations=9, train_set_proportion=0.001, test_set_proportion=0.001, valid_set_proportion=0.001, ) return rotated_mnist @pytest.fixture(scope="module") def simple_shapes(): batch_size = 16 return datasets.SimpleShapes(batch_size, n_classes=10) class TestCCIVariationalAutoEncoder: def test_vae(self, simple_shapes): n_epochs, learning_rate = 1, 0.001 model = CCIVariationalAutoEncoder( simple_shapes, beta=1.0, c_max=0.0, device="cpu", n_epochs=n_epochs, learning_rate=learning_rate, ) model.train() def test_beta_vae(self, simple_shapes): n_epochs, learning_rate = 1, 0.001 model = CCIVariationalAutoEncoder( simple_shapes, beta=1.0, c_max=0.0, device="cpu", n_epochs=n_epochs, learning_rate=learning_rate, ) model.train() def test_cci_vae(self, simple_shapes): n_epochs, learning_rate = 1, 0.001 model = CCIVariationalAutoEncoder( simple_shapes, beta=100.0, c_max=36.0, device="cpu", n_epochs=n_epochs, learning_rate=learning_rate, ) model.train() class TestProjectiveMNISTVAE: def test_vae(self, rotated_mnist): n_epochs, learning_rate = 1, 0.001 model = CCIVariationalAutoEncoder( rotated_mnist, beta=1.0, c_max=0.0, device="cpu", n_epochs=n_epochs, learning_rate=learning_rate, ) model.train(stop_early=True) def test_cci_vae(self, rotated_mnist): n_epochs, learning_rate = 1, 0.001 model = CCIVariationalAutoEncoder( rotated_mnist, beta=100.0, c_max=36.0, device="cpu", n_epochs=n_epochs, learning_rate=learning_rate, ) model.train(stop_early=True)
Addressing-the-Topological-Defects-of-Disentanglement-main
tests/test_cci_vae.py
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ import torch import math from datasets import transformations from datasets import datasets class TestSimpleShapes: def test_train_loader(self): simple_shapes = datasets.SimpleShapes(16, n_classes=3) assert hasattr(simple_shapes, "train_loader") assert hasattr(simple_shapes, "test_loader") assert len(simple_shapes.train_loader) > 0 assert len(simple_shapes.test_loader) > 0 def test_transformations(self): simple_shapes = datasets.SimpleShapes( 16, n_classes=3, n_rotations=9, n_x_translations=5, n_y_translations=10, scaling_factors=(1.0, 1.2), ) assert simple_shapes.total_n_transformations > 50 class TestProjectiveMNIST: def test_creation(self): """Verifies rotated mnist is created properly""" n_rotations = 9 batch_size = 16 train_size = 5000 rotated_mnist = datasets.ProjectiveMNIST(batch_size, n_rotations=n_rotations) expected_n_batches = math.ceil( (rotated_mnist.total_n_transformations ** 2) * train_size / batch_size ) assert len(rotated_mnist.train_loader) == expected_n_batches # test shape of x2 assert rotated_mnist.X_train[3][1].shape == torch.Size([1, 28, 28]) def test_proportion(self): n_rotations = 9 batch_size = 16 train_proportion = 0.001 test_proportion = 0.005 # 10k for validation full_train_size = 50000 full_test_size = 10000 rotated_mnist = datasets.ProjectiveMNIST( batch_size, n_rotations=n_rotations, train_set_proportion=train_proportion, valid_set_proportion=train_proportion, test_set_proportion=test_proportion, ) expected_train_size = ( full_train_size * train_proportion * (n_rotations + 1) ** 2 ) expected_test_size = full_test_size * test_proportion * (n_rotations + 1) ** 2 assert len(rotated_mnist.X_train) == expected_train_size assert len(rotated_mnist.X_test) == expected_test_size class TestTransformations: def test_transform(self): shape = (1, 30, 30) image = torch.rand(shape) params = transformations.Params(angle=45.0) rotated_X = transformations.transform(image, params) assert torch.is_tensor(rotated_X) assert rotated_X.shape == image.shape
Addressing-the-Topological-Defects-of-Disentanglement-main
tests/test_datasets.py
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ import pytest from datasets import datasets from autoencoder import AutoEncoder class TestAutoencoder: @pytest.fixture(scope="module") def simple_shapes(self): batch_size = 4 return datasets.SimpleShapes(batch_size, n_classes=10, n_rotations=3) def test_autoencoder(self, simple_shapes): n_epochs, learning_rate = 1, 0.001 model = AutoEncoder( simple_shapes, device="cpu", n_epochs=n_epochs, learning_rate=learning_rate ) model.run(stop_early=True) def test_autoencoder_with_shift_operator(self, simple_shapes): """Tests autoencoder with latent rotation""" n_epochs, learning_rate = 1, 0.001 model = AutoEncoder( simple_shapes, device="cpu", n_epochs=n_epochs, learning_rate=learning_rate, latent_operator_name="ShiftOperator", ) model.run(stop_early=True) def test_autoencoder_with_disentangled_rotation(self, simple_shapes): """Tests autoencoder with latent rotation""" n_epochs, learning_rate = 1, 0.001 model = AutoEncoder( simple_shapes, device="cpu", n_epochs=n_epochs, learning_rate=learning_rate, latent_operator_name="DisentangledRotation", ) model.run(stop_early=True) class TestProjectiveMnistAutoencoder: def __init__(self): self.n_epochs = 1 self.learning_rate = 0.01 def test_standard_autoencoder(self, rotated_mnist): model = AutoEncoder( rotated_mnist, n_epochs=self.n_epochs, learning_rate=self.learning_rate ) model.run(stop_early=True) def test_rotated_autoencoder(self, rotated_mnist): model = AutoEncoder( rotated_mnist, z_dim=400, latent_operator_name="DisentangledRotation", n_epochs=self.n_epochs, learning_rate=self.learning_rate, ) model.run(stop_early=True) def test_shift_operator_autoencoder(self, rotated_mnist): model = AutoEncoder( rotated_mnist, z_dim=400, latent_operator_name="ShiftOperator", n_epochs=self.n_epochs, learning_rate=self.learning_rate, ) model.run(stop_early=True)
Addressing-the-Topological-Defects-of-Disentanglement-main
tests/test_autoencoder.py
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """
Addressing-the-Topological-Defects-of-Disentanglement-main
complex_shift_operator/__init__.py
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ import argparse import torch import sys sys.path.append("..") from datasets import datasets from weakly_complex_shift_autoencoder import WeaklyComplexAutoEncoder from complex_shift_autoencoder import ComplexAutoEncoder import sys import os import numpy as np import random import torch.backends.cudnn as cudnn use_cuda = True if torch.cuda.is_available() else False parser = argparse.ArgumentParser( description="Fully/Weakly supervised version of shift operator" ) # General arguments parser.add_argument("--seed", type=int, default=0) parser.add_argument( "--output_directory", type=str, default="output", help="In this directory the models will be " "saved. Will be created if doesn't exist.", ) parser.add_argument("--n_epochs", type=int, default="10", help="Number of epochs.") parser.add_argument("--lr", type=float, default="0.001", help="Learning rate.") parser.add_argument("--bs", type=int, default="16", help="Batch size.") parser.add_argument( "--n_rot", type=int, default="9", help="Number of rotations (for the model)." ) parser.add_argument( "--data_n_rot", type=int, default="9", help="Number of rotations (for the data)." ) parser.add_argument( "--n_x", type=int, default="0", help="Number of x translations in x (for the model).", ) parser.add_argument( "--data_n_x", type=int, default="0", help="Number of x translations in x (for the data).", ) parser.add_argument( "--n_y", type=int, default="0", help="Number of y translations in y (for the model).", ) parser.add_argument( "--data_n_y", type=int, default="0", help="Number of y translations in y (for the data).", ) parser.add_argument("--tr_prop", type=float, default="0.01", help="Train proportion.") parser.add_argument("--te_prop", type=float, default="0.01", help="Test proportion.") parser.add_argument("--val_prop", type=float, default="0.01", help="Valid proportion.") parser.add_argument("--n_classes", type=int, default="300", help="Number of classes.") parser.add_argument("--dataset", type=str, default="mnist", help="Dataset") parser.add_argument( "--sftmax", type=int, default="1", help="If 1, switches to weighting and summing (deprecated softmax is always used)" ) parser.add_argument("--tau", type=float, default=0.1, help="Temperature of softmax.") parser.add_argument("--mode", type=str, default="train", help="training or test mode") parser.add_argument("--supervised", type=int, default=0, help="Switches between weakly and fully supervised.") def main(params): device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(f"running on {device}") args = parser.parse_args(params) SEED = int(args.seed) random.seed(SEED) torch.manual_seed(SEED) np.random.seed(SEED) torch.cuda.manual_seed_all(SEED) if args.dataset == "simpleshapes": data = datasets.SimpleShapes( batch_size=args.bs, n_x_translations=args.data_n_x, n_y_translations=args.data_n_y, n_rotations=args.data_n_rot, n_classes=args.n_classes, n_pixels=28, ) elif args.dataset == "mnist": data = datasets.ProjectiveMNIST( batch_size=args.bs, n_x_translations=args.data_n_x, n_y_translations=args.data_n_y, n_rotations=args.data_n_rot, train_set_proportion=args.tr_prop, test_set_proportion=args.te_prop, valid_set_proportion=args.val_prop, ) if args.mode == "train": print("Training") if args.mode == "test": print("Testing") # automatically set z_dim to image size image_size = 
data.n_pixels ** 2 if not os.path.exists(args.output_directory): os.mkdir(args.output_directory) dict_args = vars(args) save_name = "_".join( [ "{0}_{1}".format(key, dict_args[key]) for key in dict_args if key not in ["output_directory", "mode"] ] ) if args.supervised: transformation_types = [] indexes = [] if args.n_rot > 0: transformation_types.append("ComplexShiftOperator") indexes.append(0) if args.n_x > 0: transformation_types.append("ComplexShiftOperator") indexes.append(1) if args.n_y > 0: transformation_types.append("ComplexShiftOperator") indexes.append(2) model_with_rotation = ComplexAutoEncoder( data, transformation_types=transformation_types, indexes=indexes, device=device, z_dim=image_size, seed=SEED, output_directory=args.output_directory, save_name=save_name, n_rotations=args.n_rot, n_x_translations=args.n_x, n_y_translations=args.n_y, ) n_transfos = len(indexes) else: model_with_rotation = WeaklyComplexAutoEncoder( data, transformation_type="ComplexShiftOperator", device=device, z_dim=image_size, seed=SEED, temperature=args.tau, output_directory=args.output_directory, save_name=save_name, use_softmax=args.sftmax, n_rotations=args.n_rot, n_x_translations=args.n_x, n_y_translations=args.n_y, ) if args.mode == "train": ( train_loss, valid_loss, train_mse, valid_mse, test_mse, ) = model_with_rotation.run(n_epochs=args.n_epochs, learning_rate=args.lr) perf = np.array([train_mse, valid_mse, test_mse]) torch.save(perf, os.path.join(args.output_directory, "final_mse_" + save_name)) torch.save( train_loss, os.path.join(args.output_directory, "train_loss_" + save_name) ) torch.save( valid_loss, os.path.join(args.output_directory, "valid_loss_" + save_name) ) file_name = "best_checkpoint_{}.pth.tar".format(model_with_rotation.save_name) path_to_model = os.path.join(args.output_directory, file_name) best_mse, best_epoch = model_with_rotation.load_model(path_to_model) ##### Plots train reconstructions samples_pairs = np.random.randint( 0, len(model_with_rotation.data.X_train), size=(10,) ).tolist() model_with_rotation.plot_x2_reconstructions( indices=samples_pairs, train_set=True, save_name=os.path.join(args.output_directory, "plots_train_reconstructions_" + save_name), ) ##### Plots train rotations of samples train_indices = np.random.randint( 0, len(model_with_rotation.data.X_orig_train), size=(10,) ).tolist() figsave_name=os.path.join(args.output_directory, "plots_train_rotations_" + save_name + '.png') if args.supervised: if n_transfos == 1: if args.data_n_x > 0: param_name = 'tx' elif args.data_n_y > 0: param_name = 'ty' if args.data_n_rot > 0: param_name = 'angle' model_with_rotation.plot_multiple_transformations(indices=train_indices, train_set = True, param_name=param_name, save_name=figsave_name ) else: model_with_rotation.plot_multiple_transformations_stacked(indices=train_indices, train_set = True, n_plots = 10, save_name=figsave_name ) else: if args.data_n_x > 0: param_name = 'tx' elif args.data_n_y > 0: param_name = 'ty' if args.data_n_rot > 0: param_name = 'angle' model_with_rotation.plot_multiple_transformations(indices=train_indices, train_set = True, param_name=param_name,save_name=figsave_name ) ##### Plots test reconstructions samples_pairs = np.random.randint( 0, len(model_with_rotation.data.X_test), size=(10,) ).tolist() model_with_rotation.plot_x2_reconstructions( indices=samples_pairs, train_set=False, save_name=os.path.join(args.output_directory, "plots_test_reconstructions_" + save_name), ) ##### Plots test rotations of samples test_indices = np.random.randint( 
0, len(model_with_rotation.data.X_orig_test), size=(10,) ).tolist() figsave_name=os.path.join(args.output_directory, "plots_test_rotations_" + save_name + '.png') if args.supervised: if n_transfos == 1: if args.data_n_x > 0: param_name = 'tx' elif args.data_n_y > 0: param_name = 'ty' if args.data_n_rot > 0: param_name = 'angle' model_with_rotation.plot_multiple_transformations(indices=test_indices, train_set = False, param_name=param_name, save_name=figsave_name ) else: model_with_rotation.plot_multiple_transformations_stacked(indices=test_indices, train_set = False, n_plots = 10, save_name=figsave_name ) else: if args.data_n_x > 0: param_name = 'tx' elif args.data_n_y > 0: param_name = 'ty' if args.data_n_rot > 0: param_name = 'angle' model_with_rotation.plot_multiple_transformations(indices=test_indices, train_set = False, param_name=param_name, save_name=figsave_name ) elif args.mode == "test": file_name = "best_checkpoint_{}.pth.tar".format(model_with_rotation.save_name) path_to_model = os.path.join(args.output_directory, file_name) model_with_rotation.load_model(path_to_model) if args.supervised: loss_func = model_with_rotation.reconstruction_mse_transformed_z1 else: loss_func = model_with_rotation.reconstruction_mse_transformed_z1_weak test_mse = model_with_rotation.compute_test_loss( loss_func, model_with_rotation.data.test_loader_batch_100 ) torch.save( torch.FloatTensor([test_mse]), os.path.join( args.output_directory, "test_mse_" + model_with_rotation.save_name ), ) if __name__ == "__main__": main(sys.argv[1:])
Addressing-the-Topological-Defects-of-Disentanglement-main
complex_shift_operator/__main__.py
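Since main(params) takes an argv-style list, a fully supervised rotation run on transformed MNIST can be launched programmatically as sketched below (equivalent to python -m complex_shift_operator with the same flags; the flag values here are illustrative, not the paper's settings):

from complex_shift_operator.__main__ import main

main([
    "--dataset", "mnist", "--supervised", "1",
    "--n_rot", "9", "--data_n_rot", "9",
    "--tr_prop", "0.01", "--val_prop", "0.01", "--te_prop", "0.01",
    "--n_epochs", "10", "--lr", "0.001", "--bs", "16",
    "--output_directory", "output", "--mode", "train",
])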
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
from functools import partial

from convit import VisionTransformer
from timm.models.efficientnet import EfficientNet
from timm.models.vision_transformer import _cfg
from timm.models.registry import register_model


@register_model
def convit_tiny(pretrained=False, **kwargs):
    num_heads = 4
    kwargs['embed_dim'] *= num_heads
    model = VisionTransformer(
        num_heads=num_heads,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/convit/convit_tiny.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint)
    return model


@register_model
def convit_small(pretrained=False, **kwargs):
    num_heads = 9
    kwargs['embed_dim'] *= num_heads
    model = VisionTransformer(
        num_heads=num_heads,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/convit/convit_small.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint)
    return model


@register_model
def convit_base(pretrained=False, **kwargs):
    num_heads = 16
    kwargs['embed_dim'] *= num_heads
    model = VisionTransformer(
        num_heads=num_heads,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/convit/convit_base.pth",
            map_location="cpu", check_hash=True
        )
        model.load_state_dict(checkpoint)
    return model
convit-main
models.py
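Note the embed_dim convention above: each registered constructor multiplies the incoming embed_dim by num_heads, so the caller passes a per-head width rather than the total model width. A tiny standalone illustration of that bookkeeping (no model is built; the numbers are only an example):

def scale_embed_dim(num_heads, **kwargs):
    # mirrors the kwargs['embed_dim'] *= num_heads line in each constructor
    kwargs['embed_dim'] *= num_heads
    return kwargs

print(scale_embed_dim(4, embed_dim=48))  # {'embed_dim': 192}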
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
"""
A script to run multinode training with submitit.
"""
import argparse
import os
import uuid
from pathlib import Path
import time
import shutil
import itertools

import main as classification
import submitit


def parse_args():
    classification_parser = classification.get_args_parser()
    parser = argparse.ArgumentParser("Submitit for ConViT", parents=[classification_parser])
    parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
    parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request")
    parser.add_argument("--timeout", default=1000, type=int, help="Duration of the job")
    parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
    parser.add_argument("--partition", default="dev,learnfair,scavenge", type=str, help="Partition where to submit")
    parser.add_argument("--use_volta32", action='store_true', help="Big models? Use this")
    parser.add_argument('--comment', default="icml", type=str,
                        help='Comment to pass to scheduler, e.g. priority message')
    return parser.parse_args()


def get_shared_folder() -> Path:
    user = os.getenv("USER")
    if Path("/checkpoint/").is_dir():
        p = Path(f"/checkpoint/{user}/convit")
        # p = p / str(int(time.time()))
        p = p / str(1614800338)
        p.mkdir(exist_ok=True)
        return p
    raise RuntimeError("No shared folder available")


def get_init_file(shared_folder):
    # Init file must not exist, but its parent dir must exist.
    init_file = shared_folder / f"{uuid.uuid4().hex}_init"
    if init_file.exists():
        os.remove(str(init_file))
    return init_file


class Trainer(object):
    def __init__(self, args):
        self.args = args

    def __call__(self):
        import main as classification

        self._setup_gpu_args()
        classification.main(self.args)

    def checkpoint(self):
        import os
        import submitit

        self.args.dist_url = get_init_file(self.args.shared_dir).as_uri()
        checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
        if os.path.exists(checkpoint_file):
            self.args.resume = checkpoint_file
        print("Requeuing ", self.args)
        empty_trainer = type(self)(self.args)
        return submitit.helpers.DelayedSubmission(empty_trainer)

    def _setup_gpu_args(self):
        import submitit
        from pathlib import Path

        job_env = submitit.JobEnvironment()
        self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
        self.args.gpu = job_env.local_rank
        self.args.rank = job_env.global_rank
        self.args.world_size = job_env.num_tasks
        print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")


def copy_py(dst_folder, root='.'):
    if not os.path.exists(dst_folder):
        print("Folder doesn't exist!")
        return
    for f in os.listdir(root):
        if f.endswith('.py'):
            shutil.copy2(f, dst_folder)


def main():
    args = parse_args()
    shared_folder = get_shared_folder()
    copy_py(shared_folder)
    os.chdir(shared_folder)

    grid = {
        'model': ['convit_base'],
    }

    def dict_product(d):
        keys = d.keys()
        for element in itertools.product(*d.values()):
            yield dict(zip(keys, element))

    for params in dict_product(grid):
        name = '_'.join(['{}_{}'.format(k, v) for k, v in params.items()])
        args.shared_dir = shared_folder
        args.job_dir = shared_folder / name
        if os.path.exists(args.job_dir / 'checkpoint.pth'):
            args.resume = args.job_dir / 'checkpoint.pth'

        # Note that the folder will depend on the job_id, to easily track experiments
        executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)

        num_gpus_per_node = args.ngpus
        nodes = args.nodes
        timeout_min = args.timeout
        partition = args.partition
        args.use_volta32 = True

        kwargs = {}
        if args.use_volta32:
            kwargs['slurm_constraint'] = 'volta32gb'
        if args.comment:
            kwargs['slurm_comment'] = args.comment

        executor.update_parameters(
            mem_gb=80 * num_gpus_per_node,
            gpus_per_node=num_gpus_per_node,
            tasks_per_node=num_gpus_per_node,  # one task per GPU
            cpus_per_task=10,
            nodes=nodes,
            timeout_min=timeout_min,  # max is 60 * 72
            slurm_partition=partition,
            slurm_signal_delay_s=120,
            **kwargs
        )

        for k, v in params.items():
            setattr(args, k, v)
        executor.update_parameters(name=name)

        args.dist_url = get_init_file(shared_folder).as_uri()
        args.output_dir = args.job_dir

        trainer = Trainer(args)
        job = executor.submit(trainer)
        print("Submitted job_id:", job.job_id)


if __name__ == "__main__":
    main()
convit-main
run_with_submitit.py
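dict_product above is a generic grid-sweep helper: it expands a dict of lists into one dict per configuration. A standalone demo with a hypothetical two-value learning-rate sweep:

import itertools

def dict_product(d):
    keys = d.keys()
    for element in itertools.product(*d.values()):
        yield dict(zip(keys, element))

grid = {"model": ["convit_base"], "lr": [0.1, 0.01]}
print(list(dict_product(grid)))
# [{'model': 'convit_base', 'lr': 0.1}, {'model': 'convit_base', 'lr': 0.01}]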
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import json
import random

from torchvision import datasets, transforms
from torchvision.datasets.folder import ImageFolder, DatasetFolder, default_loader

from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import create_transform

from typing import Any, Callable, cast, Dict, List, Optional, Tuple

IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')


def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:
    return filename.lower().endswith(extensions)


def make_subsampled_dataset(
        directory, class_to_idx, extensions=None, is_valid_file=None,
        sampling_ratio=1., nb_classes=None):
    instances = []
    directory = os.path.expanduser(directory)
    both_none = extensions is None and is_valid_file is None
    both_something = extensions is not None and is_valid_file is not None
    if both_none or both_something:
        raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
    if extensions is not None:
        def is_valid_file(x: str) -> bool:
            return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
    is_valid_file = cast(Callable[[str], bool], is_valid_file)
    for i, target_class in enumerate(sorted(class_to_idx.keys())):
        if nb_classes is not None and i >= nb_classes:
            break
        class_index = class_to_idx[target_class]
        target_dir = os.path.join(directory, target_class)
        if not os.path.isdir(target_dir):
            continue
        num_imgs = int(len(os.listdir(target_dir)) * sampling_ratio)
        imgs = 0
        for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
            for fname in sorted(fnames):
                if imgs == num_imgs:
                    break
                path = os.path.join(root, fname)
                if is_valid_file(path):
                    item = path, class_index
                    instances.append(item)
                    imgs += 1
    return instances


class INatDataset(ImageFolder):
    def __init__(self, root, train=True, year=2018, transform=None, target_transform=None,
                 category='name', loader=default_loader):
        self.transform = transform
        self.loader = loader
        self.target_transform = target_transform
        self.year = year
        # assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name']
        path_json = os.path.join(root, f'{"train" if train else "val"}{year}.json')
        with open(path_json) as json_file:
            data = json.load(json_file)

        with open(os.path.join(root, 'categories.json')) as json_file:
            data_catg = json.load(json_file)

        path_json_for_targeter = os.path.join(root, f"train{year}.json")

        with open(path_json_for_targeter) as json_file:
            data_for_targeter = json.load(json_file)

        targeter = {}
        indexer = 0
        for elem in data_for_targeter['annotations']:
            king = []
            king.append(data_catg[int(elem['category_id'])][category])
            if king[0] not in targeter.keys():
                targeter[king[0]] = indexer
                indexer += 1
        self.nb_classes = len(targeter)

        self.samples = []
        for elem in data['images']:
            cut = elem['file_name'].split('/')
            target_current = int(cut[2])
            path_current = os.path.join(root, cut[0], cut[2], cut[3])

            categors = data_catg[target_current]
            target_current_true = targeter[categors[category]]
            self.samples.append((path_current, target_current_true))

    # __getitem__ and __len__ inherited from ImageFolder


class SubsampledDatasetFolder(DatasetFolder):

    def __init__(self, root, loader, extensions=None, transform=None,
                 target_transform=None, is_valid_file=None, sampling_ratio=1., nb_classes=None):
        super(DatasetFolder, self).__init__(root, transform=transform,
                                            target_transform=target_transform)
        classes, class_to_idx = self._find_classes(self.root)
        samples = make_subsampled_dataset(self.root, class_to_idx, extensions, is_valid_file,
                                          sampling_ratio=sampling_ratio, nb_classes=nb_classes)
        if len(samples) == 0:
            msg = "Found 0 files in subfolders of: {}\n".format(self.root)
            if extensions is not None:
                msg += "Supported extensions are: {}".format(",".join(extensions))
            raise RuntimeError(msg)

        self.loader = loader
        self.extensions = extensions

        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        self.targets = [s[1] for s in samples]

    # __getitem__ and __len__ inherited from DatasetFolder


class ImageNetDataset(SubsampledDatasetFolder):
    def __init__(self, root, loader=default_loader, is_valid_file=None, **kwargs):
        super(ImageNetDataset, self).__init__(
            root, loader,
            IMG_EXTENSIONS if is_valid_file is None else None,
            is_valid_file=is_valid_file, **kwargs)
        self.imgs = self.samples


def build_dataset(is_train, args):
    transform = build_transform(is_train, args)

    if args.data_set == 'CIFAR10':
        args.data_path = "/datasets01/cifar-pytorch/11222017/"
        dataset = datasets.CIFAR10(args.data_path, train=is_train, transform=transform)
        nb_classes = 10
    elif args.data_set == 'CIFAR100':
        args.data_path = "/datasets01/cifar100/022818/data/"
        dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
        nb_classes = 100
    elif args.data_set == 'IMNET':
        root = os.path.join(args.data_path, 'train' if is_train else 'val')
        dataset = ImageNetDataset(
            root, transform=transform,
            sampling_ratio=(args.sampling_ratio if is_train else 1.),
            nb_classes=args.nb_classes)
        nb_classes = args.nb_classes if args.nb_classes is not None else 1000
    elif args.data_set == 'INAT':
        dataset = INatDataset(args.data_path, train=is_train, year=2018,
                              category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    elif args.data_set == 'INAT19':
        args.data_path = "/datasets01/inaturalist/090619/"
        dataset = INatDataset(args.data_path, train=is_train, year=2019,
                              category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes

    return dataset, nb_classes


def build_transform(is_train, args):
    resize_im = args.input_size > 32
    if is_train:
        # this should always dispatch to transforms_imagenet_train
        transform = create_transform(
            input_size=args.input_size,
            is_training=True,
            color_jitter=args.color_jitter,
            auto_augment=args.aa,
            interpolation=args.train_interpolation,
            re_prob=args.reprob,
            re_mode=args.remode,
            re_count=args.recount,
        )
        if not resize_im:
            # replace RandomResizedCropAndInterpolation with RandomCrop
            transform.transforms[0] = transforms.RandomCrop(
                args.input_size, padding=4)
        return transform

    t = []
    if resize_im:
        size = int((256 / 224) * args.input_size)
        t.append(
            # to maintain same ratio w.r.t. 224 images
            transforms.Resize(size, interpolation=3),
        )
        t.append(transforms.CenterCrop(args.input_size))

    t.append(transforms.ToTensor())
    t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
    return transforms.Compose(t)
convit-main
datasets.py
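A minimal usage sketch for build_dataset above (illustrative only: the Namespace is a hypothetical stand-in for the training script's parsed arguments, and the ImageNet path is an assumption):

import argparse

# Hypothetical stand-in for the argparse arguments that the training script
# would pass in; only the fields read by build_dataset/build_transform are set.
args = argparse.Namespace(
    data_set='IMNET', data_path='/path/to/imagenet',  # assumed local path
    input_size=224, color_jitter=0.4, aa='rand-m9-mstd0.5-inc1',
    train_interpolation='bicubic', reprob=0.25, remode='pixel', recount=1,
    sampling_ratio=0.1, nb_classes=100)
dataset_train, nb_classes = build_dataset(is_train=True, args=args)
print(len(dataset_train), nb_classes)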
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Train and eval functions used in main.py
"""
import math
import sys
from typing import Iterable, Optional

import torch

from timm.data import Mixup
from timm.utils import accuracy, ModelEma

import utils


def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
                    model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None):
    # TODO fix this for finetuning
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10

    for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)

        if mixup_fn is not None:
            samples, targets = mixup_fn(samples, targets)

        with torch.cuda.amp.autocast():
            outputs = model(samples)
            loss = criterion(outputs, targets)

        loss_value = loss.item()

        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            sys.exit(1)

        optimizer.zero_grad()

        # this attribute is added by timm on one optimizer (adahessian)
        is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
        loss_scaler(loss, optimizer, clip_grad=max_norm,
                    parameters=model.parameters(), create_graph=is_second_order)

        torch.cuda.synchronize()
        if model_ema is not None:
            model_ema.update(model)

        metric_logger.update(loss=loss_value)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}


@torch.no_grad()
def evaluate(data_loader, model, device):
    criterion = torch.nn.CrossEntropyLoss()

    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    # switch to evaluation mode
    model.eval()

    for images, target in metric_logger.log_every(data_loader, 10, header):
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)

        # compute output
        with torch.cuda.amp.autocast():
            output = model(images)
            loss = criterion(output, target)

        acc1, acc5 = accuracy(output, target, topk=(1, 5))

        batch_size = images.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)

    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
          .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))

    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
convit-main
engine.py
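A quick sketch of calling evaluate with dummy data (illustrative; real callers pass an ImageNet loader, and on a CPU-only machine the autocast context inside evaluate is simply a no-op):

import torch
from torch.utils.data import DataLoader, TensorDataset

# Tiny stand-in classifier and a 10-class dummy dataset (both hypothetical).
model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 10))
data = TensorDataset(torch.randn(64, 3, 32, 32), torch.randint(0, 10, (64,)))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
stats = evaluate(DataLoader(data, batch_size=16), model.to(device), device)
print(stats['acc1'], stats['loss'])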
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Misc functions, including distributed helpers.

Mostly copy-paste from torchvision references.
"""
import io
import os
import time
import datetime
from collections import defaultdict, deque

import torch
import torch.distributed as dist


class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt

    def update(self, value, n=1):
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)


class MetricLogger(object):
    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        log_msg = [
            header,
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}'
        ]
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))


def _load_checkpoint_for_ema(model_ema, checkpoint):
    """
    Workaround for ModelEma._load_checkpoint to accept an already-loaded object
    """
    mem_file = io.BytesIO()
    torch.save(checkpoint, mem_file)
    mem_file.seek(0)
    model_ema._load_checkpoint(mem_file)


def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print


def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True


def get_world_size():
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    if not is_dist_avail_and_initialized():
        return 0
    return dist.get_rank()


def is_main_process():
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    if is_main_process():
        torch.save(*args, **kwargs)


def init_distributed_mode(args):
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)


@torch.no_grad()
def compute_throughput(model, batch_size=128, resolution=224):
    torch.cuda.empty_cache()
    warmup_iters = 3
    num_iters = 30

    model.eval()
    model.to("cuda")

    timing = []
    inputs = torch.randn(batch_size, 3, resolution, resolution, device="cuda")

    # warmup
    for _ in range(warmup_iters):
        model(inputs)

    torch.cuda.synchronize()
    for _ in range(num_iters):
        start = time.time()
        model(inputs)
        torch.cuda.synchronize()
        timing.append(time.time() - start)

    timing = torch.as_tensor(timing, dtype=torch.float32)
    return batch_size / timing.mean()
convit-main
utils.py
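A single-process sketch of the MetricLogger/SmoothedValue pair above (no distributed setup required; the dummy loop stands in for a data loader):

logger = MetricLogger(delimiter="  ")
logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))
for step in logger.log_every(range(100), print_freq=10, header='Demo:'):
    logger.update(loss=1.0 / (step + 1), lr=5e-4)  # any float metrics
print("final:", logger.loss.global_avg)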
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import gc

from pathlib import Path

from timm.data import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma

from datasets import build_dataset
from engine import train_one_epoch, evaluate
from samplers import RASampler
import models
import utils


def get_args_parser():
    parser = argparse.ArgumentParser('ConViT training and evaluation script', add_help=False)
    parser.add_argument('--batch-size', default=64, type=int)
    parser.add_argument('--epochs', default=300, type=int)

    # Model parameters
    parser.add_argument('--model', default='convit_small', type=str, metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--pretrained', action='store_true')
    parser.add_argument('--input-size', default=224, type=int, help='images input size')
    parser.add_argument('--embed_dim', default=48, type=int, help='embedding dimension per head')

    parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
                        help='Dropout rate (default: 0.)')
    parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')
    parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
                        help='Drop block rate (default: None)')

    parser.add_argument('--model-ema', action='store_true')
    parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
    parser.set_defaults(model_ema=False)
    parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
    parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')

    # Optimizer parameters
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
    parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight-decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')

    # Learning rate schedule parameters
    parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine")')
    parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
                        help='learning rate (default: 5e-4)')
    parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
                        help='learning rate noise on/off epoch percentages')
    parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
                        help='learning rate noise limit percent (default: 0.67)')
    parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
                        help='learning rate noise std-dev (default: 1.0)')
    parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
                        help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')

    parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
                        help='epoch interval to decay LR')
    parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
                        help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
                        help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
    parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10)')
    parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
                        help='LR decay rate (default: 0.1)')

    # Augmentation parameters
    parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
                        help='Color jitter factor (default: 0.4)')
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--smoothing', type=float, default=0.1,
                        help='Label smoothing (default: 0.1)')
    parser.add_argument('--train-interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic default: "bicubic")')

    parser.add_argument('--repeated-aug', action='store_true')
    parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
    parser.set_defaults(repeated_aug=True)

    # * Random Erase params
    parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
                        help='Random erase prob (default: 0.25)')
    parser.add_argument('--remode', type=str, default='pixel',
                        help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1,
                        help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False,
                        help='Do not random erase first (clean) augmentation split')

    # * Mixup params
    parser.add_argument('--mixup', type=float, default=0.8,
                        help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
    parser.add_argument('--cutmix', type=float, default=1.0,
                        help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
    parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
                        help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup-prob', type=float, default=1.0,
                        help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
                        help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup-mode', type=str, default='batch',
                        help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')

    # Dataset parameters
    parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', type=str,
                        help='dataset path')
    parser.add_argument('--data-set', default='IMNET',
                        choices=['CIFAR10', 'CIFAR100', 'IMNET', 'INAT', 'INAT19'],
                        type=str, help='dataset name')
    parser.add_argument('--sampling_ratio', default=1., type=float,
                        help='fraction of samples to keep in the training set of imagenet')
    parser.add_argument('--nb_classes', default=None, type=int,
                        help='number of classes in imagenet')
    parser.add_argument('--inat-category', default='name',
                        choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
                        type=str, help='semantic granularity')

    parser.add_argument('--output_dir', default='',
                        help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--save_every', default=None, type=int, help='save model every epochs')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin-mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem', help='')
    parser.set_defaults(pin_mem=True)

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')

    # locality parameters
    parser.add_argument('--local_up_to_layer', default=10, type=int,
                        help='number of GPSA layers')
    parser.add_argument('--locality_strength', default=1., type=float,
                        help='Determines how focused each head is around its attention center')

    return parser


def main(args):
    utils.init_distributed_mode(args)
    print(args)
    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)

    cudnn.benchmark = True

    dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
    dataset_val, _ = build_dataset(is_train=False, args=args)

    if True:  # args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        if args.repeated_aug:
            sampler_train = RASampler(
                dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
            )
        else:
            sampler_train = torch.utils.data.DistributedSampler(
                dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
            )
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)

    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=True,
    )

    data_loader_val = torch.utils.data.DataLoader(
        dataset_val, batch_size=int(1.5 * args.batch_size),
        shuffle=False, num_workers=args.num_workers,
        pin_memory=args.pin_mem, drop_last=False
    )

    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        mixup_fn = Mixup(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=args.nb_classes)

    print(f"Creating model: {args.model}")
    model = create_model(
        args.model,
        pretrained=args.pretrained,
        num_classes=args.nb_classes,
        drop_rate=args.drop,
        drop_path_rate=args.drop_path,
        drop_block_rate=args.drop_block,
        local_up_to_layer=args.local_up_to_layer,
        locality_strength=args.locality_strength,
        embed_dim=args.embed_dim,
    )
    print(model)

    model.to(device)

    model_ema = None
    if args.model_ema:
        model_ema = ModelEma(
            model,
            decay=args.model_ema_decay,
            device='cpu' if args.model_ema_force_cpu else '',
            resume='')

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)

    linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
    args.lr = linear_scaled_lr
    optimizer = create_optimizer(args, model)
    loss_scaler = NativeScaler()
    lr_scheduler, _ = create_scheduler(args, optimizer)

    if args.mixup > 0.:
        # smoothing is handled with mixup label transform
        criterion = SoftTargetCrossEntropy()
    elif args.smoothing:
        criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        criterion = torch.nn.CrossEntropyLoss()

    output_dir = Path(args.output_dir)
    torch.save(args, output_dir / "args.pyT")

    if args.resume:
        if str(args.resume).startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.resume, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            args.start_epoch = checkpoint['epoch'] + 1
            if args.model_ema:
                utils._load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])

    if args.eval:
        throughput = utils.compute_throughput(model, resolution=args.input_size)
        print(f"Throughput : {throughput:.2f}")
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        return

    print("Start training")
    start_time = time.time()
    max_accuracy = 0.0
    for epoch in range(args.start_epoch, args.epochs):
        gc.collect()
        if args.distributed:
            data_loader_train.sampler.set_epoch(epoch)

        train_stats = train_one_epoch(
            model, criterion, data_loader_train,
            optimizer, device, epoch, loss_scaler,
            args.clip_grad, model_ema, mixup_fn
        )

        lr_scheduler.step(epoch)
        if args.output_dir:
            checkpoint_paths = [output_dir / 'checkpoint.pth']
            if args.save_every is not None:
                if epoch % args.save_every == 0:
                    checkpoint_paths.append(output_dir / 'checkpoint_{}.pth'.format(epoch))
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master({
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'model_ema': get_state_dict(model_ema) if model_ema else None,
                    'args': args,
                }, checkpoint_path)

        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        max_accuracy = max(max_accuracy, test_stats["acc1"])
        print(f'Max accuracy: {max_accuracy:.2f}%')

        nonlocality = {}
        gating_params = {}
        distances = {}
        batch = next(iter(data_loader_val))[0]
        batch = batch.to(device)
        batch = model_without_ddp.patch_embed(batch)
        for l in range(len(model_without_ddp.blocks)):
            attn = model_without_ddp.blocks[l].attn
            nonlocality[l] = attn.get_attention_map(batch).detach().cpu().numpy().tolist()
            if 'convit' in args.model and l < args.local_up_to_layer:
                p = attn.pos_proj.weight
                span = -1 / p.data[:, -1]
                dist_x = p.data[:, 0] * span / 2
                dist_y = p.data[:, 1] * span / 2
                dist = (dist_x ** 2 + dist_y ** 2) ** .5
                distances[l] = dist.cpu().numpy().tolist()
                gating_params[l] = attn.gating_param.data.cpu().numpy().tolist()

        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     **{f'test_{k}': v for k, v in test_stats.items()},
                     **{f'nonlocality_{k}': v for k, v in nonlocality.items()},
                     **{f'distances_{k}': v for k, v in distances.items()},
                     **{f'gating_params_{k}': v for k, v in gating_params.items()},
                     'epoch': epoch,
                     'n_parameters': n_parameters}
        print(log_stats)

        if args.output_dir and utils.is_main_process():
            with (output_dir / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))


if __name__ == '__main__':
    parser = argparse.ArgumentParser('ConViT training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    if args.output_dir:
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    main(args)
convit-main
main.py
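A small sketch of building the argument parser above and inspecting its defaults without launching training (illustrative; it assumes get_args_parser is importable from this module):

import argparse

parser = argparse.ArgumentParser('ConViT', parents=[get_args_parser()])
args = parser.parse_args([])  # empty argv -> all defaults
print(args.model, args.lr, args.local_up_to_layer)  # convit_small 0.0005 10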
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
import math

import torch
import torch.distributed as dist


class RASampler(torch.utils.data.Sampler):
    """Sampler that restricts data loading to a subset of the dataset for
    distributed training, with repeated augmentation.

    It ensures that each augmented version of a sample is visible to a
    different process (GPU).

    Heavily based on torch.utils.data.DistributedSampler
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.num_samples = int(math.ceil(len(self.dataset) * 3.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        # self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))
        self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))
        self.shuffle = shuffle

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        if self.shuffle:
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = list(range(len(self.dataset)))

        # add extra samples to make it evenly divisible
        indices = [ele for ele in indices for i in range(3)]
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # subsample
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices[:self.num_selected_samples])

    def __len__(self):
        return self.num_selected_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
convit-main
samplers.py
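RASampler normally reads the world size and rank from an initialized process group; a single-process sketch can pass them explicitly (illustrative; note the floor-to-256 truncation means tiny datasets would yield zero selected samples, so we use 512 items here):

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(512))
sampler = RASampler(dataset, num_replicas=2, rank=0, shuffle=True)
sampler.set_epoch(0)  # reseed the shuffle each epoch
loader = DataLoader(dataset, sampler=sampler, batch_size=4)
print(len(sampler))  # 256 selected samples for this replica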
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the CC-by-NC license found in the
# LICENSE file in the root directory of this source tree.
#
'''These modules are adapted from those of timm, see
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
'''

from functools import partial

import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt

from timm.models.helpers import load_pretrained
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model


class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None,
                 act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class GPSA(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
                 attn_drop=0., proj_drop=0., locality_strength=1., use_local_init=True):
        super().__init__()
        self.num_heads = num_heads
        self.dim = dim
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.v = nn.Linear(dim, dim, bias=qkv_bias)

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.pos_proj = nn.Linear(3, num_heads)
        self.proj_drop = nn.Dropout(proj_drop)
        self.locality_strength = locality_strength
        self.gating_param = nn.Parameter(torch.ones(self.num_heads))
        self.apply(self._init_weights)
        if use_local_init:
            self.local_init(locality_strength=locality_strength)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        B, N, C = x.shape
        if not hasattr(self, 'rel_indices') or self.rel_indices.size(1) != N:
            self.get_rel_indices(N)

        attn = self.get_attention(x)
        v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def get_attention(self, x):
        B, N, C = x.shape
        qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k = qk[0], qk[1]
        pos_score = self.rel_indices.expand(B, -1, -1, -1)
        pos_score = self.pos_proj(pos_score).permute(0, 3, 1, 2)
        patch_score = (q @ k.transpose(-2, -1)) * self.scale
        patch_score = patch_score.softmax(dim=-1)
        pos_score = pos_score.softmax(dim=-1)

        # gated combination of content-based and position-based attention
        gating = self.gating_param.view(1, -1, 1, 1)
        attn = (1. - torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
        attn /= attn.sum(dim=-1).unsqueeze(-1)
        attn = self.attn_drop(attn)
        return attn

    def get_attention_map(self, x, return_map=False):
        attn_map = self.get_attention(x).mean(0)  # average over batch
        distances = self.rel_indices.squeeze()[:, :, -1] ** .5
        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= distances.size(0)
        if return_map:
            return dist, attn_map
        else:
            return dist

    def local_init(self, locality_strength=1.):
        self.v.weight.data.copy_(torch.eye(self.dim))
        locality_distance = 1  # max(1,1/locality_strength**.5)

        kernel_size = int(self.num_heads ** .5)
        center = (kernel_size - 1) / 2 if kernel_size % 2 == 0 else kernel_size // 2
        for h1 in range(kernel_size):
            for h2 in range(kernel_size):
                position = h1 + kernel_size * h2
                self.pos_proj.weight.data[position, 2] = -1
                self.pos_proj.weight.data[position, 1] = 2 * (h1 - center) * locality_distance
                self.pos_proj.weight.data[position, 0] = 2 * (h2 - center) * locality_distance
        self.pos_proj.weight.data *= locality_strength

    def get_rel_indices(self, num_patches):
        img_size = int(num_patches ** .5)
        rel_indices = torch.zeros(1, num_patches, num_patches, 3)
        ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1)
        indx = ind.repeat(img_size, img_size)
        indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1)
        indd = indx ** 2 + indy ** 2
        rel_indices[:, :, :, 2] = indd.unsqueeze(0)
        rel_indices[:, :, :, 1] = indy.unsqueeze(0)
        rel_indices[:, :, :, 0] = indx.unsqueeze(0)
        device = self.qk.weight.device
        self.rel_indices = rel_indices.to(device)


class MHSA(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def get_attention_map(self, x, return_map=False):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn_map = (q @ k.transpose(-2, -1)) * self.scale
        attn_map = attn_map.softmax(dim=-1).mean(0)

        img_size = int(N ** .5)
        ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1)
        indx = ind.repeat(img_size, img_size)
        indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1)
        indd = indx ** 2 + indy ** 2
        distances = indd ** .5
        distances = distances.to(x.device)

        dist = torch.einsum('nm,hnm->h', (distances, attn_map))
        dist /= N
        if return_map:
            return dist, attn_map
        else:
            return dist

    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class Block(nn.Module):

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0.,
                 attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm,
                 use_gpsa=True, **kwargs):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.use_gpsa = use_gpsa
        if self.use_gpsa:
            self.attn = GPSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
                             attn_drop=attn_drop, proj_drop=drop, **kwargs)
        else:
            self.attn = MHSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
                             attn_drop=attn_drop, proj_drop=drop, **kwargs)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x


class PatchEmbed(nn.Module):
    """ Image to Patch Embedding, from timm
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches

        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.apply(self._init_weights)

    def forward(self, x):
        B, C, H, W = x.shape
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x).flatten(2).transpose(1, 2)
        return x

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)


class HybridEmbed(nn.Module):
    """ CNN Feature Map Embedding, from timm
    """

    def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768):
        super().__init__()
        assert isinstance(backbone, nn.Module)
        img_size = to_2tuple(img_size)
        self.img_size = img_size
        self.backbone = backbone
        if feature_size is None:
            with torch.no_grad():
                training = backbone.training
                if training:
                    backbone.eval()
                o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))[-1]
                feature_size = o.shape[-2:]
                feature_dim = o.shape[1]
                backbone.train(training)
        else:
            feature_size = to_2tuple(feature_size)
            feature_dim = self.backbone.feature_info.channels()[-1]
        self.num_patches = feature_size[0] * feature_size[1]
        self.proj = nn.Linear(feature_dim, embed_dim)
        self.apply(self._init_weights)

    # same init scheme as the other modules in this file; the original text
    # called self.apply(self._init_weights) without defining the method
    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        x = self.backbone(x)[-1]
        x = x.flatten(2).transpose(1, 2)
        x = self.proj(x)
        return x


class VisionTransformer(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000,
                 embed_dim=48, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False,
                 qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
                 hybrid_backbone=None, norm_layer=nn.LayerNorm, global_pool=None,
                 local_up_to_layer=10, locality_strength=1., use_pos_embed=True):
        super().__init__()
        self.num_classes = num_classes
        self.local_up_to_layer = local_up_to_layer
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.locality_strength = locality_strength
        self.use_pos_embed = use_pos_embed

        if hybrid_backbone is not None:
            self.patch_embed = HybridEmbed(
                hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
        else:
            self.patch_embed = PatchEmbed(
                img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        self.num_patches = num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        if self.use_pos_embed:
            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.pos_embed, std=.02)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i],
                norm_layer=norm_layer, use_gpsa=True, locality_strength=locality_strength)
            if i < local_up_to_layer else
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i],
                norm_layer=norm_layer, use_gpsa=False)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)

        # Classifier head
        self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')]
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()

        trunc_normal_(self.cls_token, std=.02)
        self.head.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token'}

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x):
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)

        if self.use_pos_embed:
            x = x + self.pos_embed
        x = self.pos_drop(x)

        # the class token is only appended once the network switches from
        # GPSA layers to standard self-attention layers
        for u, blk in enumerate(self.blocks):
            if u == self.local_up_to_layer:
                x = torch.cat((cls_tokens, x), dim=1)
            x = blk(x)

        x = self.norm(x)
        return x[:, 0]

    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x
convit-main
convit.py
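An instantiation sketch for the VisionTransformer above (a tiny hypothetical config for illustration; the released ConViT models are built through timm's create_model with names like 'convit_small'):

import torch

model = VisionTransformer(
    img_size=32, patch_size=4, embed_dim=48, depth=4, num_heads=4,
    num_classes=10, local_up_to_layer=2, locality_strength=1.0)
out = model(torch.randn(2, 3, 32, 32))  # blocks 0-1 use GPSA, blocks 2-3 MHSA
print(out.shape)  # torch.Size([2, 10])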
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.

import os
import argparse

# run each job single-threaded, parallelize using pathos
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"

# multi-socket friendly args
os.environ["KMP_AFFINITY"] = "granularity=fine,compact,1,0"
os.environ["KMP_BLOCKTIME"] = "1"

import torch

# force torch to 1 thread too just in case
torch.set_num_interop_threads(1)
torch.set_num_threads(1)

import time
from copy import deepcopy
from pathlib import Path

from aepsych.benchmark import BenchmarkLogger, PathosBenchmark, combine_benchmarks

from problems import (
    DiscrimHighDim,
    Hartmann6Binary,
    ContrastSensitivity6d,  # This takes a few minutes to instantiate due to fitting the model
)

chunks = 5
reps_per_chunk = 20
log_frequency = 10
large_opt_size = 750
nproc = 124
global_seed = 1000
inits = [100, 250, 500]

if __name__ == "__main__":

    out_fname_base = Path("../data/init_sensitivity")
    out_fname_base.mkdir(parents=True, exist_ok=True)  # make an output folder if it does not exist

    problems = [
        DiscrimHighDim(),
        Hartmann6Binary(),
        ContrastSensitivity6d(),
    ]

    bench_config = {
        "common": {
            "outcome_type": "single_probit",
            "strategy_names": "[init_strat, opt_strat]",
        },
        "init_strat": {"generator": "SobolGenerator"},
        "opt_strat": {
            "model": "GPClassificationModel",
            "generator": "OptimizeAcqfGenerator",
            "refit_every": 10,
        },
        "GPClassificationModel": {
            "inducing_size": 100,
            "mean_covar_factory": "default_mean_covar_factory",
            "inducing_point_method": "auto",
        },
        "default_mean_covar_factory": {
            "fixed_mean": False,
            "lengthscale_prior": "gamma",
            "outputscale_prior": "gamma",
            "kernel": "RBFKernel",
        },
        "OptimizeAcqfGenerator": {
            "acqf": [
                "LocalMI",
                "GlobalMI",
                "EAVC",
            ],
            "restarts": 2,
            "samps": 100,
        },
        # Add the probit transform for non-probit-specific acqfs
        "MCLevelSetEstimation": {"objective": "ProbitObjective"},
        "BernoulliMCMutualInformation": {"objective": "ProbitObjective"},
        "MCPosteriorVariance": {"objective": "ProbitObjective"},
    }

    for chunk in range(chunks):
        for problem in problems:
            out_fname = Path(f"{out_fname_base}/{problem.name}_chunk{chunk}_out.csv")
            intermediate_fname = Path(
                f"{out_fname_base}/{problem.name}_chunk{chunk}_checkpoint.csv"
            )
            print(f"starting {problem.name} benchmark... chunk {chunk}")
            logger = BenchmarkLogger(log_every=log_frequency)
            benches = []
            for init in inits:
                local_config = deepcopy(bench_config)
                local_config["common"]["lb"] = str(problem.lb.tolist())
                local_config["common"]["ub"] = str(problem.ub.tolist())
                local_config["common"]["target"] = problem.threshold
                local_config["init_strat"]["n_trials"] = init
                local_config["opt_strat"]["n_trials"] = large_opt_size - init
                benches.append(
                    PathosBenchmark(
                        nproc=nproc,
                        problem=problem,
                        logger=logger,
                        configs=local_config,
                        global_seed=global_seed,
                        n_reps=reps_per_chunk,
                    )
                )
            bench = combine_benchmarks(*benches)
            bench.start_benchmarks()

            # checkpoint every minute in case something breaks
            while not bench.is_done:
                time.sleep(60)
                collate_start = time.time()
                print(
                    f"Checkpointing bench {problem} chunk {chunk}..., {len(bench.futures)}/{bench.num_benchmarks} alive"
                )
                bench.collate_benchmarks(wait=False)
                temp_results = bench.logger.pandas()
                if len(temp_results) > 0:
                    temp_results["rep"] = temp_results["rep"] + reps_per_chunk * chunk
                    temp_results["problem"] = problem.name
                    temp_results.to_csv(intermediate_fname)
                print(
                    f"Collate done in {time.time()-collate_start} seconds, {len(bench.futures)}/{bench.num_benchmarks} left"
                )

            print(f"Problem {problem} chunk {chunk} fully done!")
            final_results = bench.logger.pandas()
            final_results["rep"] = final_results["rep"] + reps_per_chunk * chunk
            final_results["problem"] = problem.name
            final_results.to_csv(out_fname)
bernoulli_lse-main
init_sensitivity_study.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.

import os
import argparse
from copy import deepcopy
from pathlib import Path

global_seed = 1000
n_reps = 20

if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="Gentime Benchmarks")
    parser.add_argument("--nproc", type=int, default=1)
    parser.add_argument(
        "--output_path", type=Path, default=Path("../data/gentime_bench")
    )
    args = parser.parse_args()

    os.environ["OMP_NUM_THREADS"] = str(args.nproc)
    os.environ["MKL_NUM_THREADS"] = str(args.nproc)
    os.environ["NUMEXPR_NUM_THREADS"] = str(args.nproc)

    # multi-socket friendly args
    os.environ["KMP_AFFINITY"] = "granularity=fine,compact,1,0"
    os.environ["KMP_BLOCKTIME"] = str(args.nproc)

    import torch

    torch.set_num_interop_threads(args.nproc)
    torch.set_num_threads(args.nproc)

    from aepsych.benchmark import BenchmarkLogger, Benchmark

    from problems import (
        DiscrimLowDim,
        DiscrimHighDim,
        Hartmann6Binary,
    )

    out_fname_base = args.output_path
    out_fname_base.mkdir(parents=True, exist_ok=True)  # make an output folder if it does not exist

    problems = [
        DiscrimLowDim(),
        DiscrimHighDim(),
        Hartmann6Binary(),
    ]

    bench_config = {
        "common": {
            "outcome_type": "single_probit",
            "strategy_names": "[init_strat, opt_strat]",
        },
        "init_strat": {"n_trials": [10, 250, 500, 750], "generator": "SobolGenerator"},
        "opt_strat": {
            "n_trials": 2,
            "model": "GPClassificationModel",
            "generator": "OptimizeAcqfGenerator",
            "refit_every": 1,
        },
        "GPClassificationModel": {
            "inducing_size": 100,
            "mean_covar_factory": "default_mean_covar_factory",
            "inducing_point_method": "auto",
        },
        "default_mean_covar_factory": {
            "fixed_mean": False,
            "lengthscale_prior": "gamma",
            "outputscale_prior": "gamma",
            "kernel": "RBFKernel",
        },
        "OptimizeAcqfGenerator": {
            "acqf": [
                "LocalMI",
                "MCLevelSetEstimation",  # Straddle
                "EAVC",
            ],
            "restarts": 2,
            "samps": 100,
        },
        # Add the probit transform for non-probit-specific acqfs
        "MCLevelSetEstimation": {"objective": "ProbitObjective"},
        "BernoulliMCMutualInformation": {"objective": "ProbitObjective"},
        "MCPosteriorVariance": {"objective": "ProbitObjective"},
    }

    for problem in problems:
        out_fname = Path(f"{out_fname_base}/{problem.name}_{args.nproc}threads_out.csv")
        print(f"starting {problem.name} benchmark...")

        local_config = deepcopy(bench_config)
        local_config["common"]["lb"] = str(problem.lb.tolist())
        local_config["common"]["ub"] = str(problem.ub.tolist())
        local_config["common"]["target"] = problem.threshold

        logger = BenchmarkLogger(log_every=1)
        bench = Benchmark(
            problem=problem,
            logger=logger,
            configs=local_config,
            global_seed=global_seed,
            n_reps=n_reps,
        )
        bench.run_benchmarks()

        print(f"Problem {problem} fully done!")
        final_results = bench.logger.pandas()
        final_results["problem"] = problem.name
        final_results["nproc"] = args.nproc
        final_results.to_csv(out_fname)
bernoulli_lse-main
gentime_bench.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.

import numpy as np
import torch
from aepsych.benchmark.test_functions import (
    modified_hartmann6,
    discrim_highdim,
    novel_discrimination_testfun,
)
from aepsych.models import GPClassificationModel
from aepsych.benchmark.problem import LSEProblem


class LSEProblemWithEdgeLogging(LSEProblem):
    eps = 0.05

    def evaluate(self, strat):
        metrics = super().evaluate(strat)

        # add number of edge samples to the log

        # get the trials selected by the final strat only
        n_opt_trials = strat.strat_list[-1].n_trials

        lb, ub = strat.lb, strat.ub
        r = ub - lb
        lb2 = lb + self.eps * r
        ub2 = ub - self.eps * r

        # strat.x is a torch tensor, so use the torch op here (the original
        # np.logical_or relied on implicit numpy/torch conversion)
        near_edge = (
            torch.logical_or(
                (strat.x[-n_opt_trials:, :] <= lb2),
                (strat.x[-n_opt_trials:, :] >= ub2),
            )
            .any(dim=-1)
            .double()
        )

        metrics["prop_edge_sampling_mean"] = near_edge.mean().item()
        metrics["prop_edge_sampling_err"] = (
            2 * near_edge.std() / np.sqrt(len(near_edge))
        ).item()
        return metrics


class DiscrimLowDim(LSEProblemWithEdgeLogging):
    name = "discrim_lowdim"
    bounds = torch.tensor([[-1, 1], [-1, 1]], dtype=torch.double).T
    threshold = 0.75

    def f(self, x: torch.Tensor) -> torch.Tensor:
        return torch.tensor(novel_discrimination_testfun(x), dtype=torch.double)


class DiscrimHighDim(LSEProblemWithEdgeLogging):
    name = "discrim_highdim"
    threshold = 0.75
    bounds = torch.tensor(
        [
            [-1, 1],
            [-1, 1],
            [0.5, 1.5],
            [0.05, 0.15],
            [0.05, 0.2],
            [0, 0.9],
            [0, 3.14 / 2],
            [0.5, 2],
        ],
        dtype=torch.double,
    ).T

    def f(self, x: torch.Tensor) -> torch.Tensor:
        return torch.tensor(discrim_highdim(x), dtype=torch.double)


class Hartmann6Binary(LSEProblemWithEdgeLogging):
    name = "hartmann6_binary"
    threshold = 0.5
    bounds = torch.stack(
        (
            torch.zeros(6, dtype=torch.double),
            torch.ones(6, dtype=torch.double),
        )
    )

    def f(self, X: torch.Tensor) -> torch.Tensor:
        y = torch.tensor([modified_hartmann6(x) for x in X], dtype=torch.double)
        f = 3 * y - 2.0
        return f


class ContrastSensitivity6d(LSEProblemWithEdgeLogging):
    """
    Uses a surrogate model fit to real data from a contrast sensitivity study.
    """

    name = "contrast_sensitivity_6d"
    threshold = 0.75
    bounds = torch.tensor(
        [[-1.5, 0], [-1.5, 0], [0, 20], [0.5, 7], [1, 10], [0, 10]],
        dtype=torch.double,
    ).T

    def __init__(self):
        # Load the data
        self.data = np.loadtxt("data/csf_dataset.csv", delimiter=",", skiprows=1)
        y = torch.LongTensor(self.data[:, 0])
        x = torch.Tensor(self.data[:, 1:])

        # Fit a model, with a large number of inducing points
        self.m = GPClassificationModel(
            lb=self.bounds[0],
            ub=self.bounds[1],
            inducing_size=100,
            inducing_point_method="kmeans++",
        )

        self.m.fit(
            x,
            y,
        )

    def f(self, X: torch.Tensor) -> torch.Tensor:
        # clamp f to 0 since we expect p(x) to be lower-bounded at 0.5
        return torch.clamp(self.m.predict(torch.tensor(X))[0], min=0)
bernoulli_lse-main
problems.py
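A quick-check sketch for the problem classes above (requires aepsych to be installed; DiscrimLowDim wraps the novel discrimination test function over [-1, 1]^2):

import torch

problem = DiscrimLowDim()
x = torch.rand(4, 2, dtype=torch.double) * 2 - 1  # points inside the bounds
print(problem.f(x))  # latent values at the sampled points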
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.

import os
import argparse

# run each job single-threaded, parallelize using pathos
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"

# multi-socket friendly args
os.environ["KMP_AFFINITY"] = "granularity=fine,compact,1,0"
os.environ["KMP_BLOCKTIME"] = "1"

import torch

# force torch to 1 thread too just in case
torch.set_num_interop_threads(1)
torch.set_num_threads(1)

import time
from copy import deepcopy
from pathlib import Path

from aepsych.benchmark import BenchmarkLogger, PathosBenchmark

from problems import (
    DiscrimLowDim,
    DiscrimHighDim,
    Hartmann6Binary,
    ContrastSensitivity6d,  # This takes a few minutes to instantiate due to fitting the model
)

problem_map = {
    "discrim_lowdim": DiscrimLowDim,
    "discrim_highdim": DiscrimHighDim,
    "hartmann6_binary": Hartmann6Binary,
    "contrast_sensitivity_6d": ContrastSensitivity6d,
}


def make_argparser():
    parser = argparse.ArgumentParser(description="Lookahead LSE Benchmarks")
    parser.add_argument("--nproc", type=int, default=124)
    parser.add_argument("--reps_per_chunk", type=int, default=20)
    parser.add_argument("--chunks", type=int, default=15)
    parser.add_argument("--large_opt_size", type=int, default=740)
    parser.add_argument("--small_opt_size", type=int, default=490)
    parser.add_argument("--init_size", type=int, default=10)
    parser.add_argument("--global_seed", type=int, default=1000)
    parser.add_argument("--log_frequency", type=int, default=10)
    parser.add_argument("--output_path", type=Path, default=Path("../data/cameraready"))
    parser.add_argument(
        "--problem",
        type=str,
        choices=[
            "discrim_highdim",
            "discrim_lowdim",
            "hartmann6_binary",
            "contrast_sensitivity_6d",
            "all",
        ],
        default="all",
    )
    return parser


if __name__ == "__main__":

    parser = make_argparser()
    args = parser.parse_args()
    out_fname_base = args.output_path
    out_fname_base.mkdir(parents=True, exist_ok=True)  # make an output folder if it does not exist

    if args.problem == "all":
        problems = [
            DiscrimLowDim(),
            DiscrimHighDim(),
            Hartmann6Binary(),
            ContrastSensitivity6d(),
        ]
    else:
        problems = [problem_map[args.problem]()]

    bench_config = {
        "common": {
            "outcome_type": "single_probit",
            "strategy_names": "[init_strat, opt_strat]",
        },
        "init_strat": {"n_trials": args.init_size, "generator": "SobolGenerator"},
        "opt_strat": {
            "model": "GPClassificationModel",
            "generator": "OptimizeAcqfGenerator",
            "refit_every": args.log_frequency,
        },
        "GPClassificationModel": {
            "inducing_size": 100,
            "mean_covar_factory": "default_mean_covar_factory",
            "inducing_point_method": "auto",
        },
        "default_mean_covar_factory": {
            "fixed_mean": False,
            "lengthscale_prior": "gamma",
            "outputscale_prior": "gamma",
            "kernel": "RBFKernel",
        },
        "OptimizeAcqfGenerator": {
            "acqf": [
                "LocalMI",
                "MCLevelSetEstimation",  # Straddle
                "LocalSUR",
                "GlobalMI",
                "GlobalSUR",
                "EAVC",
                "ApproxGlobalSUR",
                "MCPosteriorVariance",  # BALV
                "BernoulliMCMutualInformation",  # BALD
            ],
            "restarts": 2,
            "samps": 100,
        },
        # Add the probit transform for non-probit-specific acqfs
        "MCLevelSetEstimation": {"objective": "ProbitObjective"},
        "BernoulliMCMutualInformation": {"objective": "ProbitObjective"},
        "MCPosteriorVariance": {"objective": "ProbitObjective"},
    }

    for chunk in range(args.chunks):
        for problem in problems:
            out_fname = Path(f"{out_fname_base}/{problem.name}_chunk{chunk}_out.csv")
            intermediate_fname = Path(
                f"{out_fname_base}/{problem.name}_chunk{chunk}_checkpoint.csv"
            )
            print(f"starting {problem.name} benchmark... chunk {chunk}")

            local_config = deepcopy(bench_config)
            local_config["common"]["lb"] = str(problem.lb.tolist())
            local_config["common"]["ub"] = str(problem.ub.tolist())
            local_config["common"]["target"] = problem.threshold
            local_config["opt_strat"]["n_trials"] = (
                args.small_opt_size
                if problem.name == "discrim_lowdim"
                else args.large_opt_size
            )
            logger = BenchmarkLogger(log_every=args.log_frequency)
            acq_bench = PathosBenchmark(
                nproc=args.nproc,
                problem=problem,
                logger=logger,
                configs=local_config,
                global_seed=args.global_seed,
                n_reps=args.reps_per_chunk,
            )
            sobol_config = deepcopy(local_config)
            sobol_config["opt_strat"]["generator"] = "SobolGenerator"
            del sobol_config["OptimizeAcqfGenerator"]
            sobol_bench = PathosBenchmark(
                nproc=args.nproc,
                problem=problem,
                logger=logger,
                configs=sobol_config,
                global_seed=args.global_seed,
                n_reps=args.reps_per_chunk,
            )
            bench = acq_bench + sobol_bench
            bench.start_benchmarks()

            # checkpoint every minute in case something breaks
            while not bench.is_done:
                time.sleep(60)
                collate_start = time.time()
                print(
                    f"Checkpointing bench {problem} chunk {chunk}..., {len(bench.futures)}/{bench.num_benchmarks} alive"
                )
                bench.collate_benchmarks(wait=False)
                temp_results = bench.logger.pandas()
                if len(temp_results) > 0:
                    temp_results["rep"] = (
                        temp_results["rep"] + args.reps_per_chunk * chunk
                    )
                    temp_results["problem"] = problem.name
                    temp_results.to_csv(intermediate_fname)
                print(
                    f"Collate done in {time.time()-collate_start} seconds, {len(bench.futures)}/{bench.num_benchmarks} left"
                )

            print(f"Problem {problem} chunk {chunk} fully done!")
            final_results = bench.logger.pandas()
            final_results["rep"] = final_results["rep"] + args.reps_per_chunk * chunk
            final_results["problem"] = problem.name
            final_results.to_csv(out_fname)
bernoulli_lse-main
run_experiments.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import os

# Run each job single-threaded; parallelize across jobs using pathos.
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"

# Multi-socket friendly settings.
os.environ["KMP_AFFINITY"] = "granularity=fine,compact,1,0"
os.environ["KMP_BLOCKTIME"] = "1"

import torch

# Force torch to one thread as well, just in case.
torch.set_num_interop_threads(1)
torch.set_num_threads(1)

import time
from copy import deepcopy
from pathlib import Path

from aepsych.benchmark import BenchmarkLogger, PathosBenchmark
from problems import (
    DiscrimHighDim,
    Hartmann6Binary,
    ContrastSensitivity6d,  # This takes a few minutes to instantiate due to fitting the model
)

chunks = 5
reps_per_chunk = 20
log_frequency = 10
large_opt_size = 750
nproc = 124
global_seed = 1000

if __name__ == "__main__":
    out_fname_base = Path("../data/thresh_sensitivity")
    out_fname_base.mkdir(parents=True, exist_ok=True)  # Make the output folder if it does not exist.
    problems = [
        DiscrimHighDim(),
        Hartmann6Binary(),
        ContrastSensitivity6d(),
    ]

    bench_config = {
        "common": {
            "outcome_type": "single_probit",
            "strategy_names": "[init_strat, opt_strat]",
            "target": [0.5, 0.65, 0.95],
        },
        "init_strat": {"n_trials": 10, "generator": "SobolGenerator"},
        "opt_strat": {
            "n_trials": 740,
            "model": "GPClassificationModel",
            "generator": "OptimizeAcqfGenerator",
            "refit_every": 10,
        },
        "GPClassificationModel": {
            "inducing_size": 100,
            "mean_covar_factory": "default_mean_covar_factory",
            "inducing_point_method": "auto",
        },
        "default_mean_covar_factory": {
            "fixed_mean": False,
            "lengthscale_prior": "gamma",
            "outputscale_prior": "gamma",
            "kernel": "RBFKernel",
        },
        "OptimizeAcqfGenerator": {
            "acqf": [
                "LocalMI",
                "GlobalMI",
                "EAVC",
            ],
            "restarts": 2,
            "samps": 100,
        },
        # Add the probit transform for non-probit-specific acqfs.
        "MCLevelSetEstimation": {"objective": "ProbitObjective"},
        "BernoulliMCMutualInformation": {"objective": "ProbitObjective"},
        "MCPosteriorVariance": {"objective": "ProbitObjective"},
    }

    for chunk in range(chunks):
        for problem in problems:
            out_fname = Path(f"{out_fname_base}/{problem.name}_chunk{chunk}_out.csv")
            intermediate_fname = Path(
                f"{out_fname_base}/{problem.name}_chunk{chunk}_checkpoint.csv"
            )
            print(f"starting {problem.name} benchmark... chunk {chunk}")
            local_config = deepcopy(bench_config)
            local_config["common"]["lb"] = str(problem.lb.tolist())
            local_config["common"]["ub"] = str(problem.ub.tolist())
            logger = BenchmarkLogger(log_every=log_frequency)
            bench = PathosBenchmark(
                nproc=nproc,
                problem=problem,
                logger=logger,
                configs=local_config,
                global_seed=global_seed,
                n_reps=reps_per_chunk,
            )
            bench.start_benchmarks()

            # Checkpoint every minute in case something breaks.
            while not bench.is_done:
                time.sleep(60)
                collate_start = time.time()
                print(
                    f"Checkpointing bench {problem} chunk {chunk}..., "
                    f"{len(bench.futures)}/{bench.num_benchmarks} alive"
                )
                bench.collate_benchmarks(wait=False)
                temp_results = bench.logger.pandas()
                if len(temp_results) > 0:
                    temp_results["rep"] = temp_results["rep"] + reps_per_chunk * chunk
                    temp_results["problem"] = problem.name
                    temp_results.to_csv(intermediate_fname)
                print(
                    f"Collate done in {time.time() - collate_start} seconds, "
                    f"{len(bench.futures)}/{bench.num_benchmarks} left"
                )

            print(f"Problem {problem} chunk {chunk} fully done!")
            final_results = bench.logger.pandas()
            final_results["rep"] = final_results["rep"] + reps_per_chunk * chunk
            final_results["problem"] = problem.name
            final_results.to_csv(out_fname)
bernoulli_lse-main
thresh_sensitivity_study.py
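# A minimal, standalone sketch of the config expansion assumed above: aepsych's
# benchmark utilities expand list-valued options (like "target" here, or "acqf"
# in run_experiments.py) into one benchmark condition per combination. The dict
# below is hypothetical and does not use aepsych itself.
from itertools import product

opts = {"target": [0.5, 0.65, 0.95], "acqf": ["LocalMI", "GlobalMI", "EAVC"]}
conditions = [dict(zip(opts.keys(), vals)) for vals in product(*opts.values())]
assert len(conditions) == 9  # 3 thresholds x 3 acquisition functions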
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import re
from pathlib import Path

import numpy as np
import pandas as pd

from plot_config import *

run_data = list(Path("../data/gentime_bench/").glob("*out.csv"))


def make_figure():
    alld = []
    for f in run_data:
        dlocal = pd.read_csv(f)
        # Parse the thread count from filenames like "..._16threads_out.csv".
        dlocal["nthreads"] = int(re.search(r"(\d+)threads_out\.csv", str(f)).group(1))
        alld.append(dlocal)

    df = pd.concat(alld)
    df["method"] = df.OptimizeAcqfGenerator_acqf.fillna("Quasi-random").astype(
        "category"
    )
    df["method"] = df.method.cat.rename_categories(
        {
            "MCLevelSetEstimation": "Straddle",
            "MCPosteriorVariance": "BALV",
            "BernoulliMCMutualInformation": "BALD",
        }
    )
    # Only keep timings from trial 251.
    df = df[df.trial_id.isin([251])]

    methods = [
        "EAVC",
        "LocalMI",
        "Straddle",
    ]

    fig = plt.figure(figsize=(6.75, 2.3))

    # Plot each problem
    prob = "hartmann6_binary"
    ax = fig.add_subplot(131)
    for method in methods:
        df_m = df[(df['problem'] == prob) & (df['method'] == method)]
        res = df_m.groupby('nthreads').agg({'gen_time': ['mean', 'std', 'count']})
        res = res.droplevel(axis=1, level=0).reset_index()
        ymean = res['mean']
        yerr = 2 * res['std'] / np.sqrt(res['count'])  # two standard errors
        color, ls = method_styles[method]
        ax.errorbar(res['nthreads'], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
        ax.plot(res['nthreads'], ymean, lw=1, label=method, color=color, ls=ls)
    ax.set_ylim([0.0, 3.2])
    ax.set_yticks([0, 1, 2, 3])
    ax.grid(alpha=0.1)
    ax.set_title("Binarized Hartmann6 (6-d)")
    ax.set_xlabel("Number of threads")
    ax.set_ylabel("Acquisition wall time (s)")
    ax.legend(loc="lower left", bbox_to_anchor=(0.9, -0.63), ncol=4)

    prob = "discrim_lowdim"
    ax = fig.add_subplot(132)
    for method in methods:
        df_m = df[(df['problem'] == prob) & (df['method'] == method)]
        res = df_m.groupby('nthreads').agg({'gen_time': ['mean', 'std', 'count']})
        res = res.droplevel(axis=1, level=0).reset_index()
        ymean = res['mean']
        yerr = 2 * res['std'] / np.sqrt(res['count'])
        color, ls = method_styles[method]
        ax.errorbar(res['nthreads'], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
        ax.plot(res['nthreads'], ymean, lw=1, label=method, color=color, ls=ls)
    ax.set_ylim([0.0, 3.2])
    ax.set_yticks([0, 1, 2, 3])
    ax.set_yticklabels([])
    ax.grid(alpha=0.1)
    ax.set_title("Psych. Discrimination (2-d)")
    ax.set_xlabel("Number of threads")

    prob = "discrim_highdim"
    ax = fig.add_subplot(133)
    for method in methods:
        df_m = df[(df['problem'] == prob) & (df['method'] == method)]
        res = df_m.groupby('nthreads').agg({'gen_time': ['mean', 'std', 'count']})
        res = res.droplevel(axis=1, level=0).reset_index()
        ymean = res['mean']
        yerr = 2 * res['std'] / np.sqrt(res['count'])
        color, ls = method_styles[method]
        ax.errorbar(res['nthreads'], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
        ax.plot(res['nthreads'], ymean, lw=1, label=method, color=color, ls=ls)
    ax.set_ylim([0.0, 3.2])
    ax.set_yticks([0, 1, 2, 3])
    ax.set_yticklabels([])
    ax.grid(alpha=0.1)
    ax.set_title("Psych. Discrimination (8-d)")
    ax.set_xlabel("Number of threads")

    fig.subplots_adjust(bottom=0.34, left=0.05, top=0.91, right=0.99, wspace=0.1)
    plt.savefig("pdfs/gentime_plots.pdf", pad_inches=0)


if __name__ == "__main__":
    make_figure()
bernoulli_lse-main
figures/plot_gentimes.py
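# A quick self-contained check of the thread-count parsing used above; the
# filename is a hypothetical example of the expected naming scheme.
import re

fname = "hartmann6_binary_16threads_out.csv"
match = re.search(r"(\d+)threads_out\.csv", fname)
assert match is not None and int(match.group(1)) == 16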
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import rc
import matplotlib

rc(
    'font',
    family='serif',
    style='normal',
    variant='normal',
    weight='normal',
    stretch='normal',
    size=8,
)
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['xtick.labelsize'] = 7
matplotlib.rcParams['ytick.labelsize'] = 7
matplotlib.rcParams['axes.titlesize'] = 9

cmap = plt.get_cmap("tab10")

# Shared (color, linestyle) styling per method, used by all figure scripts.
method_styles = {
    'Straddle': (cmap(0), ':'),
    'EAVC': (cmap(1), '-'),
    'LocalMI': (cmap(2), '--'),
    'GlobalMI': (cmap(3), '-'),
    'LocalSUR': (cmap(4), '--'),
    'GlobalSUR': (cmap(5), '-'),
    'ApproxGlobalSUR': (cmap(6), '-'),
    'Quasi-random': (cmap(7), ':'),
    'BALD': (cmap(8), ':'),
    'BALV': (cmap(9), ':'),
}

model_to_method_name = {
    'MCLevelSetEstimation': 'Straddle',
    'EAVC': 'EAVC',
    'LocalMI': 'LocalMI',
    'GlobalMI': 'GlobalMI',
    'LocalSUR': 'LocalSUR',
    'GlobalSUR': 'GlobalSUR',
    'ApproxGlobalSUR': 'ApproxGlobalSUR',
    'random': 'Quasi-random',
}

mean_covar_config = {
    "fixed_mean": False,
    "lengthscale_prior": "gamma",
    "outputscale_prior": "gamma",
    "kernel": "RBFKernel",
}
bernoulli_lse-main
figures/plot_config.py
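# Usage sketch for the shared styles above (assumes this module is importable
# as plot_config, which is how the figure scripts consume it via star-imports):
from plot_config import method_styles, plt

color, ls = method_styles["EAVC"]
plt.plot([0, 1], [0, 1], color=color, ls=ls, label="EAVC")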
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import numpy as np
from contrast_discrimination.helpers import HalfGrating
from psychopy import visual, monitors

screen = monitors.Monitor("testMonitor", gamma=1)
win = visual.Window(
    allowGUI=True,
    units="deg",
    monitor=screen,
    bpc=(8, 8, 8),
    size=[300, 300],
    fullscr=False,
)

base_args = {
    "pedestal": 0,
    "contrast": 1,
    "orientation": 0,
    "temporal_frequency": 0,  # no animation so we screenshot
    "spatial_frequency": 10,
    "size": 10,
    "eccentricity": 0,  # just plot in the middle so we can screenshot
    "angle_dist": 0,  # not used in synth test function
}

p100_args = {
    "pedestal": [-0.5],
    "orientation": [60],  # not used in the synth test function, but we used it in the plot in the paper
    "spatial_frequency": [3],
    "size": [7],
}
p75_args = {
    "pedestal": [-1.2],
    "spatial_frequency": [2],
    "size": [2.5],
}
p50_args = {
    "pedestal": [-1.5],
    "contrast": [-1.5],
}


def make_stim_image(args):
    stim = HalfGrating(**base_args, win=win)
    stim.update(args)
    image = stim.get_texture(phase=0, noisy_half="left")
    bg_color = np.array([stim.pedestal_psychopy_scale] * 3)
    win.setColor(bg_color)
    win.color = bg_color
    win.flip()
    stim._stim.image = image
    stim._stim.draw()
    win.flip()
    frame = win.getMovieFrame()
    return frame


if __name__ == "__main__":
    f50 = make_stim_image(p50_args)
    f50.save("pdfs/p50.png")
    f75 = make_stim_image(p75_args)
    f75.save("pdfs/p75.png")
    f100 = make_stim_image(p100_args)
    f100.save("pdfs/p100.png")
bernoulli_lse-main
figures/make_stim_plots.py
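# A hypothetical variation on the calls above: any subset of base_args can be
# overridden the same way the p50/p75/p100 dicts do (the values below are made
# up, and a connected display is required for the psychopy window):
#
#     custom_args = {"pedestal": [-1.0], "size": [5]}
#     make_stim_image(custom_args).save("pdfs/custom.png")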
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
import numpy as np
import pandas as pd
from copy import deepcopy

import sys
sys.path.append("..")

from plot_config import *
from plot_experiment_results import compile_results, run_data


def make_classerr_figure():
    res, itrs = compile_results(run_data)

    # Make the plot
    fig = plt.figure(figsize=(6.75, 2.3))
    metric = "class_errors"

    # Plot each problem
    prob = "hartmann6_binary"
    methods = list(method_styles.keys())[:8]
    ax = fig.add_subplot(131)
    for method in methods:
        y = res[metric][prob][method]
        ymean = np.mean(y, axis=0)
        yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
        color, ls = method_styles[method]
        ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
        ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
    ax.set_xlim([0, 760])
    ax.set_xticks([0, 250, 500, 750])
    ax.set_ylim([0.15, 0.45])
    # ax.set_yticks([0.2, 0.3, 0.4, 0.5])
    ax.grid(alpha=0.1)
    ax.set_title("Binarized Hartmann6 (6-d)")
    ax.set_xlabel("Iteration")
    ax.set_ylabel("Classification Error")
    ax.legend(loc="lower left", bbox_to_anchor=(0.45, -0.63), ncol=4)

    prob = "discrim_lowdim"
    ax = fig.add_subplot(132)
    for method in methods:
        y = res[metric][prob][method]
        ymean = np.mean(y, axis=0)
        yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
        color, ls = method_styles[method]
        ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
        ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
    ax.set_xlim([0, 510])
    ax.set_ylim([0.015, 0.11])
    # ax.set_yticks([0.02, 0.04, 0.06, 0.08, 0.1])
    ax.set_xticks([0, 100, 200, 300, 400, 500])
    ax.grid(alpha=0.1)
    ax.set_title("Psych. Discrimination (2-d)")
    ax.set_xlabel("Iteration")

    prob = "discrim_highdim"
    ax = fig.add_subplot(133)
    for method in methods:
        y = res[metric][prob][method]
        ymean = np.mean(y, axis=0)
        yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
        color, ls = method_styles[method]
        ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
        ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
    ax.set_xlim([0, 760])
    ax.set_xticks([0, 250, 500, 750])
    ax.set_ylim([0.1, 0.42])
    # ax.set_yticks([0.1, 0.2, 0.3, 0.4, 0.5])
    ax.grid(alpha=0.1)
    ax.set_title("Psych. Discrimination (8-d)")
    ax.set_xlabel("Iteration")

    fig.subplots_adjust(bottom=0.34, left=0.07, top=0.91, right=0.99, wspace=0.2)
    plt.savefig("pdfs/benchmark_classerr.pdf", pad_inches=0)


def make_bald_figure():
    res, itrs = compile_results(run_data)

    # Make the plot
    fig = plt.figure(figsize=(6.75, 2.3))

    # Plot each problem
    metric = "brier"
    prob = "hartmann6_binary"
    ax = fig.add_subplot(131)
    methods = list(method_styles.keys())
    for method in methods:
        y = res[metric][prob][method]
        ymean = np.mean(y, axis=0)
        yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
        color, ls = method_styles[method]
        ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
        ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
    ax.set_xlim([0, 760])
    ax.set_xticks([0, 250, 500, 750])
    ax.set_ylim([0.15, 0.5])
    ax.set_yticks([0.2, 0.3, 0.4, 0.5])
    ax.grid(alpha=0.1)
    ax.set_title("Binarized Hartmann6 (6-d)")
    ax.set_xlabel("Iteration")
    ax.set_ylabel("Brier Score")
    ax.legend(loc="lower left", bbox_to_anchor=(0.22, -0.63), ncol=5)

    prob = "discrim_lowdim"
    ax = fig.add_subplot(132)
    for method in methods:
        y = res[metric][prob][method]
        ymean = np.mean(y, axis=0)
        yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
        color, ls = method_styles[method]
        ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
        ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
    ax.set_xlim([0, 510])
    ax.set_ylim([0.01, 0.10])
    ax.set_yticks([0.02, 0.04, 0.06, 0.08, 0.1])
    ax.set_xticks([0, 100, 200, 300, 400, 500])
    ax.grid(alpha=0.1)
    ax.set_title("Psych. Discrimination (2-d)")
    ax.set_xlabel("Iteration")

    prob = "discrim_highdim"
    ax = fig.add_subplot(133)
    for method in methods:
        y = res[metric][prob][method]
        ymean = np.mean(y, axis=0)
        yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])
        color, ls = method_styles[method]
        ax.errorbar(itrs[prob], ymean, yerr=yerr, lw=1, alpha=0.3, color=color, ls=ls)
        ax.plot(itrs[prob], ymean, lw=1, label=method, color=color, ls=ls)
    ax.set_xlim([0, 760])
    ax.set_xticks([0, 250, 500, 750])
    ax.set_ylim([0.1, 1.0])
    ax.set_yticks([0.2, 0.4, 0.6, 0.8, 1.0])
    ax.grid(alpha=0.1)
    ax.set_title("Psych. Discrimination (8-d)")
    ax.set_xlabel("Iteration")

    fig.subplots_adjust(bottom=0.34, left=0.065, top=0.91, right=0.99, wspace=0.2)
    plt.savefig("pdfs/benchmarks_bald.pdf", pad_inches=0)


if __name__ == "__main__":
    make_classerr_figure()
    make_bald_figure()
bernoulli_lse-main
figures/plot_supplement_experiment_results.py
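# The error bars in both figures above are two standard errors of the mean
# across repetitions; a standalone restatement with hypothetical data:
import numpy as np

y = np.random.rand(30, 75)  # hypothetical (reps x iterations) metric array
ymean = np.mean(y, axis=0)
yerr = 2 * np.std(y, axis=0) / np.sqrt(y.shape[0])  # roughly a 95% interval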
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
bernoulli_lse-main
figures/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
from copy import deepcopy

import numpy as np
import torch
from botorch.utils.sampling import draw_sobol_samples

import sys
sys.path.append('..')

from plot_config import *
from problems import DiscrimLowDim

from aepsych.models.gp_classification import GPClassificationModel
from aepsych.factory.factory import default_mean_covar_factory
from aepsych.config import Config
from aepsych.acquisition import (
    MCLevelSetEstimation,
    GlobalSUR,
    GlobalMI,
    EAVC,
    LocalMI,
    LocalSUR,
)


def make_figure():
    # Generate training data for the model
    prob = DiscrimLowDim()
    X = draw_sobol_samples(
        bounds=torch.tensor(prob.bounds, dtype=torch.double), n=20, q=1, seed=1403
    ).squeeze(1)
    np.random.seed(1403)
    y = torch.LongTensor([1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1])
    # For reference, the sampled X is:
    # tensor([[ 0.2829,  0.2585],
    #         [-0.8400, -0.4620],
    #         [-0.3771,  0.8218],
    #         [ 0.9996, -0.8986],
    #         [ 0.5347,  0.6318],
    #         [-0.0918, -0.5853],
    #         [-0.6252,  0.1951],
    #         [ 0.2478, -0.0219],
    #         [ 0.0526,  0.9270],
    #         [-0.5706, -0.8485],
    #         [-0.1469,  0.4888],
    #         [ 0.7304, -0.2870],
    #         [ 0.8047,  0.0576],
    #         [-0.3227, -0.2292],
    #         [-0.8948,  0.6194],
    #         [ 0.4783, -0.6676],
    #         [ 0.3968,  0.5543],
    #         [-0.9803, -0.7246],
    #         [-0.3026,  0.1158],
    #         [ 0.8207, -0.1633]], dtype=torch.float64)

    # Build the mean and covariance modules from the shared config
    lb, ub = prob.bounds
    config = deepcopy(mean_covar_config)
    config["lb"] = str(lb.tolist())
    config["ub"] = str(ub.tolist())
    mean, covar = default_mean_covar_factory(
        Config({"default_mean_covar_factory": config})
    )

    # Fit a model
    m = GPClassificationModel(lb=lb, ub=ub, mean_module=mean, covar_module=covar)
    m.fit(train_x=X, train_y=y)

    # Create a grid for plotting
    ngrid = 25
    xp = np.linspace(-1, 1, ngrid)
    yp = np.linspace(-1, 1, ngrid)
    xv, yv = np.meshgrid(xp, yp)
    x_plt = torch.tensor(np.vstack((xv.flatten(), yv.flatten())).T)

    # Make the plot
    fig = plt.figure(figsize=(6.75, 1.5))
    Xrnd = draw_sobol_samples(bounds=prob.bounds, n=512, q=1, seed=1000).squeeze(1)

    # Evaluate each acquisition fn on x_plt and Xrnd
    for i, acq in enumerate(
        [
            MCLevelSetEstimation,
            LocalSUR,
            LocalMI,
            GlobalSUR,
            GlobalMI,
            EAVC,
        ]
    ):
        if i == 0:
            acqf = acq(model=m, target=0.75, beta=3.84)
        elif i in [3, 4, 5]:
            acqf = acq(model=m, target=0.75, Xq=Xrnd)
        else:
            acqf = acq(model=m, target=0.75)
        ax = fig.add_subplot(1, 6, i + 1)
        vals_plt = acqf(x_plt.unsqueeze(1)).detach().numpy()
        vals_opt = acqf(Xrnd.unsqueeze(1)).detach().numpy()
        r = vals_plt.max() - vals_plt.min()
        levels = np.linspace(vals_plt.min() - 0.01 * r, vals_plt.max() + 0.01 * r, 30)
        ax.contourf(yv, xv, vals_plt.reshape(ngrid, ngrid), alpha=0.2, levels=levels)
        # Mark the maximizer over the quasi-random candidate set
        indx_max = np.argmax(vals_opt)
        ax.plot(
            Xrnd[indx_max, 1],
            Xrnd[indx_max, 0],
            'r*',
            mew=0.5,
            ms=5,
            fillstyle='full',
        )
        ax.set_xlim([-1.08, 1.08])
        ax.set_ylim([-1.08, 1.08])
        ax.set_title(model_to_method_name[acq.__name__])
        ax.set_xticks([-1, 0, 1])
        ax.set_yticks([-1, 0, 1])
        ax.set_xlabel('$x_1$')
        if i > 0:
            ax.set_yticklabels([])
        else:
            ax.set_ylabel('$x_2$')

    fig.subplots_adjust(wspace=0.08, left=0.058, right=0.995, top=0.87, bottom=0.23)
    plt.savefig('pdfs/acquisitions.pdf', pad_inches=0)


if __name__ == '__main__':
    make_figure()
bernoulli_lse-main
figures/plot_acquisition.py
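# A sketch of evaluating a single acquisition surface with the objects built in
# make_figure above (the fitted model `m` and the plotting grid `x_plt`);
# LocalMI takes only the model and target, per the else-branch in the loop:
#
#     acqf = LocalMI(model=m, target=0.75)
#     vals = acqf(x_plt.unsqueeze(1)).detach().numpy()  # one value per grid point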