python_code | repo_name | file_path
---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Optional
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.node import LeafNode
from torch.distributions.gamma import Gamma
from torch.distributions.normal import Normal
class NoiseStandardDeviation:
"""The NoiseStandardDeviation class encapsulates the noise standard deviation.
The variance is parametrized by an inverse-gamma prior which is conjugate to a normal likelihood.
Args:
prior_concentration (float): Also called alpha. Must be greater than zero.
prior_rate (float): Also called beta. Must be greater than 0.
val (float): Current value of noise standard deviation.
"""
def __init__(
self, prior_concentration: float, prior_rate: float, val: Optional[float] = None
):
if prior_concentration <= 0 or prior_rate <= 0:
raise ValueError("Invalid prior hyperparameters")
self.prior_concentration = prior_concentration
self.prior_rate = prior_rate
if val is None:
self.sample(X=torch.Tensor([]), residual=torch.Tensor([])) # prior init
else:
self._val = val
@property
def val(self) -> float:
return self._val
@val.setter
def val(self, val: float):
self._val = val
def sample(self, X: torch.Tensor, residual: torch.Tensor) -> float:
"""Sample from the posterior distribution of sigma.
If empty tensors are passed for X and residual, there will be no update so the sampling will be from the prior.
Note:
This sets the value of the `val` attribute to the drawn sample.
Args:
X: Covariate matrix / training data of shape (num_observations, input_dimensions).
residual: The current residual of the model of shape (num_observations, 1).
"""
self.val = self._get_sample(X, residual)
return self.val
def _get_sample(self, X: torch.Tensor, residual: torch.Tensor) -> float:
"""
Draw a sample from the posterior.
Args:
X: Covariate matrix / training data of shape (num_observations, input_dimensions).
residual: The current residual of the model of shape (num_observations, 1).
"""
posterior_concentration = self.prior_concentration + (len(X) / 2.0)
posterior_rate = self.prior_rate + (0.5 * (torch.sum(torch.square(residual))))
draw = torch.pow(Gamma(posterior_concentration, posterior_rate).sample(), -0.5)
return draw.item()
class LeafMean:
"""
Class to sample from the prior and posterior distributions of the leaf nodes in BART.
Reference:
[1] Hugh A. Chipman, Edward I. George, Robert E. McCulloch (2010). "BART: Bayesian additive regression trees"
https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full
Args:
prior_loc: Prior location parameter.
prior_scale: Prior scale parameter.
"""
def __init__(self, prior_loc: float, prior_scale: float):
if prior_scale < 0:
raise ValueError("Invalid prior hyperparameters")
self._prior_loc = prior_loc
self._prior_scale = prior_scale
@property
def prior_scale(self):
return self._prior_scale
def sample_prior(self):
return Normal(loc=self._prior_loc, scale=self._prior_scale).sample().item()
def sample_posterior(
self,
node: LeafNode,
X: torch.Tensor,
y: torch.Tensor,
current_sigma_val: float,
):
X_in_node, y_in_node = node.data_in_node(X, y)
if len(X_in_node) == 0:
return None # no new data
num_points_in_node = len(X_in_node)
prior_variance = (self._prior_scale) ** 2
likelihood_variance = (current_sigma_val**2) / num_points_in_node
likelihood_mean = torch.sum(y_in_node) / num_points_in_node
posterior_variance = 1.0 / (1.0 / prior_variance + 1.0 / likelihood_variance)
posterior_mean = (
likelihood_mean * prior_variance + self._prior_loc * likelihood_variance
) / (likelihood_variance + prior_variance)
return (
Normal(loc=posterior_mean, scale=math.sqrt(posterior_variance))
.sample()
.item()
)
| beanmachine-main | src/beanmachine/ppl/experimental/causal_inference/models/bart/scalar_samplers.py |
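A minimal, self-contained sketch of the conjugate update that `NoiseStandardDeviation._get_sample` performs above, using only `torch`; the hyperparameters and residuals below are invented for illustration.

import torch
from torch.distributions.gamma import Gamma

# An inverse-gamma(concentration, rate) prior on the noise variance is conjugate
# to a normal likelihood, so the posterior over the variance is inverse-gamma with
#   concentration' = concentration + n / 2
#   rate'          = rate + 0.5 * sum(residual ** 2)
prior_concentration, prior_rate = 2.0, 1.0   # hypothetical hyperparameters
residual = torch.randn(100) * 0.5            # toy residuals

posterior_concentration = prior_concentration + len(residual) / 2.0
posterior_rate = prior_rate + 0.5 * torch.sum(torch.square(residual))

# Drawing Gamma(concentration', rate') for the precision and raising it to the
# power -0.5 yields a draw of the noise standard deviation, mirroring `_get_sample`.
sigma = torch.pow(Gamma(posterior_concentration, posterior_rate).sample(), -0.5)
print(float(sigma))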
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional, overload, Tuple, Union
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import (
PruneError,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
CompositeRules,
SplitRule,
)
class BaseNode:
"""
Base class for node structures.
Contains reference to a left and right child which can be used to traverse the tree.
Args:
depth (int): Distance of node from root node.
composite_rules (CompositeRules): Dimensional rules that are satisfied by this node.
left_child ("BaseNode"): Left child of the node.
right_child ("BaseNode"): Right child of the node.
"""
def __init__(
self,
depth: int,
composite_rules: CompositeRules,
left_child: Optional["BaseNode"] = None,
right_child: Optional["BaseNode"] = None,
):
""" """
self.depth = depth
self.composite_rules = composite_rules
self._left_child = left_child
self._right_child = right_child
@property
def left_child(self) -> Optional["BaseNode"]:
"""Returns the left_child of the node."""
return self._left_child
@left_child.setter
def left_child(self, left_child: Optional["BaseNode"]):
self._left_child = left_child
@property
def right_child(self) -> Optional["BaseNode"]:
"""Returns the right_child of the node."""
return self._right_child
@right_child.setter
def right_child(self, right_child: Optional["BaseNode"]):
self._right_child = right_child
@overload
def data_in_node(
self, X: torch.Tensor, y: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
...
@overload
def data_in_node(self, X: torch.Tensor) -> torch.Tensor:
...
def data_in_node(
self, X: torch.Tensor, y: Optional[torch.Tensor] = None
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
Conditions the covariate matrix and (optionally) response vector to return the
respective subsets which satisfy the composite rules of this node.
Note that the conditioning only looks at the input / covariate matrix
to determine this conditioning.
Args:
X: Input / covariate matrix.
y: (Optional) response vector.
"""
condition_mask = self.composite_rules.condition_on_rules(X)
if y is not None:
return X[condition_mask], y[condition_mask]
return X[condition_mask]
class LeafNode(BaseNode):
"""
A representation of a leaf node in the tree. Does not have children.
In addition to the normal work of a `BaseNode`, a `LeafNode` is responsible for
making predictions based on its value.
Args:
depth (int): Distance of node from root node.
composite_rules (CompositeRules): Dimensional rules that are satisfied by this node.
val (float): The prediction value of the node.
"""
def __init__(
self,
depth: int,
composite_rules: CompositeRules,
val: float = 0.0,
):
self.val = val
super().__init__(
depth=depth,
composite_rules=composite_rules,
left_child=None,
right_child=None,
)
def predict(self) -> float:
"""
Returns the val attribute as a prediction.
"""
return self.val
def is_growable(self, X: torch.Tensor) -> bool:
"""
Returns true if this leaf node can be grown.
This is checked by ensuring the input covariate matrix
has more than one unique value along at least one dimension.
Args:
X: Input / covariate matrix.
"""
return len(self.get_growable_dims(X)) > 0
def get_growable_dims(self, X: torch.Tensor) -> List[int]:
"""
Returns the list of dimensions along which this leaf node can be grown.
A dimension is growable if the input covariate matrix
has more than one unique value along that dimension.
Args:
X: Input / covariate matrix.
"""
X_conditioned = self.data_in_node(X)
if len(X_conditioned) == 0:
return []
return [
dim
for dim in range(X_conditioned.shape[-1])
if len(torch.unique(self.get_growable_vals(X_conditioned, dim))) > 1
]
def get_num_growable_dims(self, X: torch.Tensor) -> int:
"""
Returns the number of dimensions along which this leaf node can be grown.
This counts the dimensions of the input covariate matrix
that contain more than one unique value.
Args:
X: Input / covariate matrix.
"""
return len(self.get_growable_dims(X))
def get_growable_vals(self, X: torch.Tensor, grow_dim: int) -> torch.Tensor:
"""Returns the values in a feature dimension.
Args:
X: Input / covariate matrix.
grow_dim: Input dimensions along which values are required
"""
return self.data_in_node(X)[:, grow_dim]
def get_partition_of_split(
self, X: torch.Tensor, grow_dim: int, grow_val: float
) -> float:
"""
Get probability that a split value is chosen among possible values in an input dimension defined as
N(values_in_dimension == split_val) / N(values_in_dimension).
Args:
X: Input / covariate matrix.
grow_dim: Input dimension along which the split is being considered.
grow_val: The value at which the split is being carried out.
"""
growable_vals = self.get_growable_vals(X, grow_dim)
return torch.mean(
(growable_vals == grow_val).to(torch.float), dtype=torch.float
).item()
@staticmethod
def grow_node(
node: "LeafNode",
left_rule: SplitRule,
right_rule: SplitRule,
) -> "SplitNode":
"""
Converts a LeafNode into an internal SplitNode by applying the split rules for the left and right nodes.
This returns a copy; the original node is not modified.
Args:
left_rule: Rule applied to left child of the grown node.
right_rule: Rule applied to the right child of the grown node.
"""
left_composite_rules = node.composite_rules.add_rule(left_rule)
right_composite_rules = node.composite_rules.add_rule(right_rule)
return SplitNode(
depth=node.depth,
composite_rules=node.composite_rules,
left_child=LeafNode(
depth=node.depth + 1, composite_rules=left_composite_rules
),
right_child=LeafNode(
depth=node.depth + 1, composite_rules=right_composite_rules
),
)
class SplitNode(BaseNode):
"""
Encapsulates an internal node in the tree. It has the same attributes as BaseNode.
It contains the additional logic to determine if this node can be pruned.
Args:
depth (int): Distance of node from root node.
composite_rules (CompositeRules): Dimensional rules that are satisfied by this node.
left_child ("BaseNode"): Left child of the node.
right_child ("BaseNode"): Right child of the node.
"""
def __init__(
self,
depth: int,
composite_rules: CompositeRules,
left_child: Optional["BaseNode"] = None,
right_child: Optional["BaseNode"] = None,
):
"""
Args:
depth: Distance of node from root node.
composite_rules: Dimensional rules that are satisfied by this node.
left_child: Left child of the node.
right_child: Right child of the node.
"""
super().__init__(
depth=depth,
composite_rules=composite_rules,
left_child=left_child,
right_child=right_child,
)
def is_prunable(self) -> bool:
"""Returns true if this node is prunable. This is decided by the fact if its children are `LeafNodes`."""
return isinstance(self.left_child, LeafNode) and isinstance(
self.right_child, LeafNode
)
def most_recent_rule(self) -> Optional[SplitRule]:
"""Returns the rule which grew this node from a `LeafNode` and is specifically the rule which created its left child."""
if self.left_child is None:
raise AttributeError("This node is not split")
return self.left_child.composite_rules.most_recent_split_rule()
@staticmethod
def prune_node(
node: "SplitNode",
) -> LeafNode:
"""
Converts a SplitNode to a LeafNode by eliminating its children (if they are leaf nodes). Returns a copy.
Args:
node: Node to prune.
Raises:
PruneError: If this node is not prunable.
"""
if not node.is_prunable():
raise PruneError("Not a valid prunable node")
return LeafNode(depth=node.depth, composite_rules=node.composite_rules)
| beanmachine-main | src/beanmachine/ppl/experimental/causal_inference/models/bart/node.py |
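The conditioning done by `BaseNode.data_in_node` above reduces to boolean row masking of the covariate matrix. A standalone sketch with plain `torch`, using a hypothetical single rule "dimension 0 <= 0.5" in place of `CompositeRules`:

import torch

X = torch.rand(10, 3)   # toy covariates
y = torch.rand(10, 1)   # toy responses

# A composite rule acts as a boolean mask over rows.
condition_mask = X[:, 0] <= 0.5
X_in_node, y_in_node = X[condition_mask], y[condition_mask]
assert len(X_in_node) == len(y_in_node)

# A leaf is growable along a dimension only if the conditioned data still has
# more than one unique value in that dimension (compare `get_growable_dims`).
growable_dims = [
    dim
    for dim in range(X_in_node.shape[-1])
    if len(torch.unique(X_in_node[:, dim])) > 1
]
print(growable_dims)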
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import enum
import math
from typing import Union
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import (
GrowError,
PruneError,
TreeStructureError,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.mutation import (
GrowMutation,
Mutation,
PruneMutation,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.node import (
LeafNode,
SplitNode,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
Operator,
SplitRule,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.tree import Tree
from beanmachine.ppl.experimental.causal_inference.models.bart.tree_proposer import (
TreeProposer,
)
from numpy.random import choice
from torch.distributions.uniform import Uniform
class MutationKind(enum.Enum):
grow = "grow operation"
prune = "prune operation"
class GrowPruneTreeProposer(TreeProposer):
"""This implements the Grow Prune tree sampling approach of Pratola [1] where the additional steps of
BART (Change and Swap) are eliminated for computational efficiency.
Reference:
[1] Pratola MT, Chipman H, Higdon D, McCulloch R, Rust W (2013). “Parallel Bayesian Additive Regression Trees.”
Technical report, University of Chicago.
https://arxiv.org/pdf/1309.1906.pdf
Args:
grow_probability: Probability of growing a node.
"""
def __init__(self, grow_probability: float = 0.5):
if grow_probability > 1.0 or grow_probability < 0.0:
raise ValueError(
f"Grow probability {grow_probability} not a valid probabiity"
)
self.grow_probability = grow_probability
self.prune_probability = 1 - self.grow_probability
self._uniform = Uniform(0.0, 1.0)
def propose(
self,
tree: Tree,
X: torch.Tensor,
partial_residual: torch.Tensor,
alpha: float,
beta: float,
sigma_val: float,
leaf_mean_prior_scale: float,
) -> Tree:
"""Propose a tree based on a Metropolis-Hastings step. Refer to [1] for details.
Reference:
[1] Adam Kapelner & Justin Bleich (2014). "bartMachine: Machine Learning with Bayesian
Additive Regression Trees".
https://arxiv.org/pdf/1312.2171.pdf
Args:
tree: Previous tree.
X: Covariate matrix / training data.
partial_residual: Partial residual of the current tree model with respect to the training data.
alpha: Hyperparameter used in tree prior.
beta: Hyperparameter used in tree prior.
sigma_val: Current estimate of noise standard deviation in the data.
leaf_mean_prior_scale: Prior of the scale hyperparameter in the normal distribution of the leaf mean.
"""
new_mutation = self._get_new_mutation(tree, X)
# carry out move
if new_mutation == MutationKind.grow:
try:
leaf_to_grow = self._select_leaf_to_grow(tree=tree, X=X)
except GrowError:
return self.propose(
tree,
X,
partial_residual,
alpha,
beta,
sigma_val,
leaf_mean_prior_scale,
)
grow_dim = self._select_grow_dim(leaf_to_grow, X)
grow_val = self._get_grow_val(
leaf_to_grow=leaf_to_grow, grow_dim=grow_dim, X=X
)
left_rule, right_rule = SplitRule(
grow_dim=grow_dim, grow_val=grow_val, operator=Operator.le
), SplitRule(grow_dim=grow_dim, grow_val=grow_val, operator=Operator.gt)
mutation = GrowMutation(
old_node=leaf_to_grow,
new_node=LeafNode.grow_node(
leaf_to_grow, left_rule=left_rule, right_rule=right_rule
),
)
elif new_mutation == MutationKind.prune:
try:
split_node_to_prune = self._select_split_node_to_prune(tree)
except PruneError:
return self.propose(
tree,
X,
partial_residual,
alpha,
beta,
sigma_val,
leaf_mean_prior_scale,
)
mutation = PruneMutation(
old_node=split_node_to_prune,
new_node=SplitNode.prune_node(split_node_to_prune),
)
else:
raise TreeStructureError("Can only grow or prune")
# Metropolis-Hasting step
log_draw_probability = (
self._get_log_transition_ratio(
tree=tree,
mutation=mutation,
X=X,
)
+ self._get_log_likelihood_ratio(
mutation=mutation,
X=X,
partial_residual=partial_residual,
sigma_val=sigma_val,
leaf_mean_prior_scale=leaf_mean_prior_scale,
)
+ self._get_log_structure_ratio(
mutation=mutation,
alpha=alpha,
beta=beta,
X=X,
)
)
if self._uniform.sample().item() < math.exp(log_draw_probability):
tree.mutate(mutation)
return tree
return tree
def _get_new_mutation(self, tree: Tree, X: torch.Tensor) -> MutationKind:
"""Get a new mutation.
Args:
tree: Previous tree.
X: Covariate matrix / training data.
"""
if tree.num_nodes() == 1 or tree.num_prunable_split_nodes() == 0:
return MutationKind.grow
if tree.num_growable_leaf_nodes(X) == 0:
return MutationKind.prune
if bool(torch.bernoulli(torch.Tensor([self.grow_probability])).item()):
return MutationKind.grow
return MutationKind.prune
def _select_leaf_to_grow(self, tree: Tree, X: torch.Tensor) -> LeafNode:
"""
Select which leaf to grow.
Args:
tree: Previous tree.
X: Covariate matrix / training data.
"""
growable_leaf_nodes = tree.growable_leaf_nodes(X)
if len(growable_leaf_nodes) < 1:
raise GrowError("Leaf cannot be grown")
return choice(growable_leaf_nodes)
def _select_grow_dim(self, leaf_to_grow: LeafNode, X: torch.Tensor) -> int:
"""
Select an input dimension to grow along.
Args:
tree: Previous tree.
leaf_to_grow: Leaf currently being grown.
X: Covariate matrix / training data.
"""
if not leaf_to_grow.is_growable(X):
raise GrowError("Leaf cannot be grown")
return choice(leaf_to_grow.get_growable_dims(X))
def _get_grow_val(
self, leaf_to_grow: LeafNode, grow_dim: int, X: torch.Tensor
) -> float:
"""
Select a value in the chosen input dimension to grow along.
Args:
tree: Previous tree.
leaf_to_grow: Leaf currently being grown.
grow_dim: Input dimension to grow along.
X: Covariate matrix / training data.
"""
if not leaf_to_grow.is_growable(X):
raise GrowError("Leaf cannot be grown")
growable_vals = leaf_to_grow.get_growable_vals(X, grow_dim)
max_growable_val = torch.max(growable_vals)
candidate_val = choice(growable_vals)
# Resample while the candidate equals the maximum value; splitting at the
# maximum would leave one child empty (a degenerate split).
while candidate_val == max_growable_val:
    candidate_val = choice(growable_vals)
return candidate_val
def _select_split_node_to_prune(self, tree: Tree) -> SplitNode:
"""
Select an internal node to prune.
Args:
tree: Previous tree.
"""
prunable_split_nodes = tree.prunable_split_nodes()
if len(prunable_split_nodes) < 1:
raise PruneError
return choice(prunable_split_nodes)
def _get_log_transition_ratio(
self,
tree: Tree,
mutation: Mutation,
X: torch.Tensor,
) -> float:
"""
Get the log transition ratio as discussed in [1].
[1] Adam Kapelner & Justin Bleich (2014). "bartMachine: Machine Learning with Bayesian
Additive Regression Trees".
https://arxiv.org/pdf/1312.2171.pdf
Args:
tree: Previous tree.
mutation: Proposed mutation,
X: Covariate matrix / training data.
"""
if isinstance(mutation, GrowMutation):
return self._grow_log_transition_ratio(tree=tree, mutation=mutation, X=X)
elif isinstance(mutation, PruneMutation):
return self._prune_log_transition_ratio(tree=tree, mutation=mutation, X=X)
else:
raise TreeStructureError("Can only grow or prune")
def _grow_log_transition_ratio(
self,
tree: Tree,
mutation: GrowMutation,
X: torch.Tensor,
) -> float:
"""
Implement expression for log( P(T -> T*) / P(T* -> T) ) in a GROW move as discussed in eq. 8 of [1]
Reference:
[1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013).
https://arxiv.org/abs/1312.2171
Args:
tree: Previous tree.
mutation: Proposed mutation,
X: Covariate matrix / training data.
"""
log_p_new_to_old_tree = math.log(self.prune_probability) - math.log(
tree.num_prunable_split_nodes() + 1
)
log_p_old_to_new_tree = math.log(
self.grow_probability
) + _log_probability_of_growing_a_tree(
tree=tree,
mutation=mutation,
X=X,
)
return log_p_new_to_old_tree - log_p_old_to_new_tree
def _prune_log_transition_ratio(
self,
tree: Tree,
mutation: PruneMutation,
X: torch.Tensor,
) -> float:
"""
Implement expression for log( P(T -> T*) / P(T* -> T) ) in a PRUNE move as discussed in section A.2 of [1]
Reference:
[1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013).
https://arxiv.org/abs/1312.2171
Args:
tree: Previous tree.
mutation: Proposed mutation,
X: Covariate matrix / training data.
"""
num_growable_leaves_in_pruned_tree = (
tree.num_growable_leaf_nodes(X)
- mutation.old_node.left_child.is_growable(X=X)
- mutation.old_node.right_child.is_growable(X=X)
+ mutation.new_node.is_growable(X=X)
)
if num_growable_leaves_in_pruned_tree == 0:
return -float("inf") # impossible prune
log_p_old_to_new_tree = math.log(self.prune_probability) - math.log(
tree.num_prunable_split_nodes()
)
log_probability_selecting_leaf_to_grow = -math.log(
num_growable_leaves_in_pruned_tree
)
log_probability_growing_leaf = _log_probability_of_growing_node(
mutation=GrowMutation(
old_node=mutation.new_node, new_node=mutation.old_node
),
X=X,
)
log_p_new_to_old_tree = (
math.log(self.grow_probability)
+ log_probability_selecting_leaf_to_grow
+ log_probability_growing_leaf
)
return log_p_new_to_old_tree - log_p_old_to_new_tree
def _get_log_likelihood_ratio(
self,
mutation: Mutation,
X: torch.Tensor,
partial_residual: torch.Tensor,
sigma_val: float,
leaf_mean_prior_scale: float,
) -> float:
"""
Implement expression for log( P(R | T*, sigma) / P(R | T, sigma) ) in a GROW move as discussed in [1]
Reference:
[1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013).
https://arxiv.org/abs/1312.2171
Args:
tree: Previous tree.
mutation: Proposed mutation.
sigma_val: Current estimate of the noise standard deviation in the data.
leaf_mean_prior_scale: Prior of the scale hyperparameter in the normal distribution of the leaf mean.
X: Covariate matrix / training data.
partial_residual: Partial residual of the current tree model with respect to the training data.
"""
if isinstance(mutation, GrowMutation):
return self._grow_log_likelihood_ratio(
mutation=mutation,
sigma_val=sigma_val,
leaf_mean_prior_scale=leaf_mean_prior_scale,
X=X,
partial_residual=partial_residual,
)
elif isinstance(mutation, PruneMutation):
return -self._grow_log_likelihood_ratio(
mutation=GrowMutation(
old_node=mutation.new_node, new_node=mutation.old_node
),
sigma_val=sigma_val,
leaf_mean_prior_scale=leaf_mean_prior_scale,
X=X,
partial_residual=partial_residual,
)
else:
raise TreeStructureError(" Can only grow or prune")
def _grow_log_likelihood_ratio(
self,
mutation: GrowMutation,
sigma_val: float,
leaf_mean_prior_scale: float,
X: torch.Tensor,
partial_residual: torch.Tensor,
) -> float:
"""
Implement expression for log( P(R | T*, sigma) / P(R | T, sigma) ) in a GROW move as discussed in eq. 10 of [1]
Reference:
[1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013).
https://arxiv.org/abs/1312.2171
Args:
tree: Previous tree.
mutation: Proposed mutation.
sigma_val: Current estimate of the noise standard deviation in the data.
leaf_mean_prior_scale: Prior of the scale hyperparameter in the normal distribution of the leaf mean.
X: Covariate matrix / training data.
partial_residual: Partial residual of the current tree model with respect to the training data.
"""
var = sigma_val**2
var_mu = leaf_mean_prior_scale**2
nodes = {
"parent": mutation.old_node,
"left": mutation.new_node.left_child,
"right": mutation.new_node.right_child,
}
y_sum, num_points = {}, {}
for node_label, node in nodes.items():
X_conditioned, y_conditioned = node.data_in_node(X, partial_residual)
y_sum[node_label] = torch.sum(y_conditioned)
num_points[node_label] = len(X_conditioned)
first_term = (var * (var + num_points["parent"] * leaf_mean_prior_scale)) / (
(var + num_points["left"] * var_mu) * (var + num_points["right"] * var_mu)
)
first_term = math.log(math.sqrt(first_term))
left_contribution = torch.square(y_sum["left"]) / (
var + num_points["left"] * leaf_mean_prior_scale
)
right_contribution = torch.square(y_sum["right"]) / (
var + num_points["right"] * leaf_mean_prior_scale
)
parent_contribution = torch.square(y_sum["parent"]) / (
var + num_points["parent"] * leaf_mean_prior_scale
)
second_term = left_contribution + right_contribution - parent_contribution
return first_term + (var_mu / (2 * var)) * second_term.item()
def _get_log_structure_ratio(
self,
mutation: Mutation,
alpha: float,
beta: float,
X: torch.Tensor,
) -> float:
"""
Implement expression for log( P(T*) / P(T) ) in as discussed in [1].
Reference:
[1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013).
https://arxiv.org/abs/1312.2171
Args:
mutation: Proposed mutation,
X: Covariate matrix / training data.
alpha: Hyperparameter used in tree prior.
beta: Hyperparameter used in tree prior.
"""
if isinstance(mutation, GrowMutation):
return self._grow_log_structure_ratio(
mutation=mutation,
alpha=alpha,
beta=beta,
X=X,
)
elif isinstance(mutation, PruneMutation):
return -self._grow_log_structure_ratio(
mutation=GrowMutation(
old_node=mutation.new_node, new_node=mutation.old_node
),
alpha=alpha,
beta=beta,
X=X,
)
else:
raise TreeStructureError("Only grow or prune mutations are allowed")
def _grow_log_structure_ratio(
self,
mutation: GrowMutation,
alpha: float,
beta: float,
X: torch.Tensor,
) -> float:
"""
Implement expression for log( P(T*) / P(T) ) in a GROW step as discussed in section A.1 of [1].
Reference:
[1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013).
https://arxiv.org/abs/1312.2171
Args:
mutation: Proposed mutation,
X: Covariate matrix / training data.
alpha: Hyperparameter used in tree prior.
beta: Hyperparameter used in tree prior.
"""
denominator = _log_probability_node_is_terminal(alpha, beta, mutation.old_node)
log_probability_left_is_terminal = _log_probability_node_is_terminal(
alpha, beta, mutation.new_node.left_child
)
log_probability_right_is_terminal = _log_probability_node_is_terminal(
alpha, beta, mutation.new_node.right_child
)
log_probability_parent_is_nonterminal = _log_probability_node_is_nonterminal(
alpha, beta, mutation.old_node
)
log_probability_rule = _log_probability_of_growing_node(mutation=mutation, X=X)
numerator = (
log_probability_left_is_terminal
+ log_probability_right_is_terminal
+ log_probability_parent_is_nonterminal
+ log_probability_rule
)
return numerator - denominator
def _log_probability_node_is_nonterminal(
alpha: float, beta: float, node: Union[LeafNode, SplitNode]
) -> float:
"""Get log probability of node being non-terminal (internal node) as discussed in Eq. 7 of [1].
Reference:
[1] Hugh A. Chipman, Edward I. George, Robert E. McCulloch (2010). "BART: Bayesian additive regression trees"
https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full
Args:
alpha: Hyperparameter used in tree prior.
beta: Hyperparameter used in tree prior.
node: Node for which probability is being calculated.
"""
return math.log(alpha * math.pow(1 + node.depth, -beta))
def _log_probability_node_is_terminal(
alpha: float, beta: float, node: Union[LeafNode, SplitNode]
) -> float:
"""Get log probability of node being terminal (leaf node) as discussed in Eq. 7 of [1].
Reference:
[1] Hugh A. Chipman, Edward I. George, Robert E. McCulloch (2010). "BART: Bayesian additive regression trees"
https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full
Args:
alpha: Hyperparameter used in tree prior.
beta: Hyperparameter used in tree prior.
node: Node for which probability is being calculated.
"""
# P(terminal) = 1 - alpha * (1 + depth) ** -beta, so return the log of that
# quantity rather than 1 minus the log of P(nonterminal).
return math.log(1 - alpha * math.pow(1 + node.depth, -beta))
def _log_probability_of_growing_a_tree(
tree: Tree, mutation: GrowMutation, X: torch.Tensor
) -> float:
"""
Get the log probability of choosing a leaf node and growing it as discussed in section A.1 of [1].
Reference:
[1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013).
https://arxiv.org/abs/1312.2171
Args:
tree: Previous tree.
mutation: Growth mutation being applied.
X: Covariate matrix / training data.
"""
return -math.log(tree.num_growable_leaf_nodes(X)) + _log_probability_of_growing_node(mutation=mutation, X=X)
def _log_probability_of_growing_node(mutation: GrowMutation, X: torch.Tensor) -> float:
"""
Get the log probability of growing a node as discussed in section A.1 of [1].
Reference:
[1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013).
https://arxiv.org/abs/1312.2171
Args:
mutation: Growth mutation being applied.
X: Covariate matrix / training data.
"""
log_probability_of_selecting_dim = -math.log(
mutation.old_node.get_num_growable_dims(X)
)
grow_dim = mutation.new_node.most_recent_rule().grow_dim
grow_val = mutation.new_node.most_recent_rule().grow_val
log_probability_of_growing_at_val = -math.log(
mutation.old_node.get_partition_of_split(X, grow_dim, grow_val)
)
return log_probability_of_selecting_dim + log_probability_of_growing_at_val
| beanmachine-main | src/beanmachine/ppl/experimental/causal_inference/models/bart/grow_prune_tree_proposer.py |
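A standalone restatement of the eq. 10 likelihood ratio used in the GROW move, written against plain tensors so the roles of sigma^2 and sigma_mu^2 are explicit; the residuals and hyperparameters below are made up for illustration.

import math
import torch

def grow_log_likelihood_ratio(resid_left, resid_right, sigma, sigma_mu):
    # log( P(R | T*, sigma) / P(R | T, sigma) ) for a single GROW move.
    var, var_mu = sigma ** 2, sigma_mu ** 2
    n_l, n_r = len(resid_left), len(resid_right)
    n_p = n_l + n_r
    s_l, s_r = torch.sum(resid_left), torch.sum(resid_right)
    s_p = s_l + s_r
    first_term = 0.5 * math.log(
        var * (var + n_p * var_mu)
        / ((var + n_l * var_mu) * (var + n_r * var_mu))
    )
    second_term = (
        s_l ** 2 / (var + n_l * var_mu)
        + s_r ** 2 / (var + n_r * var_mu)
        - s_p ** 2 / (var + n_p * var_mu)
    )
    return first_term + (var_mu / (2 * var)) * second_term.item()

# Toy partial residuals falling into the proposed left / right children.
left, right = torch.randn(6) * 0.1, torch.randn(4) * 0.1 + 1.0
print(grow_log_likelihood_ratio(left, right, sigma=0.5, sigma_mu=1.0))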
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import dataclasses
from typing import (
Collection,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
TypeVar,
)
import torch
import torch.distributions as dist
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import init_to_uniform
from beanmachine.ppl.world.base_world import BaseWorld
from beanmachine.ppl.world.initialize_fn import init_from_prior, InitializeFn
from beanmachine.ppl.world.variable import Variable
RVDict = Dict[RVIdentifier, torch.Tensor]
T = TypeVar("T", bound="World")
@dataclasses.dataclass
class _TempVar:
node: RVIdentifier
parents: Set[RVIdentifier] = dataclasses.field(default_factory=set)
class World(BaseWorld, Mapping[RVIdentifier, torch.Tensor]):
"""
A World represents an instantiation of the graphical model and can be manipulated or evaluated.
In the context of MCMC inference, a world represents a single Monte Carlo posterior sample.
A World can also be used as a context manager to run and sample random variables.
Example::
@bm.random_variable
def foo():
return Normal(0., 1.)
world = World()
with world:
x = foo() # returns a sample, ie tensor.
with world:
y = foo() # same world = same tensor
assert x == y
Args:
observations (Optional): Optional observations, which fixes the random variables to observed values
initialize_fn (callable, Optional): Callable which takes a ``torch.distribution`` object as argument and returns a ``torch.Tensor``
"""
def __init__(
self,
observations: Optional[RVDict] = None,
initialize_fn: InitializeFn = init_from_prior,
) -> None:
self.observations: RVDict = observations or {}
self._initialize_fn: InitializeFn = initialize_fn
self._variables: Dict[RVIdentifier, Variable] = {}
self._call_stack: List[_TempVar] = []
@classmethod
def initialize_world(
cls: type[T],
queries: Iterable[RVIdentifier],
observations: Optional[RVDict] = None,
initialize_fn: InitializeFn = init_to_uniform,
max_retries: int = 100,
**kwargs,
) -> T:
"""
Initializes a world with all of the random variables (queries and observations).
In case of initializing values outside of support of the distributions, the
method will keep resampling until a valid initialization is found up to
``max_retries`` times.
Args:
queries: A list of random variables that need to be inferred.
observations: Observations, which fixes the random variables to observed values
initialize_fn: Function for initializing the values of random variables
max_retries: The number of attempts this method will make before throwing an
error (default to 100).
"""
observations = observations or {}
for _ in range(max_retries):
world = cls(observations, initialize_fn, **kwargs)
# recursively add parent nodes to the graph
for node in queries:
world.call(node)
for node in observations:
world.call(node)
# check if the initial state is valid
log_prob = world.log_prob()
if not torch.isinf(log_prob) and not torch.isnan(log_prob):
return world
# none of the attempts produced a valid initial state
raise ValueError(
f"Cannot find a valid initialization after {max_retries} retries. The model"
" might be misspecified."
)
def __getitem__(self, node: RVIdentifier) -> torch.Tensor:
"""
Args:
node (RVIdentifier): RVIdentifier of node.
Returns:
torch.Tensor: The sampled value.
"""
return self._variables[node].value
def get_variable(self, node: RVIdentifier) -> Variable:
"""
Args:
node (RVIdentifier): RVIdentifier of node.
Returns:
Variable object that contains the metadata of the current node
in the world.
"""
return self._variables[node]
def replace(self, values: RVDict) -> World:
"""
Args:
values (RVDict): Dict of RVIdentifiers and their values to replace.
Returns:
A new world where values specified in the dictionary are replaced.
This method will update the internal graph structure.
"""
assert not any(node in self.observations for node in values)
new_world = self.copy()
for node, value in values.items():
new_world._variables[node] = new_world._variables[node].replace(
value=value.clone()
)
# changing the value of a node can change the dependencies of its children nodes
nodes_to_update = set().union(
*(self._variables[node].children for node in values)
)
for node in nodes_to_update:
# Invoke node conditioned on the provided values
new_distribution, new_parents = new_world._run_node(node)
# Update children's dependencies
old_node_var = new_world._variables[node]
new_world._variables[node] = old_node_var.replace(
parents=new_parents, distribution=new_distribution
)
dropped_parents = old_node_var.parents - new_parents
for parent in dropped_parents:
parent_var = new_world._variables[parent]
new_world._variables[parent] = parent_var.replace(
children=parent_var.children - {node}
)
return new_world
def __iter__(self) -> Iterator[RVIdentifier]:
return iter(self._variables)
def __len__(self) -> int:
return len(self._variables)
@property
def latent_nodes(self) -> Set[RVIdentifier]:
"""
All the latent nodes in the current world.
"""
return self._variables.keys() - self.observations.keys()
def copy(self) -> World:
"""
Returns:
Shallow copy of the current world.
"""
world_copy = World(self.observations.copy(), self._initialize_fn)
world_copy._variables = self._variables.copy()
return world_copy
def initialize_value(self, node: RVIdentifier) -> None:
# recursively calls into parent nodes
distribution, parents = self._run_node(node)
if node in self.observations:
node_val = self.observations[node]
else:
node_val = self._initialize_fn(distribution)
self._variables[node] = Variable(
value=node_val,
distribution=distribution,
parents=parents,
)
def update_graph(self, node: RVIdentifier) -> torch.Tensor:
"""
This function adds a node to the graph and initialize its value if the node
is not found in the graph already.
Args:
node (RVIdentifier): RVIdentifier of node to update in the graph.
Returns:
The value of the node stored in world (in original space).
"""
if node not in self._variables:
self.initialize_value(node)
node_var = self._variables[node]
if len(self._call_stack) > 0:
tmp_child_var = self._call_stack[-1]
tmp_child_var.parents.add(node)
node_var.children.add(tmp_child_var.node)
return node_var.value
def log_prob(
self, nodes: Optional[Collection[RVIdentifier]] = None
) -> torch.Tensor:
"""
Args:
nodes (Optional): Optional collection of RVIdentifiers to evaluate the log prob of a subset of
the graph. If none is specified, then all the variables in the world are used.
Returns:
The joint log prob of all of the nodes in the current world
"""
if nodes is None:
nodes = self._variables.keys()
log_prob = torch.tensor(0.0)
for node in set(nodes):
log_prob = log_prob + torch.sum(self._variables[node].log_prob)
return log_prob
def enumerate_node(self, node: RVIdentifier) -> torch.Tensor:
"""
Args:
node (RVIdentifier): RVIdentifier of node.
Returns:
A tensor enumerating the support of the node.
"""
distribution = self._variables[node].distribution
if not distribution.has_enumerate_support:
raise ValueError(str(node) + " is not enumerable")
return distribution.enumerate_support()
def _run_node(
self, node: RVIdentifier
) -> Tuple[dist.Distribution, Set[RVIdentifier]]:
"""
Invoke a random variable function conditioned on the current world.
Args:
node (RVIdentifier): RVIdentifier of node.
Returns:
Its distribution and set of parent nodes
"""
self._call_stack.append(_TempVar(node))
with self:
distribution = node.function(*node.arguments)
temp_var = self._call_stack.pop()
if not isinstance(distribution, dist.Distribution):
raise TypeError("A random_variable is required to return a distribution.")
return distribution, temp_var.parents
| beanmachine-main | src/beanmachine/ppl/world/world.py |
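A hedged usage sketch for `World.initialize_world`; it assumes the conventional `beanmachine.ppl` import alias and a toy two-variable model, and is not part of the original file.

import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.world import World

@bm.random_variable
def mu():
    return dist.Normal(0.0, 1.0)

@bm.random_variable
def x():
    return dist.Normal(mu(), 1.0)

# Build a world over the model: x() is observed, mu() stays latent and is
# initialized by the default init_to_uniform strategy.
world = World.initialize_world(
    queries=[mu()],
    observations={x(): torch.tensor(0.5)},
)
print(world[mu()], world.log_prob())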
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[16, 20]
from typing import Callable
import torch
import torch.distributions as dist
InitializeFn = Callable[[dist.Distribution], torch.Tensor]
def init_to_uniform(distribution: dist.Distribution) -> torch.Tensor:
"""
Initializes a uniform distribution to sample from transformed to the
support of ``distribution``. A Categorical is used for discrete distributions,
a bijective transform is used for constrained continuous distributions, and
``distribution`` is used otherwise.
Used as an arg for ``World``
Args:
distribution: ``torch.distribution.Distribution`` of the RV, usually
the prior distribution.
"""
sample_val = distribution.sample()
if distribution.has_enumerate_support:
support = distribution.enumerate_support(expand=False).flatten()
return support[torch.randint_like(sample_val, support.numel()).long()]
elif not distribution.support.is_discrete:
transform = dist.biject_to(distribution.support)
return transform(torch.rand_like(transform.inv(sample_val)) * 4 - 2)
else:
# fall back to sample from prior
return init_from_prior(distribution)
def init_from_prior(distribution: dist.Distribution) -> torch.Tensor:
"""
Samples from the distribution.
Used as an arg for ``World``
Args:
distribution: ``torch.distribution.Distribution`` corresponding to
the distribution to sample from
"""
return distribution.sample()
| beanmachine-main | src/beanmachine/ppl/world/initialize_fn.py |
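A quick comparison of the two initializers above on a constrained-continuous prior; `Gamma` is an arbitrary choice for illustration.

import torch.distributions as dist
from beanmachine.ppl.world import init_from_prior, init_to_uniform

gamma = dist.Gamma(2.0, 2.0)

# init_from_prior simply draws from the prior, so values follow Gamma(2, 2).
print(init_from_prior(gamma))

# init_to_uniform draws uniformly on (-2, 2) in unconstrained space and pushes
# the draw through biject_to(support), so the result always lands in (0, inf)
# but is not distributed like the prior.
print(init_to_uniform(gamma))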
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from beanmachine.ppl.world.base_world import get_world_context
from beanmachine.ppl.world.initialize_fn import (
init_from_prior,
init_to_uniform,
InitializeFn,
)
from beanmachine.ppl.world.utils import BetaDimensionTransform, get_default_transforms
from beanmachine.ppl.world.world import RVDict, World
__all__ = [
"BetaDimensionTransform",
"InitializeFn",
"RVDict",
"World",
"get_default_transforms",
"get_world_context",
"init_from_prior",
"init_to_uniform",
]
| beanmachine-main | src/beanmachine/ppl/world/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import dataclasses
from typing import Set
import torch
import torch.distributions as dist
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from torch.distributions.utils import lazy_property
@dataclasses.dataclass
class Variable:
"""
Primitive used for maintaining metadata of random variables. Usually used
in conjunction with `World` during inference.
"""
value: torch.Tensor
"Sampled value of random variable"
distribution: dist.Distribution
"Distribution random variable was sampled from"
parents: Set[RVIdentifier] = dataclasses.field(default_factory=set)
"Set containing the RVIdentifiers of the parents of the random variable"
children: Set[RVIdentifier] = dataclasses.field(default_factory=set)
"Set containing the RVIdentifiers of the children of the random variable"
@lazy_property
def log_prob(self) -> torch.Tensor:
"""
Returns
The log prob of `value` under `distribution`.
"""
return self.distribution.log_prob(self.value)
def replace(self, **changes) -> Variable:
"""Return a new Variable object with fields replaced by the changes"""
return dataclasses.replace(self, **changes)
| beanmachine-main | src/beanmachine/ppl/world/variable.py |
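A short sketch of using `Variable` directly (hypothetical values; in practice `World` constructs these during inference):

import torch
import torch.distributions as dist
from beanmachine.ppl.world.variable import Variable

var = Variable(value=torch.tensor(0.3), distribution=dist.Normal(0.0, 1.0))
print(var.log_prob)   # lazily computed log N(0.3; 0, 1)

# replace() returns a new dataclass instance; the original is left untouched
# and the log prob of the new instance is recomputed lazily.
var2 = var.replace(value=torch.tensor(-1.0))
print(var2.log_prob, var.value)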
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections.abc import Iterable
from typing import Iterable as IterableType, overload, Type, Union
import torch
import torch.distributions as dist
import torch.distributions.constraints as constraints
from torch.distributions import Distribution
from torch.distributions.transforms import Transform
ConstraintType = Union[constraints.Constraint, Type]
class BetaDimensionTransform(Transform):
"""
Volume preserving transformation to the Beta distribution support.
"""
bijective = True
domain = constraints.real
codomain = constraints.real_vector
def __eq__(self, other):
return isinstance(other, BetaDimensionTransform)
def _call(self, x):
return torch.cat((x.unsqueeze(-1), (1 - x).unsqueeze(-1)), -1)
def _inverse(self, y):
return y[..., 0] / y.sum(dim=-1)
def forward_shape(self, shape):
return shape + (2,)
def inverse_shape(self, shape):
return shape[:-1]
def log_abs_det_jacobian(self, x, y):
return torch.zeros_like(x)
def _unwrap(constraint: ConstraintType):
if isinstance(constraint, constraints.independent):
return _unwrap(constraint.base_constraint)
return constraint if isinstance(constraint, type) else constraint.__class__
def _is_constraint_eq(constraint1: ConstraintType, constraint2: ConstraintType):
return _unwrap(constraint1) == _unwrap(constraint2)
@overload
def is_constraint_eq(
constraint: ConstraintType, check_constraints: ConstraintType
) -> bool:
...
@overload
def is_constraint_eq(
constraint: ConstraintType, check_constraints: IterableType[ConstraintType]
) -> IterableType[bool]:
...
def is_constraint_eq(
constraint: ConstraintType,
check_constraints: Union[ConstraintType, IterableType[ConstraintType]],
) -> Union[bool, IterableType[bool]]:
"""
This provides an equality check that works for different constraints
specified in :mod:`torch.distributions.constraints`. If `constraint` is
`constraints.Independent`, then the `base_constraint` is checked. If
`check_constraints` is a single `Constraint` type or instance this
returns a `True` if the given `constraint` matches `check_constraints`.
Otherwise, if `check_constraints` is an iterable, this returns a `bool`
list that represents an element-wise check.
:param constraint: A constraint class or instance.
:param check_constraints: A constraint class or instance or an iterable
containing constraint classes or instances to check against.
:returns: bool (or a list of bool) values indicating if the given constraint
equals the constraint in `check_constraints`.
"""
if isinstance(check_constraints, Iterable):
return [_is_constraint_eq(constraint, c) for c in check_constraints]
return _is_constraint_eq(constraint, check_constraints)
def get_default_transforms(distribution: Distribution) -> dist.Transform:
"""
Get transforms of a distribution to transform it from constrained space
into unconstrained space.
:param distribution: the distribution to check
:returns: a Transform that need to be applied to the distribution
to transform it from constrained space into unconstrained space
"""
if distribution.support.is_discrete:
return dist.transforms.identity_transform
else:
return dist.biject_to(distribution.support).inv
def initialize_value(distribution: Distribution, initialize_from_prior: bool = False):
"""
Initialized the Variable value
:param initialize_from_prior: if true, returns sample from prior
:returns: the value to the set the Variable value to
"""
sample_val = distribution.sample()
if initialize_from_prior:
return sample_val
support = distribution.support
if isinstance(support, dist.constraints.independent):
support = support.base_constraint
elif is_constraint_eq(support, dist.constraints.real):
return torch.zeros_like(sample_val)
elif is_constraint_eq(support, dist.constraints.simplex):
value = torch.ones_like(sample_val)
return value / sample_val.shape[-1]
elif is_constraint_eq(support, dist.constraints.greater_than):
return (
torch.ones(
sample_val.shape, dtype=sample_val.dtype, device=sample_val.device
)
+ support.lower_bound
)
elif is_constraint_eq(support, dist.constraints.boolean):
return dist.Bernoulli(torch.ones_like(sample_val) / 2).sample()
elif is_constraint_eq(support, dist.constraints.interval):
lower_bound = torch.ones_like(sample_val) * support.lower_bound
upper_bound = torch.ones_like(sample_val) * support.upper_bound
return dist.Uniform(lower_bound, upper_bound).sample()
elif is_constraint_eq(support, dist.constraints.integer_interval):
integer_interval = support.upper_bound - support.lower_bound
return dist.Categorical(
(torch.ones(integer_interval, device=sample_val.device)).expand(
sample_val.shape + (integer_interval,)
)
).sample()
elif is_constraint_eq(support, dist.constraints.nonnegative_integer):
return (
torch.ones(
sample_val.shape, dtype=sample_val.dtype, device=sample_val.device
)
+ support.lower_bound
)
return sample_val
| beanmachine-main | src/beanmachine/ppl/world/utils.py |
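A hedged example of the transform helpers above; `Gamma` and `Bernoulli` stand in for constrained-continuous and discrete priors respectively.

import torch.distributions as dist
from beanmachine.ppl.world.utils import get_default_transforms

# Constrained continuous prior: the result is the inverse of biject_to(support),
# i.e. a map from (0, inf) into unconstrained space.
gamma = dist.Gamma(2.0, 2.0)
to_unconstrained = get_default_transforms(gamma)
print(to_unconstrained(gamma.sample()))

# Discrete prior: the identity transform is returned, nothing to unconstrain.
print(get_default_transforms(dist.Bernoulli(0.3)))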
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from abc import ABCMeta, abstractmethod
from typing import Optional
import torch
from beanmachine.ppl.model.rv_identifier import RVIdentifier
_WORLD_STACK: list[BaseWorld] = []
def get_world_context() -> Optional[BaseWorld]:
return _WORLD_STACK[-1] if _WORLD_STACK else None
class BaseWorld(metaclass=ABCMeta):
def __enter__(self) -> BaseWorld:
"""
This method, together with __exit__, allow us to use world as a context, e.g.
```
with World():
# invoke random variables to update the graph
```
By keeping a stack of context tokens, we can easily nest multiple worlds and
restore the outer context if needed, e.g.
```
world1, world2 = World(), World()
with world1:
# do some graph update specific to world1
with world2:
# update world2
# back to updating world1
```
"""
_WORLD_STACK.append(self)
return self
def __exit__(self, *args) -> None:
_WORLD_STACK.pop()
def call(self, node: RVIdentifier):
"""
A helper function that invokes the random variable and return its value
"""
with self:
return node.wrapper(*node.arguments)
@abstractmethod
def update_graph(self, node: RVIdentifier) -> torch.Tensor:
raise NotImplementedError
| beanmachine-main | src/beanmachine/ppl/world/base_world.py |
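A minimal subclass sketch showing how the context stack behind `BaseWorld` cooperates with `get_world_context`; the list-backed `update_graph` is purely illustrative.

import torch
from beanmachine.ppl.world.base_world import BaseWorld, get_world_context

class ToyWorld(BaseWorld):
    # Records the nodes it is asked about and returns a fixed value.
    def __init__(self):
        self.requested = []

    def update_graph(self, node) -> torch.Tensor:
        self.requested.append(node)
        return torch.tensor(0.0)

outer, inner = ToyWorld(), ToyWorld()
with outer:
    assert get_world_context() is outer
    with inner:
        assert get_world_context() is inner   # innermost world wins
    assert get_world_context() is outer       # outer context is restored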
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
from functools import wraps
from typing import Any, Callable, Dict, Tuple
from beanmachine.ppl.utils.item_counter import ItemCounter
from torch import Tensor
def _tuplify(t: Any) -> Any:
if isinstance(t, list):
return tuple(_tuplify(y) for y in t)
return t
# This returns a tuple or value whose shape is the same as the input tensor.
# That is:
#
# tensor(1) --> 1
# tensor([]) --> ()
# tensor([1]) --> (1,)
# tensor([1, 2]) --> (1, 2)
# tensor([[1, 2], [3, 4]]) --> ((1, 2), (3, 4))
#
# and so on
def tensor_to_tuple(t: Tensor) -> Any:
result = _tuplify(t.tolist())
return result
class MemoizationKey:
# It would be nice to just use a tuple (wrapper, args) for the memoization
# key, but tensors can only be compared for equality with torch.equal(t1, t2),
# and tensors do not hash via value equality.
#
# We therefore replace tensors with tuples that contain all the values of the
# tensor. For example, if our arguments are (1, tensor([2, 3]), 4) then our
# new arguments are (1, (2, 3), 4)
wrapper: Callable
arguments: Tuple
hashcode: int
def __init__(self, wrapper: Callable, arguments: Tuple) -> None:
self.arguments = (
wrapper,
tuple(
tensor_to_tuple(a) if isinstance(a, Tensor) else a for a in arguments
),
)
self.wrapper = wrapper
self.hashcode = hash(self.arguments)
def __hash__(self) -> int:
return self.hashcode
def __eq__(self, o) -> bool:
return (
isinstance(o, MemoizationKey)
and self.hashcode == o.hashcode
and self.wrapper == o.wrapper
and self.arguments == o.arguments
)
total_memoized_functions = 0
total_memoized_calls = 0
total_cache_misses = 0
count_calls = False
function_calls = ItemCounter()
def memoizer_report() -> str:
call_report = [
f"{item.__name__}: {count}\n" for (item, count) in function_calls.items.items()
]
return (
f"funcs: {total_memoized_functions} "
+ f"calls: {total_memoized_calls} "
+ f"misses: {total_cache_misses}\n"
+ "".join(call_report)
)
def memoize(f):
"""
Decorator to be used to memoize arbitrary functions.
"""
global total_memoized_functions
total_memoized_functions += 1
cache: Dict[Any, Any] = {}
@wraps(f)
def wrapper(*args):
if count_calls:
global total_memoized_calls
total_memoized_calls += 1
function_calls.add_item(f)
key = MemoizationKey(wrapper, args)
if key not in cache:
global total_cache_misses
total_cache_misses += 1
result = f(*args)
cache[key] = result
return result
return cache[key]
if inspect.ismethod(f):
meth_name = f.__name__ + "_wrapper"
setattr(f.__self__, meth_name, wrapper)
else:
f._wrapper = wrapper
return wrapper
# In Python, how do we memoize a constructor to ensure that instances of
# a class with the same constructor arguments are reference-equal? We could
# put the @memoize attribute on the class, but this leads to many problems.
#
# ASIDE: What problems? And why?
#
# A class is a function that constructs instances; a decorator is a function
# from functions to functions. "@memoize class C: ..." passes the instance-
# construction function to the decorator and assigns the result to C; this means
# that C is no longer a *type*; it is the *function* returned by the decorator.
# This in turn means that "instanceof(c, C)" no longer works because C is not a type.
# Similarly C cannot be a base class because it is not a type. And so on.
#
# END ASIDE
#
# The correct way to do this in Python is to create a metaclass. A class is a factory
# for instances; a metaclass is a factory for classes. We can create a metaclass which
# produces classes that are memoized.
#
# The default metaclass in Python is "type"; if you call "type(name, bases, attrs)"
# where name is the name of the new type, bases is a tuple of base types, and attrs
# is a dictionary of name-value pairs, then you get back a new class with that name,
# base classes, and attributes. We can derive from type to make new metaclasses:
class MemoizedClass(type):
# __new__ is called when the metaclass creates a new class.
# metacls is the "self" of the metaclass
# name is the name of the class we're creating
# bases is a tuple of base types
# attrs is a dictionary of attributes
def __new__(
metacls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any]
) -> type:
# The memoized values will be stored in a per-class dictionary called
# _cache, so make sure that the attributes dictionary has that.
if "_cache" not in attrs:
attrs["_cache"] = {}
# That's the only special thing we need to do, so defer the actual
# type creation to the "type" metaclass -- our base type.
return super(MemoizedClass, metacls).__new__(metacls, name, bases, attrs)
# A class is a function which constructs instances; when that function is
# called to construct an instance, the __call__ handler is invoked in the
# metaclass. By default type.__call__ simply creates a new instance. We
# can replace that behavior by overriding the __call__ handler to do something
# else.
#
# cls is the class that we are trying to create an instance of; *args is
# the argument list passed to the constructor.
def __call__(cls, *args):
# TODO: We do not collect statistics on memoization use here.
# TODO: We do not canonicalize arguments as the memoizer does above.
if args not in cls._cache:
# This is the first time we've constructed this class with these
# arguments. Defer to the __call__ behavior of "type", which is the
# superclass of this metaclass.
new_instance = super(MemoizedClass, cls).__call__(*args)
cls._cache[args] = new_instance
return new_instance
return cls._cache[args]
# You then use this as
# class Foo(FooBase, metaclass=MemoizedClass): ...
| beanmachine-main | src/beanmachine/ppl/utils/memoize.py |
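Usage sketches for the two memoization mechanisms above; the toy function and class are hypothetical.

from beanmachine.ppl.utils.memoize import memoize, MemoizedClass

@memoize
def slow_square(x):
    return x * x

assert slow_square(3) == 9
assert slow_square(3) == 9   # second call is served from the cache

class Point(metaclass=MemoizedClass):
    def __init__(self, x, y):
        self.x, self.y = x, y

# Same constructor arguments yield the same (reference-equal) instance, while
# Point remains a real type usable with isinstance and as a base class.
assert Point(1, 2) is Point(1, 2)
assert isinstance(Point(1, 2), Point)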
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A helper class to give unique names to a set of objects."""
from typing import Any, Callable, Dict, Optional
def make_namer(
namer: Optional[Callable[[Any], str]] = None, prefix: str = ""
) -> Callable[[Any], str]:
if namer is None:
un = UniqueNames(prefix)
return lambda x: un.name(x)
else:
return namer
class UniqueNames(object):
_map: Dict[Any, str]
_prefix: str
def __init__(self, prefix: str = ""):
self._map = {}
self._prefix = prefix
def name(self, o: Any) -> str:
if o.__hash__ is None:
# This can lead to a situation where two objects are given the
# same name; if the object is named, then freed, and then a different
# object is allocated at the same address, the ID will be re-used.
# Ideally, the instance of UniqueNames should be longer-lived than
# any of the named objects.
o = "unhashable " + str(id(o))
if o not in self._map:
self._map[o] = self._prefix + str(len(self._map))
return self._map[o]
| beanmachine-main | src/beanmachine/ppl/utils/unique_name.py |
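A quick illustration of `make_namer` / `UniqueNames`; the named objects are arbitrary.

from beanmachine.ppl.utils.unique_name import make_namer

namer = make_namer(prefix="N")
a, b = object(), object()
assert namer(a) == "N0"
assert namer(b) == "N1"
assert namer(a) == "N0"   # stable: the same object keeps its name

# Passing an explicit namer bypasses UniqueNames entirely.
custom = make_namer(namer=lambda o: "X")
assert custom(a) == "X"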
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A mutable graph builder"""
from hashlib import md5
from typing import Callable, Dict, Generic, List, Optional, TypeVar
from beanmachine.ppl.utils.dotbuilder import DotBuilder
from beanmachine.ppl.utils.equivalence import partition_by_kernel
from beanmachine.ppl.utils.unique_name import make_namer
# A plate is a collection of nodes and plates.
# A graph is a single plate with a collection of edges.
# That is, in this model of a graph, only the topmost level
# contains the edges; plates contain no edges.
T = TypeVar("T") # Node type
class Plate(Generic[T]):
# Yes, using lists means that we have O(n) removal. But removals are
# rare, the lists are typically short, and lists guarantee that
# the enumeration order is deterministic, which means that we get
# repeatable behavior for testing.
_plates: "List[Plate[T]]"
_parent: "Optional[Plate[T]]"
_graph: "Graph[T]"
_nodes: List[T]
def __init__(self, graph: "Graph[T]", parent: "Optional[Plate[T]]") -> None:
self._plates = []
self._parent = parent
self._graph = graph
self._nodes = []
def with_plate(self) -> "Plate[T]":
"""Add a new Plate to this Plate; returns the new Plate"""
sub = Plate(self._graph, self)
self._plates.append(sub)
return sub
def without_plate(self, sub: "Plate[T]") -> "Plate[T]":
"""Removes a given Plate, and all its Plates, and all its nodes."""
if sub in self._plates:
# Recursively destroy every nested Plate.
# We're going to be modifying a collection as we enumerate it
# so make a copy.
for subsub in sub._plates.copy():
sub.without_plate(subsub)
# Destroy every node.
for node in sub._nodes.copy():
sub.without_node(node)
# Delete the Plate
self._plates.remove(sub)
return self
def with_node(self, node: T) -> "Plate[T]":
"""Adds a new node to the plate, or, if the node is already in the
graph, moves it to this plate. Edges are unaffected by moves."""
if node not in self._nodes:
# Remove the node from its current Plate.
if node in self._graph._nodes:
self._graph._nodes[node]._nodes.remove(node)
# Let the graph know that this node is in this Plate.
self._graph._nodes[node] = self
self._nodes.append(node)
# If this is a new node, set its incoming and outgoing
# edge sets to empty. If it is not a new node, keep
# them the same.
if node not in self._graph._outgoing:
self._graph._outgoing[node] = []
if node not in self._graph._incoming:
self._graph._incoming[node] = []
return self
def without_node(self, node: T) -> "Plate[T]":
if node in self._nodes:
# Remove the node
del self._graph._nodes[node]
self._nodes.remove(node)
# Delete all the edges associated with this node
for o in list(self._graph._outgoing[node]):
self._graph._incoming[o].remove(node)
for i in list(self._graph._incoming[node]):
self._graph._outgoing[i].remove(node)
del self._graph._outgoing[node]
del self._graph._incoming[node]
return self
def with_edge(self, start: T, end: T) -> "Plate[T]":
if start not in self._graph._nodes:
self.with_node(start)
if end not in self._graph._nodes:
self.with_node(end)
self._graph._incoming[end].append(start)
self._graph._outgoing[start].append(end)
return self
class Graph(Generic[T]):
_nodes: Dict[T, Plate[T]]
_outgoing: Dict[T, List[T]]
_incoming: Dict[T, List[T]]
_top: Plate[T]
_to_name: Callable[[T], str]
_to_label: Callable[[T], str]
_to_kernel: Callable[[T], str]
def __init__(
self,
to_name: Optional[Callable[[T], str]] = None,
to_label: Callable[[T], str] = str,
to_kernel: Callable[[T], str] = str,
):
# to_name gives a *unique* name to a node.
# to_label gives a *not necessarily unique* label when *displaying* a graph.
# to_kernel gives a string that is always the same if two nodes are to
# be treated as isomorphic. This lets us make labels in the output that
# are different than the isomorphism kernel.
self._nodes = {}
self._outgoing = {}
self._incoming = {}
self._top = Plate(self, None)
self._to_name = make_namer(to_name, "N")
self._to_label = to_label
self._to_kernel = to_kernel
def with_plate(self) -> "Plate[T]":
"""Add a plate to the top level; returns the plate"""
return self._top.with_plate()
def without_plate(self, sub: Plate[T]) -> "Graph[T]":
"""Removes a plate from the top level, and all its plates, and
all its nodes."""
self._top.without_plate(sub)
return self
def global_without_plate(self, sub: Plate[T]) -> "Graph[T]":
"""Remove a plate no matter where it is, and all its plates,
and all its nodes."""
if sub._graph == self:
        p = sub._parent
        if p is not None:  # p is None only for the top plate, which is never removed
            p.without_plate(sub)
return self
def with_node(self, node: T) -> "Graph[T]":
"""Add a node to the top level"""
self._top.with_node(node)
return self
def without_node(self, node: T) -> "Graph[T]":
"""Remove a node from the top level"""
self._top.without_node(node)
return self
def global_without_node(self, node: T) -> "Graph[T]":
"""Remove a node no matter where it is"""
if node in self._nodes:
self._nodes[node].without_node(node)
return self
def with_edge(self, start: T, end: T) -> "Graph[T]":
if start not in self._nodes:
self.with_node(start)
if end not in self._nodes:
self.with_node(end)
if start not in self._incoming[end]:
self._incoming[end].append(start)
if end not in self._outgoing[start]:
self._outgoing[start].append(end)
return self
def without_edge(self, start: T, end: T) -> "Graph[T]":
if start in self._nodes and end in self._nodes:
self._incoming[end].remove(start)
self._outgoing[start].remove(end)
return self
def _is_dag(self, node: T) -> bool:
if node not in self._nodes:
return True
in_flight: List[T] = []
done: List[T] = []
def depth_first(current: T) -> bool:
if current in in_flight:
return False
if current in done:
return True
in_flight.append(current)
for child in self._outgoing[current]:
if not depth_first(child):
return False
in_flight.remove(current)
done.append(current)
return True
return depth_first(node)
def _dag_hash(self, current: T, map: Dict[T, str]) -> str:
if current in map:
return map[current]
label = self._to_kernel(current)
children = (self._dag_hash(c, map) for c in self._outgoing[current])
summary = label + "/".join(sorted(children))
hash = md5(summary.encode("utf-8")).hexdigest()
map[current] = hash
return hash
def are_dags_isomorphic(self, n1: T, n2: T) -> bool:
"""Determines if two nodes in a graph, which must both be roots of a DAG,
are isomorphic. Node labels are given by the function, which must return the
same string for two nodes iff the two nodes are value-equal for the purposes of
isomorphism detection."""
map: Dict[T, str] = {}
assert self._is_dag(n1)
assert self._is_dag(n2)
h1 = self._dag_hash(n1, map)
h2 = self._dag_hash(n2, map)
return h1 == h2
def merge_isomorphic(self, n1: T, n2: T) -> bool:
"""Merges two isomorphic nodes.
Returns true if there was any merge made."""
# All edges of n2 become edges of n1, and n2 is deleted.
if n1 not in self._nodes or n2 not in self._nodes:
return False
for in_n2 in self._incoming[n2]:
self.with_edge(in_n2, n1)
for out_n2 in self._outgoing[n2]:
self.with_edge(n1, out_n2)
self.without_node(n2)
return True
def merge_isomorphic_many(self, nodes: List[T]) -> bool:
"""Merges a collection of two or more isomorphic nodes into nodes[0]
Returns true if there was any merge made."""
result = False
for i in range(1, len(nodes)):
result = self.merge_isomorphic(nodes[0], nodes[i]) or result
return result
def merge_isomorphic_children(self, node: T) -> bool:
"""Merges all the isomorphic children of a node.
Returns true if there was any merge made.
The surviving node is the one with the least name."""
if node not in self._outgoing:
return False
map: Dict[T, str] = {}
def kernel(n: T) -> str:
return self._dag_hash(n, map)
equivalence_classes = partition_by_kernel(self._outgoing[node], kernel)
result = False
for eqv in equivalence_classes:
result = (
self.merge_isomorphic_many(sorted(eqv, key=self._to_name)) or result
)
return result
def outgoing(self, node: T) -> List[T]:
if node in self._outgoing:
return list(self._outgoing[node])
return []
def incoming(self, node: T) -> List[T]:
if node in self._incoming:
return list(self._incoming[node])
return []
def reachable(self, node: T) -> List[T]:
# Given a node in a graph, return the transitive closure of outgoing
# nodes, including the original node.
if node not in self._nodes:
return []
in_flight: List[T] = []
done: List[T] = []
def depth_first(current: T) -> None:
if (current not in in_flight) and (current not in done):
in_flight.append(current)
for child in self._outgoing[current]:
depth_first(child)
in_flight.remove(current)
done.append(current)
depth_first(node)
return done
def to_dot(self) -> str:
"""Converts a graph to a program in the DOT language."""
db: DotBuilder = DotBuilder()
def add_nodes(sub: Plate[T], name: str) -> None:
if name != "":
db.start_subgraph(name, True)
namer = make_namer(prefix=name + "_")
for subsub in sub._plates:
add_nodes(subsub, namer(subsub))
for n in sub._nodes:
db.with_node(self._to_name(n), self._to_label(n))
if name != "":
db.end_subgraph()
add_nodes(self._top, "")
for start, ends in self._outgoing.items():
for end in ends:
db.with_edge(self._to_name(start), self._to_name(end))
return str(db)
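# A minimal usage sketch (not part of the library): builds a tiny graph with one
# plate and renders it in the DOT language via to_dot(). It relies only on the
# Graph and Plate classes defined above; node values are plain strings.
if __name__ == "__main__":
    g: Graph[str] = Graph(to_label=str)
    inner = g.with_plate()                      # a nested (cluster) subgraph
    inner.with_node("x").with_node("y")
    g.with_node("z")
    g.with_edge("x", "y").with_edge("y", "z")   # edges always live at the top level
    print(g.to_dot())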
| beanmachine-main | src/beanmachine/ppl/utils/graph.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from beanmachine.ppl.utils.dotbuilder import DotBuilder, print_graph
from beanmachine.ppl.utils.equivalence import partition_by_kernel, partition_by_relation
from beanmachine.ppl.utils.treeprinter import _is_named_tuple, _to_string, print_tree
from beanmachine.ppl.utils.unique_name import make_namer
__all__ = [
"print_tree",
"_is_named_tuple",
"_to_string",
"partition_by_relation",
"partition_by_kernel",
"print_graph",
"DotBuilder",
"make_namer",
]
| beanmachine-main | src/beanmachine/ppl/utils/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A builder for the graphviz DOT language"""
import json
import re
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from beanmachine.ppl.utils.treeprinter import _is_named_tuple, _to_string
from beanmachine.ppl.utils.unique_name import make_namer
def _get_children(n: Any) -> List[Tuple[str, Any]]:
if isinstance(n, dict):
return list(n.items())
if _is_named_tuple(n):
return [(k, getattr(n, k)) for k in type(n)._fields]
if isinstance(n, tuple) or isinstance(n, list):
return [(str(ind), item) for (ind, item) in enumerate(n)]
return []
def print_graph(
roots: List[Any],
get_children: Callable[[Any], List[Tuple[str, Any]]] = _get_children,
to_node_name: Optional[Callable[[Any], str]] = None,
to_label: Callable[[Any], str] = _to_string,
) -> str:
"""
This function converts an object representing a graph into a string
in the DOT graph display language.
The roots are a set of nodes in the graph; the final graph description will
contain the transitive closure of the children of all roots.
get_children returns a list of (edge_label, node) pairs; if no argument
is supplied then a default function that can handle lists, tuples and
dictionaries is used.
to_node_name returns a *unique* string used to identify the node in the
graph.
to_label gives a *not necessarily unique* label for a node in a graph.
Again if not supplied, a default that can handle dictionaries, lists and
tuples is used.
"""
tnn = make_namer(to_node_name, "N")
builder: DotBuilder = DotBuilder()
stack: List[Any] = []
stack.extend(roots)
done: Set[str] = set()
for root in roots:
builder.with_node(tnn(root), to_label(root))
while len(stack) > 0:
current = stack.pop()
current_node = tnn(current)
if current_node not in done:
for (edge_label, child) in get_children(current):
child_node = tnn(child)
builder.with_node(child_node, to_label(child))
builder.with_edge(current_node, child_node, edge_label)
stack.append(child)
done.add(current_node)
return str(builder)
class DotBuilder:
name: str
is_subgraph: bool
is_cluster: bool
_label: str
_node_map: "Dict[str, DotNode]"
_edges: "Set[DotEdge]"
_comments: List[str]
_subgraphs: "List[DotBuilder]"
_nodes: "List[DotNode]"
_current_subgraph: "Optional[DotBuilder]"
def __init__(
self, name: str = "graph", is_subgraph: bool = False, is_cluster: bool = False
):
self.name = name
self.is_subgraph = is_subgraph
self.is_cluster = is_cluster
self._label = ""
self._node_map = {}
self._edges = set()
self._comments = []
self._subgraphs = []
self._nodes = []
self._current_subgraph = None
def with_label(self, label: str) -> "DotBuilder":
sg = self._current_subgraph
if sg is None:
self._label = label
else:
sg.with_label(label)
return self
def start_subgraph(self, name: str, is_cluster: bool) -> "DotBuilder":
sg = self._current_subgraph
if sg is None:
csg = DotBuilder(name, True, is_cluster)
self._current_subgraph = csg
self._subgraphs.append(csg)
else:
sg.start_subgraph(name, is_cluster)
return self
def end_subgraph(self) -> "DotBuilder":
sg = self._current_subgraph
if sg is None:
raise ValueError("Cannot end a non-existing subgraph.")
elif sg._current_subgraph is None:
self._current_subgraph = None
else:
sg.end_subgraph()
return self
def _get_node(self, name: str) -> "DotNode":
if name in self._node_map:
return self._node_map[name]
new_node = DotNode(name, "", "")
self._node_map[name] = new_node
self._nodes.append(new_node)
return new_node
def with_comment(self, comment: str) -> "DotBuilder":
sg = self._current_subgraph
if sg is None:
self._comments.append(comment)
else:
sg.with_comment(comment)
return self
def with_node(self, name: str, label: str, color: str = "") -> "DotBuilder":
sg = self._current_subgraph
if sg is None:
n = self._get_node(name)
n.label = label
n.color = color
else:
sg.with_node(name, label, color)
return self
def with_edge(
self,
frm: str,
to: str,
label: str = "",
color: str = "",
constrained: bool = True,
) -> "DotBuilder":
sg = self._current_subgraph
if sg is None:
f = self._get_node(frm)
t = self._get_node(to)
self._edges.add(DotEdge(f, t, label, color, constrained))
else:
sg.with_edge(frm, to, label, color, constrained)
return self
def _to_string(self, indent: str, sb: List[str]) -> List[str]:
new_indent = indent + " "
sb.append(indent)
sb.append("subgraph" if self.is_subgraph else "digraph")
i = ""
has_name = len(self.name) > 0
if has_name and self.is_cluster:
i = smart_quote("cluster_" + self.name)
elif has_name:
i = smart_quote(self.name)
elif self.is_cluster:
i = "cluster"
if len(i) > 0:
sb.append(" " + i)
sb.append(" {\n")
for c in self._comments:
sb.append(new_indent + "// " + c + "\n")
if len(self._label) > 0:
sb.append(new_indent + "label=" + smart_quote(self._label) + "\n")
nodes = sorted(new_indent + str(n) + "\n" for n in self._nodes)
sb.extend(nodes)
edges = sorted(new_indent + str(e) + "\n" for e in self._edges)
sb.extend(edges)
for db in self._subgraphs:
sb = db._to_string(new_indent, sb)
sb.append(indent + "}\n")
return sb
def __str__(self):
return "".join(self._to_string("", []))
class DotNode:
name: str
label: str
color: str
def __init__(self, name: str, label: str, color: str):
self.name = name
self.label = label
self.color = color
def __str__(self) -> str:
props: List[str] = []
if len(self.label) != 0 and self.label != self.name:
props.append("label=" + smart_quote(self.label))
if len(self.color) != 0:
props.append("color=" + smart_quote(self.label))
p = "" if len(props) == 0 else "[" + " ".join(props) + "]"
return smart_quote(self.name) + p + ";"
class DotEdge:
frm: DotNode
to: DotNode
label: str
color: str
constrained: bool
def __init__(
self, frm: DotNode, to: DotNode, label: str, color: str, constrained: bool
):
self.frm = frm
self.to = to
self.label = label
self.color = color
self.constrained = constrained
def __str__(self) -> str:
props: List[str] = []
if len(self.label) != 0:
props.append("label=" + smart_quote(self.label))
if len(self.color) != 0:
props.append("color=" + smart_quote(self.label))
if not self.constrained:
props.append("constraint=false")
p = "" if len(props) == 0 else "[" + " ".join(props) + "]"
return smart_quote(self.frm.name) + " -> " + smart_quote(self.to.name) + p + ";"
_keywords: List[str] = ["digraph", "edge", "graph", "node", "strict", "subgraph"]
_alphanum = re.compile("^[A-Za-z_][A-Za-z_0-9]*$")
_numeric = re.compile("^[-]?(\\.[0-9]+|[0-9]+(\\.[0-9]*)?)$")
def smart_quote(s: str) -> str:
if s is None or len(s) == 0:
return '""'
if s.lower() in _keywords:
return json.dumps(s)
if _alphanum.match(s):
return s
if _numeric.match(s):
return s
return json.dumps(s)
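# A minimal usage sketch (not part of the library): print_graph above can render
# an ordinary nested Python structure; the default helpers treat dict items and
# list elements as labeled edges, so no custom callbacks are needed here.
if __name__ == "__main__":
    data = {"model": ["conv", "relu", {"pool": "max"}]}
    print(print_graph([data]))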
| beanmachine-main | src/beanmachine/ppl/utils/dotbuilder.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
def _first_word(s: str) -> str:
r = re.search("\\w+", s)
return r.group(0) if r else ""
_always_a = {"uniform"}
_always_an = {"18"}
_vowel_sounds = "aeiouxAEIOUX8"
def use_an(s: str) -> bool:
w = _first_word(s)
if len(w) == 0:
return False
if any(w.startswith(prefix) for prefix in _always_a):
return False
if any(w.startswith(prefix) for prefix in _always_an):
return True
return w[0] in _vowel_sounds
def a_or_an(s: str) -> str:
return "an " + s if use_an(s) else "a " + s
def A_or_An(s: str) -> str:
return "An " + s if use_an(s) else "A " + s
| beanmachine-main | src/beanmachine/ppl/utils/a_or_an.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Defines print_tree, a helper function to render Python objects as trees."""
from typing import Any, Callable, List
def _is_named_tuple_type(t: type) -> bool:
if not isinstance(getattr(t, "_fields", None), tuple):
return False
if len(t.__bases__) == 1 and t.__bases__[0] == tuple:
return True
return any(_is_named_tuple_type(b) for b in t.__bases__)
def _is_named_tuple(x: Any) -> bool:
return _is_named_tuple_type(type(x))
def _get_children_key_value(v: Any) -> List[Any]:
if isinstance(v, dict):
return list(v.items())
if isinstance(v, list):
return v
if _is_named_tuple(v):
return [(k, getattr(v, k)) for k in type(v)._fields]
if isinstance(v, tuple):
return list(v)
return [v]
def _get_children(n: Any) -> List[Any]:
if isinstance(n, dict):
return list(n.items())
if isinstance(n, list):
return n
if _is_named_tuple(n):
return [(k, getattr(n, k)) for k in type(n)._fields]
# for key-value pairs we do not want subtypes of tuple, just tuple.
if type(n) == tuple and len(n) == 2:
return _get_children_key_value(n[1])
if isinstance(n, tuple):
return list(n)
return []
def _to_string(n: Any) -> str:
if isinstance(n, dict):
return "dict"
if isinstance(n, list):
return "list"
# for key-value pairs we do not want subtypes of tuple, just tuple.
if type(n) == tuple and len(n) == 2:
return str(n[0])
if _is_named_tuple(n):
return type(n).__name__
if isinstance(n, tuple):
return "tuple"
return str(n)
def print_tree(
root: Any,
get_children: Callable[[Any], List[Any]] = _get_children,
to_string: Callable[[Any], str] = _to_string,
unicode: bool = True,
) -> str:
"""
Renders an arbitrary Python object as a tree. This is handy for debugging.
If you have a specific tree structure imposed on an object, you can pass
in your own get_children method; if omitted, a function that handles Python
dictionaries, tuples, named tuples and lists is the default.
The text of each node is determined by the to_string argument; if omitted
a default function is used.
The tree produced uses the Unicode box-drawing characters by default; to
use straight ASCII characters, pass False for the unicode parameter.
"""
def pt(node, indent):
builder.append(to_string(node))
builder.append("\n")
children = get_children(node)
for i in range(len(children)):
last = i == len(children) - 1
child = children[i]
builder.append(indent)
builder.append(el if last else tee)
builder.append(dash)
pt(child, indent + (" " if last else bar) + " ")
el = "\u2514" if unicode else "+"
tee = "\u251c" if unicode else "+"
dash = "\u2500" if unicode else "-"
bar = "\u2502" if unicode else "|"
builder = []
pt(root, "")
return "".join(builder)
| beanmachine-main | src/beanmachine/ppl/utils/treeprinter.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This is just a little wrapper class around a dictionary for quickly
# and easily counting how many of each item you've got.
from typing import Any, Dict
class ItemCounter:
items: Dict[Any, int]
def __init__(self) -> None:
self.items = {}
def add_item(self, item: Any) -> None:
if item not in self.items:
self.items[item] = 1
else:
self.items[item] = self.items[item] + 1
def remove_item(self, item: Any) -> None:
if item not in self.items:
return
count = self.items[item] - 1
if count == 0:
del self.items[item]
else:
assert count > 0
self.items[item] = count
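# A minimal usage sketch (not part of the library): counts duplicates and shows
# that a count dropping to zero removes the key entirely.
if __name__ == "__main__":
    counter = ItemCounter()
    for token in ["a", "b", "a", "b", "b"]:
        counter.add_item(token)
    counter.remove_item("a")
    counter.remove_item("a")
    print(counter.items)  # {'b': 3}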
| beanmachine-main | src/beanmachine/ppl/utils/item_counter.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
import torch
import torch.autograd
from torch._vmap_internals import _vmap as vmap
def gradients(
outputs: torch.Tensor, inputs: torch.Tensor, allow_unused: bool = True
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Compute the first and the second gradient of the output Tensor
w.r.t. the input Tensor.
    :param outputs: A Tensor variable with a single element.
    :param inputs: A 1-d tensor input variable that was used to compute the
output. Note: the input must have requires_grad=True
:returns: tuple of Tensor variables -- The first and the second gradient.
"""
if outputs.numel() != 1:
raise ValueError(
f"output tensor must have exactly one element, got {outputs.numel()}"
)
grad1 = torch.autograd.grad(
outputs, inputs, create_graph=True, retain_graph=True, allow_unused=allow_unused
)[0].reshape(-1)
# using identity matrix to reconstruct the full hessian from vector-Jacobian product
hessians = vmap(
lambda vec: torch.autograd.grad(
grad1,
inputs,
vec,
create_graph=True,
retain_graph=True,
allow_unused=allow_unused,
)[0].reshape(-1)
)(torch.eye(grad1.size(0)))
return grad1.detach(), hessians.detach()
def halfspace_gradients(
outputs: torch.Tensor, inputs: torch.Tensor, allow_unused: bool = True
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Compute the first and the second gradient of the output Tensor w.r.t. the input
Tensor for half space.
    :param outputs: A Tensor variable with a single element.
    :param inputs: A 1-d tensor input variable that was used to compute the
output. Note: the input must have requires_grad=True
:returns: tuple of Tensor variables -- The first and the second gradient.
"""
grad1, hessians = gradients(outputs, inputs, allow_unused)
return grad1, torch.diagonal(hessians)
def simplex_gradients(
outputs: torch.Tensor, inputs: torch.Tensor, allow_unused: bool = True
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Compute the first and the second gradient of the output Tensor w.r.t. the input
Tensor for simplex.
    :param outputs: A Tensor variable with a single element.
    :param inputs: A 1-d tensor input variable that was used to compute the
output. Note: the input must have requires_grad=True
:returns: tuple of Tensor variables -- The first and the second gradient.
"""
grad1, hessians = gradients(outputs, inputs, allow_unused)
hessian_diag = torch.diagonal(hessians).clone()
# mask diagonal entries
hessians[torch.eye(hessians.size(0)).bool()] = float("-inf")
hessian_diag -= hessians.max(dim=0)[0]
return grad1, hessian_diag
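# A minimal usage sketch (not part of the library): checks gradients() above on a
# quadratic form f(x) = x^T A x, whose gradient is (A + A^T) x and whose Hessian
# is A + A^T.
if __name__ == "__main__":
    A = torch.tensor([[2.0, 1.0], [0.0, 3.0]])
    x = torch.randn(2, requires_grad=True)
    f = x @ A @ x
    grad, hess = gradients(f, x)
    assert torch.allclose(grad, (A + A.T) @ x)
    assert torch.allclose(hess, A + A.T)
    print(grad, hess, sep="\n")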
| beanmachine-main | src/beanmachine/ppl/utils/tensorops.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Defines partition, a helper function to partition a set into
equivalence classes by an equivalence relation."""
from collections import defaultdict
from typing import Callable, Iterable, List, Set, TypeVar
_T = TypeVar("T")
_K = TypeVar("K")
def partition_by_relation(
items: Iterable[_T], relation: Callable[[_T, _T], bool]
) -> List[Set[_T]]:
# This is a quadratic algorithm, but n is likely to be small.
result = []
for item in items:
eqv = next(filter((lambda s: relation(next(iter(s)), item)), result), None)
if eqv is None:
eqv = set()
result.append(eqv)
eqv.add(item)
return result
def partition_by_kernel(
items: Iterable[_T], kernel: Callable[[_T], _K]
) -> List[Set[_T]]:
d = defaultdict(set)
for item in items:
d[kernel(item)].add(item)
return list(d.values())
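# A minimal usage sketch (not part of the library): partitions the integers 1..6
# into equivalence classes, once via a pairwise relation and once via a kernel.
if __name__ == "__main__":
    nums = [1, 2, 3, 4, 5, 6]
    print(partition_by_relation(nums, lambda a, b: a % 2 == b % 2))  # [{1, 3, 5}, {2, 4, 6}]
    print(partition_by_kernel(nums, lambda n: n % 2))  # [{1, 3, 5}, {2, 4, 6}]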
| beanmachine-main | src/beanmachine/ppl/utils/equivalence.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
from typing import Set, Tuple
from beanmachine.ppl.utils.memoize import tensor_to_tuple
from torch import Tensor, tensor
# When constructing the support of various nodes we often
# must remove duplicates from a set of possible values.
# Unfortunately, it is not easy to do so with torch tensors.
# This helper class implements a set of tensors using the same
# technique as is used in the function call memoizer: we encode
# the data in the tensor into a tuple with the same shape. The
# tuple implements hashing and equality correctly, so we can put
# it in a set.
class SetOfTensors(collections.abc.Set):
_elements: Set[Tuple]
def __init__(self, iterable):
self._elements = set()
for value in iterable:
t = value if isinstance(value, Tensor) else tensor(value)
self._elements.add(tensor_to_tuple(t))
def __iter__(self):
return (tensor(t) for t in self._elements)
def __contains__(self, value):
t = value if isinstance(value, Tensor) else tensor(value)
return tensor_to_tuple(t) in self._elements
def __len__(self):
return len(self._elements)
def __str__(self):
return "\n".join(sorted(str(t) for t in self))
| beanmachine-main | src/beanmachine/ppl/utils/set_of_tensors.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Set
class MultiDictionary:
"""A simple append-only multidictionary; values are deduplicated
and must be hashable."""
_d: Dict[Any, Set[Any]]
def __init__(self) -> None:
self._d = {}
def add(self, key: Any, value: Any) -> None:
if key not in self._d:
self._d[key] = {value}
else:
self._d[key].add(value)
def __getitem__(self, key: Any) -> Set[Any]:
return self._d[key] if key in self else set()
def __iter__(self):
return iter(self._d)
def __len__(self):
return len(self._d)
def __contains__(self, key: Any):
return key in self._d
def keys(self):
return self._d.keys()
def items(self):
return self._d.items()
def __repr__(self) -> str:
return (
"{"
+ "\n".join(
str(key) + ":{" + ",\n".join(sorted(str(v) for v in self[key])) + "}"
for key in self
)
+ "}"
)
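# A minimal usage sketch (not part of the library): each key maps to the
# deduplicated set of values that were added for it; missing keys yield set().
if __name__ == "__main__":
    md = MultiDictionary()
    md.add("parents", "mu")
    md.add("parents", "mu")
    md.add("parents", "sigma")
    print(md["parents"])  # {'mu', 'sigma'} (a set, so order may vary)
    print(md["missing"])  # set()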
| beanmachine-main | src/beanmachine/ppl/utils/multidictionary.py |
| beanmachine-main | src/beanmachine/ppl/examples/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import torch.distributions as dist
from torch import Tensor
class BetaBinomialModel:
"""This Bean Machine model is an example of conjugacy, where
the prior and the likelihood are the Beta and the Binomial
distributions respectively. Conjugacy means the posterior
will also be in the same family as the prior, Beta.
The random variable names theta and x follow the
typical presentation of the conjugate prior relation in the
form of p(theta|x) = p(x|theta) * p(theta)/p(x).
Note: Variable names here follow those used on:
https://en.wikipedia.org/wiki/Conjugate_prior
"""
def __init__(self, alpha: Tensor, beta: Tensor, n: Tensor) -> None:
self.alpha_ = alpha
self.beta_ = beta
self.n_ = n
@bm.random_variable
def theta(self) -> dist.Distribution:
return dist.Beta(self.alpha_, self.beta_)
@bm.random_variable
def x(self) -> dist.Distribution:
return dist.Binomial(self.n_, self.theta())
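# A minimal usage sketch (not part of the library): fits the model above with
# single-site ancestral Metropolis-Hastings and compares the posterior mean of
# theta with its closed form (alpha + x) / (alpha + beta + n). Assumes that
# bm.SingleSiteAncestralMetropolisHastings is available in this version of
# Bean Machine.
if __name__ == "__main__":
    import torch

    model = BetaBinomialModel(
        alpha=torch.tensor(2.0), beta=torch.tensor(2.0), n=torch.tensor(10.0)
    )
    samples = bm.SingleSiteAncestralMetropolisHastings().infer(
        queries=[model.theta()],
        observations={model.x(): torch.tensor(7.0)},
        num_samples=2000,
        num_chains=2,
    )
    print(samples[model.theta()].mean())  # close to (2 + 7) / (2 + 2 + 10) ~= 0.64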
| beanmachine-main | src/beanmachine/ppl/examples/conjugate_models/beta_binomial.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from beanmachine.ppl.examples.conjugate_models.beta_binomial import BetaBinomialModel
from beanmachine.ppl.examples.conjugate_models.categorical_dirichlet import (
CategoricalDirichletModel,
)
from beanmachine.ppl.examples.conjugate_models.gamma_gamma import GammaGammaModel
from beanmachine.ppl.examples.conjugate_models.gamma_normal import GammaNormalModel
from beanmachine.ppl.examples.conjugate_models.normal_normal import NormalNormalModel
__all__ = [
"BetaBinomialModel",
"CategoricalDirichletModel",
"GammaGammaModel",
"GammaNormalModel",
"NormalNormalModel",
]
| beanmachine-main | src/beanmachine/ppl/examples/conjugate_models/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import torch.distributions as dist
from torch import Tensor
class CategoricalDirichletModel:
def __init__(self, alpha: Tensor) -> None:
self.alpha_ = alpha
@bm.random_variable
def dirichlet(self) -> dist.Distribution:
return dist.Dirichlet(self.alpha_)
@bm.random_variable
def categorical(self) -> dist.Distribution:
return dist.Categorical(self.dirichlet())
| beanmachine-main | src/beanmachine/ppl/examples/conjugate_models/categorical_dirichlet.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import torch.distributions as dist
from torch import Tensor
class NormalNormalModel:
def __init__(self, mu: Tensor, std: Tensor, sigma: Tensor) -> None:
self.mu_ = mu
self.std_ = std
self.sigma_ = sigma
@bm.random_variable
def normal_p(self) -> dist.Distribution:
return dist.Normal(self.mu_, self.std_)
@bm.random_variable
def normal(self) -> dist.Distribution:
return dist.Normal(self.normal_p(), self.sigma_)
| beanmachine-main | src/beanmachine/ppl/examples/conjugate_models/normal_normal.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import torch.distributions as dist
from torch import Tensor
class BetaBernoulliModel:
def __init__(self, alpha: Tensor, beta: Tensor) -> None:
self.alpha_ = alpha
self.beta_ = beta
@bm.random_variable
def theta(self) -> dist.Distribution:
return dist.Beta(self.alpha_, self.beta_)
@bm.random_variable
def y(self, i: int) -> dist.Distribution:
return dist.Bernoulli(self.theta())
| beanmachine-main | src/beanmachine/ppl/examples/conjugate_models/beta_bernoulli.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from torch import Tensor
class GammaNormalModel:
def __init__(self, shape: Tensor, rate: Tensor, mu: Tensor) -> None:
self.shape_ = shape
self.rate_ = rate
self.mu_ = mu
@bm.random_variable
def gamma(self) -> dist.Distribution:
return dist.Gamma(self.shape_, self.rate_)
@bm.random_variable
def normal(self) -> dist.Distribution:
# pyre-fixme[58]: `/` is not supported for operand types `int` and `Tensor`.
return dist.Normal(self.mu_, 1 / torch.sqrt(self.gamma()))
| beanmachine-main | src/beanmachine/ppl/examples/conjugate_models/gamma_normal.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import torch.distributions as dist
from torch import Tensor
class GammaGammaModel:
def __init__(self, shape: Tensor, rate: Tensor, alpha: Tensor) -> None:
self.shape_ = shape
self.rate_ = rate
self.alpha_ = alpha
@bm.random_variable
def gamma_p(self) -> dist.Distribution:
return dist.Gamma(self.shape_, self.rate_)
@bm.random_variable
def gamma(self) -> dist.Distribution:
return dist.Gamma(self.alpha_, self.gamma_p())
| beanmachine-main | src/beanmachine/ppl/examples/conjugate_models/gamma_gamma.py |
| beanmachine-main | src/beanmachine/ppl/testlib/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Optional, Tuple
import numpy as np
import scipy.stats
import torch
from beanmachine.ppl.diagnostics.common_statistics import effective_sample_size
from beanmachine.ppl.examples.conjugate_models.beta_binomial import BetaBinomialModel
from beanmachine.ppl.examples.conjugate_models.categorical_dirichlet import (
CategoricalDirichletModel,
)
from beanmachine.ppl.examples.conjugate_models.gamma_gamma import GammaGammaModel
from beanmachine.ppl.examples.conjugate_models.gamma_normal import GammaNormalModel
from beanmachine.ppl.examples.conjugate_models.normal_normal import NormalNormalModel
from beanmachine.ppl.inference import utils
from beanmachine.ppl.inference.base_inference import BaseInference
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.testlib.hypothesis_testing import (
mean_equality_hypothesis_confidence_interval,
variance_equality_hypothesis_confidence_interval,
)
from torch import Tensor, tensor
class AbstractConjugateTests(metaclass=ABCMeta):
"""
Computes the posterior mean and standard deviation of some of the conjugate
distributions included below.
https://en.wikipedia.org/wiki/Conjugate_prior#Table_of_conjugate_distributions
    Note: Whenever possible, we will use the same variable names as on that page.
"""
def compute_statistics(self, predictions: Tensor) -> Tuple[Tensor, Tensor]:
"""
Computes mean and standard deviation of a given tensor of samples.
:param predictions: tensor of samples
:returns: mean and standard deviation of the tensor of samples.
"""
return (
torch.mean(predictions, 0),
torch.std(predictions, 0, unbiased=True, keepdim=True),
)
def compute_beta_binomial_moments(
self,
) -> Tuple[Tensor, Tensor, List[RVIdentifier], Dict[RVIdentifier, Tensor]]:
"""
Computes mean and standard deviation of a small beta binomial model.
:return: expected mean, expected standard deviation, conjugate model
queries and observations
"""
alpha = tensor([2.0, 2.0])
beta = tensor([1.0, 1.0])
n = tensor([1.0, 1.0])
obs = tensor([1.0, 0.0])
model = BetaBinomialModel(alpha, beta, n)
queries = [model.theta()]
observations = {model.x(): obs}
alpha_prime = alpha + obs
beta_prime = beta - obs + n
mean_prime = alpha_prime / (alpha_prime + beta_prime)
std_prime = (
(alpha_prime * beta_prime)
/ ((alpha_prime + beta_prime).pow(2.0) * (alpha_prime + beta_prime + 1.0))
).pow(0.5)
return (mean_prime, std_prime, queries, observations)
def compute_gamma_gamma_moments(
self,
) -> Tuple[Tensor, Tensor, List[RVIdentifier], Dict[RVIdentifier, Tensor]]:
"""
Computes mean and standard deviation of a small gamma gamma model.
:return: expected mean, expected standard deviation, conjugate model
queries and observations
"""
shape = tensor([2.0, 2.0])
rate = tensor([2.0, 2.0])
alpha = tensor([1.5, 1.5])
obs = tensor([2.0, 4.0])
model = GammaGammaModel(shape, rate, alpha)
queries = [model.gamma_p()]
observations = {model.gamma(): obs}
shape = shape + alpha
rate = rate + obs
expected_mean = shape / rate
expected_std = (expected_mean / rate).pow(0.5)
return (expected_mean, expected_std, queries, observations)
def compute_gamma_normal_moments(
self,
) -> Tuple[Tensor, Tensor, List[RVIdentifier], Dict[RVIdentifier, Tensor]]:
"""
Computes mean and standard deviation of a small gamma normal model.
:return: expected mean, expected standard deviation, conjugate model
queries and observations
"""
shape = tensor([1.0, 1.0])
rate = tensor([2.0, 2.0])
mu = tensor([1.0, 2.0])
obs = tensor([1.5, 2.5])
model = GammaNormalModel(shape, rate, mu)
queries = [model.gamma()]
observations = {model.normal(): obs}
shape = shape + tensor([0.5, 0.5])
deviations = (obs - mu).pow(2.0)
rate = rate + (deviations * (0.5))
expected_mean = shape / rate
expected_std = (expected_mean / rate).pow(0.5)
return (expected_mean, expected_std, queries, observations)
def compute_normal_normal_moments(
self,
) -> Tuple[Tensor, Tensor, List[RVIdentifier], Dict[RVIdentifier, Tensor]]:
"""
Computes mean and standard deviation of a small normal normal model.
:return: expected mean, expected standard deviation, conjugate model
queries and observations
"""
mu = tensor([1.0, 1.0])
std = tensor([1.0, 1.0])
sigma = tensor([1.0, 1.0])
obs = tensor([1.5, 2.5])
model = NormalNormalModel(mu, std, sigma)
queries = [model.normal_p()]
observations = {model.normal(): obs}
expected_mean = (mu / std.pow(2.0) + obs / sigma.pow(2.0)) / (
# pyre-fixme[58]: `/` is not supported for operand types `float` and
# `Tensor`.
1.0 / sigma.pow(2.0)
# pyre-fixme[58]: `/` is not supported for operand types `float` and
# `Tensor`.
+ 1.0 / std.pow(2.0)
)
expected_std = (std.pow(-2.0) + sigma.pow(-2.0)).pow(-0.5)
return (expected_mean, expected_std, queries, observations)
def compute_dirichlet_categorical_moments(self):
"""
Computes mean and standard deviation of a small dirichlet categorical
model.
:return: expected mean, expected standard deviation, conjugate model
queries and observations
"""
alpha = tensor([0.5, 0.5])
model = CategoricalDirichletModel(alpha)
obs = tensor([1.0])
queries = [model.dirichlet()]
observations = {model.categorical(): obs}
alpha = alpha + tensor([0.0, 1.0])
expected_mean = alpha / alpha.sum()
expected_std = (expected_mean * (1 - expected_mean) / (alpha.sum() + 1)).pow(
0.5
)
return (expected_mean, expected_std, queries, observations)
def _compare_run(
self,
moments: Tuple[Tensor, Tensor, List[RVIdentifier], Dict[RVIdentifier, Tensor]],
mh: BaseInference,
num_chains: int,
num_samples: int,
random_seed: Optional[int],
num_adaptive_samples: int = 0,
alpha: float = 0.01,
):
# Helper functions for hypothesis tests
def chi2(alpha, df):
return scipy.stats.chi2.ppf(alpha, df)
def z(alpha):
return scipy.stats.norm.ppf(alpha)
expected_mean, expected_std, queries, observations = moments
if random_seed is None:
random_seed = 123
utils.seed(random_seed)
predictions = mh.infer(
queries,
observations,
num_samples,
num_chains=num_chains,
num_adaptive_samples=num_adaptive_samples,
)
for i in range(predictions.num_chains):
sample = predictions.get_chain(i)[queries[0]]
mean, std = self.compute_statistics(sample)
total_samples = tensor(sample.size())[0].item()
n_eff = effective_sample_size(sample.unsqueeze(dim=0))
            # For our purposes, it seems more appropriate to use n_eff ONLY
            # to discount sample size. In particular, we should not allow
            # n_eff > total_samples.
n_eff = torch.min(n_eff, tensor(total_samples))
# Hypothesis Testing
# First, let's start by making sure that we can assume normalcy of means
# pyre-fixme[16]: `AbstractConjugateTests` has no attribute
# `assertGreaterEqual`.
self.assertGreaterEqual(
total_samples, 30, msg="Sample size too small for normalcy assumption"
)
self.assertGreaterEqual(
torch.min(n_eff).item(),
30,
msg="Effective sample size too small for normalcy assumption",
)
# Second, let us check the means using confidence intervals:
lower_bound, upper_bound = mean_equality_hypothesis_confidence_interval(
expected_mean,
expected_std,
n_eff,
# pyre-fixme[6]: For 4th param expected `int` but got `float`.
alpha,
)
below_upper = torch.min(lower_bound <= mean).item()
above_lower = torch.min(mean <= upper_bound).item()
accept_interval = below_upper and above_lower
message = "abs(mean - expected_mean) * sqr(n_eff) / expected_std = " + str(
torch.abs(mean - expected_mean) / (expected_std / np.sqrt(n_eff))
)
message = (
" alpha = "
+ str(alpha)
+ " z_alpha/2 = "
+ str(z(1 - alpha / 2))
+ " => "
+ message
)
message = (
str(lower_bound)
+ " <= "
+ str(mean)
+ " <= "
+ str(upper_bound)
+ ". "
+ message
)
message = (
"Mean outside confidence interval.\n"
+ "n_eff = "
+ str(n_eff)
+ ".\nExpected: "
+ message
)
# pyre-fixme[16]: `AbstractConjugateTests` has no attribute
# `assertTrue`.
self.assertTrue(accept_interval, msg=message)
# Third, let us check the variance using confidence intervals:
lower_bound, upper_bound = variance_equality_hypothesis_confidence_interval(
expected_std,
n_eff - 1,
# pyre-fixme[6]: For 3rd param expected `int` but got `float`.
alpha,
)
below_upper = torch.min(lower_bound <= std).item()
above_lower = torch.min(std <= upper_bound).item()
accept_interval = below_upper and above_lower
message = "(n_eff - 1) * (std/ expected_std) ** 2 = " + str(
(n_eff - 1)
# pyre-fixme[58]: `**` is not supported for operand types `Tensor`
# and `int`.
* (std / expected_std) ** 2
)
message = (
" alpha = "
+ str(alpha)
+ " chi2_alpha/2 = "
+ str(chi2(alpha / 2, n_eff - 1))
+ " <= "
+ message
+ " <= "
+ " chi2_(1-alpha/2) = "
+ str(chi2(1 - alpha / 2, n_eff - 1))
)
message = (
str(lower_bound)
+ " <= "
+ str(std)
+ " <= "
+ str(upper_bound)
+ ". "
+ message
)
message = (
"Standard deviation outside confidence interval.\n"
+ "n_eff = "
+ str(n_eff)
+ ".\nExpected: "
+ message
)
self.assertTrue(accept_interval, msg=message)
continue
def beta_binomial_conjugate_run(
self,
mh: BaseInference,
num_chains: int = 1,
num_samples: int = 1000,
random_seed: Optional[int] = 17,
num_adaptive_samples: int = 0,
):
"""
Tests the inference run for a small beta binomial model.
:param mh: inference algorithm
:param num_samples: number of samples
:param num_chains: number of chains
:param random_seed: seed for pytorch random number generator
"""
moments = self.compute_beta_binomial_moments()
self._compare_run(
moments,
mh,
num_chains,
num_samples,
random_seed,
num_adaptive_samples,
)
def gamma_gamma_conjugate_run(
self,
mh: BaseInference,
num_chains: int = 1,
num_samples: int = 1000,
random_seed: Optional[int] = 17,
num_adaptive_samples: int = 0,
):
"""
Tests the inference run for a small gamma gamma model.
:param mh: inference algorithm
:param num_samples: number of samples
:param num_chains: number of chains
:param random_seed: seed for pytorch random number generator
"""
moments = self.compute_gamma_gamma_moments()
self._compare_run(
moments,
mh,
num_chains,
num_samples,
random_seed,
num_adaptive_samples,
)
def gamma_normal_conjugate_run(
self,
mh: BaseInference,
num_chains: int = 1,
num_samples: int = 1000,
random_seed: Optional[int] = 17,
num_adaptive_samples: int = 0,
):
"""
Tests the inference run for a small gamma normal model.
:param mh: inference algorithm
:param num_samples: number of samples
:param num_chains: number of chains
:param random_seed: seed for pytorch random number generator
"""
moments = self.compute_gamma_normal_moments()
self._compare_run(
moments,
mh,
num_chains,
num_samples,
random_seed,
num_adaptive_samples,
)
def normal_normal_conjugate_run(
self,
mh: BaseInference,
num_chains: int = 1,
num_samples: int = 1000,
random_seed: Optional[int] = 17,
num_adaptive_samples: int = 0,
):
"""
Tests the inference run for a small normal normal model.
:param mh: inference algorithm
:param num_samples: number of samples
:param num_chains: number of chains
:param random_seed: seed for pytorch random number generator
"""
moments = self.compute_normal_normal_moments()
self._compare_run(
moments,
mh,
num_chains,
num_samples,
random_seed,
num_adaptive_samples,
)
def dirichlet_categorical_conjugate_run(
self,
mh: BaseInference,
num_chains: int = 1,
num_samples: int = 1000,
random_seed: Optional[int] = 17,
num_adaptive_samples: int = 0,
):
"""
Tests the inference run for a small dirichlet categorical model.
:param mh: inference algorithm
:param num_samples: number of samples
:param num_chains: number of chains
:param random_seed: seed for pytorch random number generator
"""
moments = self.compute_dirichlet_categorical_moments()
self._compare_run(
moments,
mh,
num_chains,
num_samples,
random_seed,
num_adaptive_samples,
)
@abstractmethod
def test_beta_binomial_conjugate_run(self):
"""
To be implemented for all classes extending AbstractConjugateTests.
"""
raise NotImplementedError(
"Conjugate test must implement test_beta_binomial_conjugate_run."
)
@abstractmethod
def test_gamma_gamma_conjugate_run(self):
"""
To be implemented for all classes extending AbstractConjugateTests.
"""
raise NotImplementedError(
"Conjugate test must implement test_gamma_gamma_conjugate_run."
)
@abstractmethod
def test_gamma_normal_conjugate_run(self):
"""
To be implemented for all classes extending AbstractConjugateTests.
"""
raise NotImplementedError(
"Conjugate test must implement test_gamma_normal_conjugate_run."
)
@abstractmethod
def test_normal_normal_conjugate_run(self):
"""
To be implemented for all classes extending AbstractConjugateTests.
"""
raise NotImplementedError(
"Conjugate test must implement test_normal_normal_conjugate_run."
)
@abstractmethod
def test_dirichlet_categorical_conjugate_run(self):
"""
To be implemented for all classes extending AbstractConjugateTests.
"""
raise NotImplementedError(
"Conjugate test must implement test_categorical_dirichlet_conjugate_run."
)
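# A minimal usage sketch (not part of the library): a unittest.TestCase mixing in
# AbstractConjugateTests to exercise one inference algorithm against all of the
# conjugate models above. Assumes bm.SingleSiteAncestralMetropolisHastings is
# available in this version of Bean Machine.
import unittest

import beanmachine.ppl as bm


class SingleSiteAncestralConjugateTest(unittest.TestCase, AbstractConjugateTests):
    def test_beta_binomial_conjugate_run(self):
        self.beta_binomial_conjugate_run(bm.SingleSiteAncestralMetropolisHastings())

    def test_gamma_gamma_conjugate_run(self):
        self.gamma_gamma_conjugate_run(bm.SingleSiteAncestralMetropolisHastings())

    def test_gamma_normal_conjugate_run(self):
        self.gamma_normal_conjugate_run(bm.SingleSiteAncestralMetropolisHastings())

    def test_normal_normal_conjugate_run(self):
        self.normal_normal_conjugate_run(bm.SingleSiteAncestralMetropolisHastings())

    def test_dirichlet_categorical_conjugate_run(self):
        self.dirichlet_categorical_conjugate_run(
            bm.SingleSiteAncestralMetropolisHastings()
        )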
| beanmachine-main | src/beanmachine/ppl/testlib/abstract_conjugate.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import scipy.stats as stats
import torch
# This module defines hypothesis tests for equal means and equal variance
# Helper functions:
# Inverse of CDF of normal distribution at given probability
inverse_normal_cdf = stats.norm.ppf
# Inverse of CDF of chi-squared distribution at given probability
def inverse_chi2_cdf(df, p):
return stats.chi2(df).ppf(p)
# Hypothesis test for equality of sample mean to a true mean
def mean_equality_hypothesis_test(
sample_mean: torch.Tensor,
true_mean: torch.Tensor,
true_std: torch.Tensor,
sample_size: torch.Tensor,
p_value: int,
):
"""Test for the null hypothesis that the mean of a Gaussian
distribution is within the central 1 - alpha confidence
interval (CI) for a sample of size sample_size. We also apply an adjustment
that takes into account that we do the test pointwise independently
for each element of the tensor. This is basically the Dunn-Šidák
correction,
https://en.wikipedia.org/wiki/%C5%A0id%C3%A1k_correction"""
if torch.min(sample_size) <= 0:
return False
dimensions = torch.numel(true_mean) # treat scalar and 1-D tensors the same
# early exit for empty tensor
if dimensions == 0:
return False
if torch.max(true_std <= 0):
return False
adjusted_p_value = 1 - (1 - p_value) ** (1.0 / dimensions)
test_result = torch.max(
torch.abs(sample_mean - true_mean) * np.sqrt(sample_size) / true_std
) <= inverse_normal_cdf(1 - adjusted_p_value / 2)
return test_result
# The following function explicitly constructs a confidence interval.
# This provides an alternative way for performing the hypothesis test,
# but which also makes reporting test failures easier.
def mean_equality_hypothesis_confidence_interval(
true_mean: torch.Tensor,
true_std: torch.Tensor,
sample_size: torch.Tensor,
p_value: int,
):
"""Computes the central 1 - p_value confidence interval in which the sample mean
can fall without causing us to reject the null hypothesis that the mean of
a Gaussian distribution for a sample of size sample_size. We also apply
an adjustment that takes into account that we do the test pointwise independently
for each element of the tensor. This is basically the Dunn-Šidák
correction,
https://en.wikipedia.org/wiki/%C5%A0id%C3%A1k_correction"""
# TODO: Consider refactoring the common input checks for both methods
if torch.min(sample_size) <= 0:
return None
dimensions = torch.numel(true_mean) # treat scalar and 1-D tensors the same
# early exit for empty tensor
if dimensions == 0:
return None
if torch.max(true_std == 0):
return None
adjusted_p_value = 1 - (1 - p_value) ** (1.0 / dimensions)
bound_std = true_std / np.sqrt(sample_size)
z_score = inverse_normal_cdf(1 - adjusted_p_value / 2)
# TODO: We use z_{1-alpha} instead of -z_alpha for compatibility
# with mean_equality_hypothesis_test. Ideally, both should be
# changed to use the unmodified bounds. In any case, the two
# functions should be matched for consistency
lower_bound = true_mean - bound_std * z_score
upper_bound = true_mean + bound_std * z_score
return lower_bound, upper_bound
# Hypothesis test for equality of sample variance to a true variance
def variance_equality_hypothesis_test(
sample_std: torch.Tensor,
true_std: torch.Tensor,
degrees_of_freedom: torch.Tensor,
alpha: int,
):
"""Test for the null hypothesis that the variance of a Gaussian
distribution is within the central 1 - alpha confidence
interval (CI) for a sample of effective sample size (ESS)
degrees_of_freedom. We also apply an adjustment that takes
into account that we do the test pointwise independently
for each element of the tensor. This is basically the Dunn-Šidák
correction,
https://en.wikipedia.org/wiki/%C5%A0id%C3%A1k_correction"""
if torch.min(degrees_of_freedom).item() <= 0:
return False
dimensions = torch.prod(torch.tensor(torch.Tensor.size(true_std))).item()
if dimensions == 0:
return False
if torch.max(true_std <= 0).item():
return False
adjusted_alpha = 1 - (1 - alpha) ** (1.0 / dimensions)
# pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
test_statistic = degrees_of_freedom * (sample_std / true_std) ** 2
lower_bound = inverse_chi2_cdf(degrees_of_freedom, adjusted_alpha / 2)
upper_bound = inverse_chi2_cdf(degrees_of_freedom, 1 - adjusted_alpha / 2)
lower_bound_result = lower_bound <= torch.min(test_statistic).item()
upper_bound_result = torch.max(test_statistic).item() <= upper_bound
test_result = lower_bound_result and upper_bound_result
return test_result
# The following function explicitly constructs a confidence interval.
# This provides an alternative way for performing the hypothesis test,
# but which also makes reporting test failures easier.
def variance_equality_hypothesis_confidence_interval(
true_std: torch.Tensor, degrees_of_freedom: torch.Tensor, alpha: int
):
"""Computes the central 1 - alpha confidence interval in which the sample
variance can fall without causing us to reject the null hypothesis that the variance
of a Gaussian distribution for a sample of size sample_size. We also apply
an adjustment that takes into account that we do the test pointwise independently
for each element of the tensor. This is basically the Dunn-Šidák
correction,
https://en.wikipedia.org/wiki/%C5%A0id%C3%A1k_correction"""
if torch.min(degrees_of_freedom).item() <= 0:
return None
dimensions = torch.prod(torch.tensor(torch.Tensor.size(true_std))).item()
if dimensions == 0:
return None
if torch.max(true_std == 0).item():
return None
adjusted_alpha = 1 - (1 - alpha) ** (1.0 / dimensions)
lower_bound = (
inverse_chi2_cdf(degrees_of_freedom, adjusted_alpha / 2) / degrees_of_freedom
) ** 0.5 * true_std
upper_bound = (
inverse_chi2_cdf(degrees_of_freedom, 1 - adjusted_alpha / 2)
/ degrees_of_freedom
) ** 0.5 * true_std
return lower_bound, upper_bound
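# A minimal usage sketch (not part of the library): draws a sample from the true
# distribution, so the mean-equality test should (with probability 1 - p_value)
# accept, and the sample mean should land inside the reported confidence interval.
if __name__ == "__main__":
    true_mean = torch.zeros(3)
    true_std = torch.ones(3)
    sample_size = torch.tensor(1000.0)
    sample = torch.randn(1000, 3) * true_std + true_mean
    sample_mean = sample.mean(dim=0)
    print(mean_equality_hypothesis_test(sample_mean, true_mean, true_std, sample_size, 0.05))
    print(mean_equality_hypothesis_confidence_interval(true_mean, true_std, sample_size, 0.05))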
| beanmachine-main | src/beanmachine/ppl/testlib/hypothesis_testing.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import wraps
from typing import Callable, Union
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.vi.variational_world import VariationalWorld
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import get_world_context
from typing_extensions import ParamSpec
P = ParamSpec("P")
class StatisticalModel:
"""
Parent class to all statistical models implemented in Bean Machine.
    Every random variable in the model needs to be defined with a function
    declaration wrapped in ``bm.random_variable``.
    Every deterministic functional that a user would like to query during
    inference should be wrapped in ``bm.functional``.
    Every parameter of the guide distribution that is to be learned
    via variational inference should be wrapped in ``bm.param``.
"""
@staticmethod
def get_func_key(wrapper, arguments) -> RVIdentifier:
"""
Creates a key to uniquely identify the Random Variable.
Args:
wrapper: reference to the wrapper function
arguments: function arguments
Returns:
Tuple of function and arguments which is to be used to identify
a particular function call.
"""
return RVIdentifier(wrapper=wrapper, arguments=arguments)
@staticmethod
def random_variable(
f: Callable[P, dist.Distribution]
) -> Callable[P, Union[RVIdentifier, torch.Tensor]]:
"""
Decorator to be used for every stochastic random variable defined in
all statistical models. E.g.::
@bm.random_variable
def foo():
return Normal(0., 1.)
def foo():
return Normal(0., 1.)
foo = bm.random_variable(foo)
"""
@wraps(f)
def wrapper(
*args: P.args, **kwargs: P.kwargs
) -> Union[RVIdentifier, torch.Tensor]:
func_key = StatisticalModel.get_func_key(wrapper, args, **kwargs)
world = get_world_context()
if world is None:
return func_key
else:
return world.update_graph(func_key)
wrapper.is_functional = False
wrapper.is_random_variable = True
return wrapper
@staticmethod
def functional(
f: Callable[P, torch.Tensor]
) -> Callable[P, Union[RVIdentifier, torch.Tensor]]:
"""
Decorator to be used for every query defined in statistical model, which are
functions of ``bm.random_variable`` ::
@bm.random_variable
def foo():
return Normal(0., 1.)
@bm.functional():
def bar():
return foo() * 2.0
"""
@wraps(f)
def wrapper(
*args: P.args, **kwargs: P.kwargs
) -> Union[RVIdentifier, torch.Tensor]:
world = get_world_context()
if world is None:
return StatisticalModel.get_func_key(wrapper, args, **kwargs)
else:
return f(*args, **kwargs)
wrapper.is_functional = True
wrapper.is_random_variable = False
return wrapper
@staticmethod
def param(init_fn):
"""
Decorator to be used for params (variable to be optimized with VI).::
@bm.param
def mu():
return torch.zeros(2)
@bm.random_variable
def foo():
return Normal(mu(), 1.)
"""
@wraps(init_fn)
def wrapper(*args):
func_key = StatisticalModel.get_func_key(wrapper, args)
world = get_world_context()
if world is None:
return func_key
else:
assert isinstance(
world, VariationalWorld
), "encountered params outside of VariationalWorld, this should never happen."
return world.get_param(func_key)
return wrapper
random_variable = StatisticalModel.random_variable
functional = StatisticalModel.functional
param = StatisticalModel.param
| beanmachine-main | src/beanmachine/ppl/model/statistical_model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.model.statistical_model import (
functional,
param,
random_variable,
StatisticalModel,
)
from beanmachine.ppl.model.utils import get_beanmachine_logger
__all__ = [
"Mode",
"RVIdentifier",
"StatisticalModel",
"functional",
"param",
"query",
"random_variable",
"sample",
"get_beanmachine_logger",
]
| beanmachine-main | src/beanmachine/ppl/model/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Tuple
import torch
@dataclass(eq=True, frozen=True)
class RVIdentifier:
"""
Struct representing the unique key corresponding to a
BM random variable.
"""
wrapper: Callable
arguments: Tuple
def __post_init__(self):
for arg in self.arguments:
if torch.is_tensor(arg):
warnings.warn(
"PyTorch tensors are hashed by memory address instead of value. "
"Therefore, it is not recommended to use tensors as indices of random variables.",
stacklevel=3,
)
def __str__(self):
return str(self.function.__name__) + str(self.arguments)
def __lt__(self, other: Any) -> bool:
# define comparison so that functorch doesn't raise when it tries to
# sort dictionary keys (https://fburl.com/0gomiv80). This can be
# removed with the v0.2.1+ release of functorch.
if isinstance(other, RVIdentifier):
return str(self) < str(other)
return NotImplemented
@property
def function(self):
return self.wrapper.__wrapped__
@property
def is_functional(self):
w = self.wrapper
assert hasattr(w, "is_functional")
return w.is_functional
@property
def is_random_variable(self):
w = self.wrapper
assert hasattr(w, "is_random_variable")
return w.is_random_variable
| beanmachine-main | src/beanmachine/ppl/model/rv_identifier.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from enum import Enum
import torch
class LogLevel(Enum):
"""
Enum class mapping the logging levels to numeric values.
"""
ERROR = 40
WARNING = 30
INFO = 20
DEBUG_UPDATES = 16
DEBUG_PROPOSER = 14
DEBUG_GRAPH = 12
def get_beanmachine_logger(
console_level: LogLevel = LogLevel.WARNING, file_level: LogLevel = LogLevel.INFO
) -> logging.Logger:
console_handler = logging.StreamHandler()
console_handler.setLevel(console_level.value)
file_handler = logging.FileHandler("beanmachine.log")
file_handler.setLevel(file_level.value)
logger = logging.getLogger("beanmachine")
logger.setLevel(
file_level.value
if file_level.value < console_level.value
else console_level.value
)
logger.handlers.clear()
logger.addHandler(console_handler)
logger.addHandler(file_handler)
return logger
float_types = (torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor)
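# A minimal usage sketch (not part of the library): lowers the console threshold
# so proposer-level debug messages are printed, while beanmachine.log keeps the
# default INFO threshold.
if __name__ == "__main__":
    logger = get_beanmachine_logger(console_level=LogLevel.DEBUG_PROPOSER)
    logger.log(LogLevel.DEBUG_PROPOSER.value, "proposer diagnostics go here")
    logger.warning("warnings still reach both the console and the file")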
| beanmachine-main | src/beanmachine/ppl/model/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Optional, Tuple
import numpy as np
import torch
import torch.fft
from torch import Tensor
"""
Common statistic functions, they all get a Tensor as input and return a Tensor
as output
"""
def mean(query_samples: Tensor) -> Tensor:
return query_samples.mean(dim=[0, 1])
def std(query_samples: Tensor) -> Tensor:
return torch.std(query_samples, dim=[0, 1])
def confidence_interval(query_samples: Tensor) -> Tensor:
percentile_list = [2.5, 50, 97.5]
query_dim = query_samples.shape[2:]
query_samples = query_samples.reshape(-1, *query_dim)
return torch.tensor(
np.percentile(query_samples.detach().numpy(), percentile_list, axis=0)
)
def _compute_var(query_samples: Tensor) -> Tuple[Tensor, Tensor]:
n_chains, n_samples = query_samples.shape[:2]
if query_samples.dtype not in [torch.float32, torch.float64]:
"""TODO have separate diagnostics for discrete variables.
This would require passing supprt-type information to Diagnostics.
"""
query_samples = query_samples.float()
if n_chains > 1:
per_chain_avg = query_samples.mean(1)
b = n_samples * torch.var(per_chain_avg, dim=0)
else:
b = 0
w = torch.mean(torch.var(query_samples, dim=1), dim=0)
# pyre-fixme[58]: `*` is not supported for operand types `float` and `Union[int,
# torch._tensor.Tensor]`.
var_hat = (n_samples - 1) / n_samples * w + (1 / n_samples) * b
return w, var_hat.clamp(min=1e-10)
def r_hat(query_samples: Tensor) -> Optional[Tensor]:
n_chains = query_samples.shape[0]
if n_chains < 2:
return None
w, var_hat = _compute_var(query_samples)
return torch.sqrt(var_hat / w)
def split_r_hat(query_samples: Tensor) -> Optional[Tensor]:
n_chains, n_samples = query_samples.shape[:2]
if n_chains < 2:
return None
n_chains = n_chains * 2
n_samples = n_samples // 2
query_samples = torch.cat(torch.split(query_samples, n_samples, dim=1)[0:2])
w, var_hat = _compute_var(query_samples)
return torch.sqrt(var_hat / w)
def effective_sample_size(query_samples: Tensor) -> Tensor:
n_chains, n_samples, *query_dim = query_samples.shape
if query_samples.dtype not in [torch.float32, torch.float64]:
"""TODO have separate diagnostics for discrete variables.
        This would require passing support-type information to Diagnostics.
"""
query_samples = query_samples.float()
samples = query_samples - query_samples.mean(dim=1, keepdim=True)
samples = samples.transpose(1, -1)
# computes fourier transform (with padding)
padding = torch.zeros(samples.shape, dtype=samples.dtype)
padded_samples = torch.cat((samples, padding), dim=-1)
fvi = torch.view_as_real(torch.fft.fft(padded_samples))
# multiply by complex conjugate
acf = fvi.pow(2).sum(-1, keepdim=True)
# transform back to reals (with padding)
padding = torch.zeros(acf.shape, dtype=acf.dtype)
padded_acf = torch.cat((acf, padding), dim=-1)
rho_per_chain = torch.fft.ifft(torch.view_as_complex(padded_acf)).real
rho_per_chain = rho_per_chain.narrow(-1, 0, n_samples)
num_per_lag = torch.tensor(range(n_samples, 0, -1), dtype=samples.dtype)
rho_per_chain = torch.div(rho_per_chain, num_per_lag)
rho_per_chain = rho_per_chain.transpose(1, -1)
rho_avg = rho_per_chain.mean(dim=0)
w, var_hat = _compute_var(query_samples)
if n_chains > 1:
rho = 1 - ((w - rho_avg) / var_hat)
else:
rho = rho_avg / var_hat
rho[0] = 1
# reshape to 2d matrix where each row contains all samples for specific dim
rho_2d = torch.stack(torch.unbind(rho, dim=0), dim=-1).reshape(-1, n_samples)
rho_sum = torch.zeros(rho_2d.shape[0])
for i, chain in enumerate(torch.unbind(rho_2d, dim=0)):
total_sum = torch.tensor(0.0, dtype=samples.dtype)
for t in range(n_samples // 2):
rho_even = chain[2 * t]
rho_odd = chain[2 * t + 1]
if rho_even + rho_odd < 0:
break
else:
total_sum += rho_even + rho_odd
rho_sum[i] = total_sum
rho_sum = torch.reshape(rho_sum, query_dim)
tau = -1 + 2 * rho_sum
n_eff = torch.div(n_chains * n_samples, tau)
if n_eff.isnan().any():
warnings.warn("NaN encountered in computing effective sample size.")
return torch.tensor(0.0)
return n_eff
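

if __name__ == "__main__":
    # Illustrative only: run the diagnostics above on synthetic, well-mixed
    # chains of shape (n_chains, n_samples, *event_shape). Independent draws
    # should give a split R-hat close to 1 and an effective sample size close
    # to n_chains * n_samples.
    torch.manual_seed(0)
    fake_samples = torch.randn(4, 500, 2)  # 4 chains, 500 draws, 2-dim query
    print("mean:", mean(fake_samples))
    print("std:", std(fake_samples))
    print("95% interval and median:", confidence_interval(fake_samples))
    print("split r_hat:", split_r_hat(fake_samples))
    print("n_eff:", effective_sample_size(fake_samples))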
| beanmachine-main | src/beanmachine/ppl/diagnostics/common_statistics.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import math
import warnings
from typing import Callable, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
import plotly
from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from plotly.subplots import make_subplots
from torch import Tensor
from . import common_plots, common_statistics as common_stats
class BaseDiagnostics:
def __init__(self, samples: MonteCarloSamples):
self.samples = samples
self.statistics_dict = {}
self.plots_dict = {}
def _prepare_query_list(
self, query_list: Optional[List[RVIdentifier]] = None
) -> List[RVIdentifier]:
if query_list is None:
return list(self.samples.keys())
for query in query_list:
            if query not in self.samples:
raise ValueError(f"query {self._stringify_query(query)} does not exist")
return query_list
def summaryfn(self, func: Callable, display_names: List[str]) -> Callable:
"""
        this function keeps a registry of all summary-related functions,
        so it can handle both overridden functions and new ones that the user defines
        :param func: method which is going to be executed when summary() is called.
        :param display_names: the names that appear in the summary() output dataframe
:returns: user-visible function that can be called over a list of queries
"""
statistics_name = func.__name__
self.statistics_dict[statistics_name] = (func, display_names)
return self._standalone_summary_stat_function(statistics_name, func)
def _prepare_summary_stat_input(
self, query: RVIdentifier, chain: Optional[int] = None
):
query_samples = self.samples[query]
if query_samples.shape[0] != 1:
# squeeze out non-chain singleton dims
query_samples = query_samples.squeeze()
if chain is not None:
query_samples = query_samples[chain].unsqueeze(0)
return query_samples
def _create_table(
self, query: RVIdentifier, results: List[Tensor], func_list: List[str]
) -> pd.DataFrame:
"""
        this function turns the output of each summary-stat function into a dataframe
"""
out_pd = pd.DataFrame()
if len(results) > 0:
single_result_set = results[0]
if single_result_set is not None and len(single_result_set) > 0:
for flattened_index in range(single_result_set[0].numel()):
index = np.unravel_index(
flattened_index, tuple(single_result_set[0].size())
)
row_data = []
rowname = f"{self._stringify_query(query)}{list(index)}"
for result in results:
num_of_sets = result.size()[0]
for set_num in range(num_of_sets):
row_data.append(result[set_num][index].item())
cur = pd.DataFrame([row_data], columns=func_list, index=[rowname])
if out_pd.empty:
out_pd = cur
else:
out_pd = pd.concat([out_pd, cur])
return out_pd
def _stringify_query(self, query: RVIdentifier) -> str:
return f"{query.function.__name__}{query.arguments}"
def _execute_summary_stat_funcs(
self,
query: RVIdentifier,
func_dict: Dict[str, Tuple[Callable, str]],
chain: Optional[int] = None,
raise_warning: bool = False,
):
frames = pd.DataFrame()
query_results = []
func_list = []
queried_samples = self._prepare_summary_stat_input(query, chain)
for _k, (func, display_names) in func_dict.items():
result = func(queried_samples)
if result is None:
# in the case of r hat and other algorithms, they may return None
# if the samples do not have enough chains or have the wrong shape
if raise_warning:
warnings.warn(
f"{display_names} cannot be calculated for the provided samples"
)
continue
            # the first dimension is equivalent to the size of the display_names
if len(display_names) <= 1:
result = result.unsqueeze(0)
query_results.append(result)
func_list.extend(display_names)
out_df = self._create_table(query, query_results, func_list)
if frames.empty:
frames = out_df
else:
frames = pd.concat([frames, out_df])
return frames
def summary(
self,
query_list: Optional[List[RVIdentifier]] = None,
chain: Optional[int] = None,
) -> pd.DataFrame:
"""
        this function outputs a table summarizing results of registered functions
        in self.statistics_dict for requested queries in query_list;
        if chain is None, results correspond to the aggregated chains
"""
frames = pd.DataFrame()
query_list = self._prepare_query_list(query_list)
for query in query_list:
out_df = self._execute_summary_stat_funcs(
query, self.statistics_dict, chain
)
frames = pd.concat([frames, out_df])
frames.sort_index(inplace=True)
return frames
def _prepare_plots_input(
self, query: RVIdentifier, chain: Optional[int] = None
) -> Tensor:
"""
:param query: the query for which registered plot functions are called
:param chain: the chain that query samples are extracted from
:returns: tensor of query samples
"""
query_samples = self.samples[query]
if chain is not None:
return query_samples[chain].unsqueeze(0)
return query_samples
def plotfn(self, func: Callable, display_name: str) -> Callable:
"""
        this function keeps a registry of all plot-related functions
        so it can handle both overridden functions and new ones that the user defines
:param func: method which is going to be executed when plot() is called.
:param display_name: appears as part of the plot title for func
:returns: user-visible function that can be called over a list of queries
"""
self.plots_dict[func.__name__] = (func, display_name)
return self._standalone_plot_function(func.__name__, func)
def _execute_plot_funcs(
self,
query: RVIdentifier,
func_dict: Dict[str, Tuple[Callable, str]],
chain: Optional[int] = None,
display: Optional[bool] = False,
): # task T57168727 to add type
figs = []
queried_samples = self._prepare_plots_input(query, chain)
for _k, (func, display_name) in func_dict.items():
trace, labels = common_plots.plot_helper(queried_samples, func)
title = f"{self._stringify_query(query)} {display_name}"
fig = self._display_results(
trace,
[title + label for label in labels],
# pyre-fixme[6]: Expected `bool` for 3rd param but got `Optional[bool]`.
display,
)
figs.append(fig)
return figs
def plot(
self,
query_list: Optional[List[RVIdentifier]] = None,
display: Optional[bool] = False,
chain: Optional[int] = None,
): # task T57168727 to add type
"""
this function outputs plots related to registered functions in
self.plots_dict for requested queries in query_list
:param query_list: list of queries for which plot functions will be called
:param chain: the chain that query samples are extracted from
:returns: plotly object holding the results from registered plot functions
"""
figs = []
query_list = self._prepare_query_list(query_list)
for query in query_list:
fig = self._execute_plot_funcs(query, self.plots_dict, chain, display)
figs.extend(fig)
return figs
def _display_results(
self, traces, labels: List[str], display: bool
): # task T57168727 to add type
"""
:param traces: a list of plotly objects
:param labels: plot labels
:returns: a plotly subplot object
"""
fig = make_subplots(
rows=math.ceil(len(traces) / 2), cols=2, subplot_titles=tuple(labels)
)
r = 1
for trace in traces:
for data in trace:
fig.add_trace(data, row=math.ceil(r / 2), col=((r - 1) % 2) + 1)
r += 1
if display:
plotly.offline.iplot(fig)
return fig
def _standalone_plot_function(self, func_name: str, func: Callable) -> Callable:
"""
this function makes each registered plot function directly callable by the user
"""
@functools.wraps(func)
def _wrapper(
query_list: List[RVIdentifier],
chain: Optional[int] = None,
display: Optional[bool] = False,
):
figs = []
query_list = self._prepare_query_list(query_list)
for query in query_list:
fig = self._execute_plot_funcs(
query, {func_name: self.plots_dict[func_name]}, chain, display
)
figs.extend(fig)
return figs
return _wrapper
def _standalone_summary_stat_function(
self, func_name: str, func: Callable
) -> Callable:
"""
this function makes each registered summary-stat related function directly callable by the user
"""
@functools.wraps(func)
def _wrapper(query_list: List[RVIdentifier], chain: Optional[int] = None):
frames = pd.DataFrame()
query_list = self._prepare_query_list(query_list)
for query in query_list:
out_df = self._execute_summary_stat_funcs(
query, {func_name: self.statistics_dict[func_name]}, chain, True
)
frames = pd.concat([frames, out_df])
return frames
return _wrapper
class Diagnostics(BaseDiagnostics):
def __init__(self, samples: MonteCarloSamples):
super().__init__(samples)
"""
every function related to summary stat should be registered in the constructor
"""
self.mean = self.summaryfn(common_stats.mean, display_names=["avg"])
self.std = self.summaryfn(common_stats.std, display_names=["std"])
self.confidence_interval = self.summaryfn(
common_stats.confidence_interval, display_names=["2.5%", "50%", "97.5%"]
)
self.split_r_hat = self.summaryfn(
common_stats.split_r_hat, display_names=["r_hat"]
)
self.effective_sample_size = self.summaryfn(
common_stats.effective_sample_size, display_names=["n_eff"]
)
self.trace = self.plotfn(common_plots.trace_plot, display_name="trace")
self.autocorr = self.plotfn(common_plots.autocorr, display_name="autocorr")
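

# Example usage (sketch): `samples` below stands for the MonteCarloSamples
# object returned by an inference run; the exact model and inference method
# are up to the user.
#
#     diag = Diagnostics(samples)
#     summary_df = diag.summary()        # avg, std, 2.5%/50%/97.5%, r_hat, n_eff
#     figs = diag.plot(display=True)     # trace and autocorrelation plots
#     neff_df = diag.effective_sample_size(list(samples.keys()))  # standalone call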
| beanmachine-main | src/beanmachine/ppl/diagnostics/diagnostics.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Tools for supporting MCMC diagnostics. Will be deprecated in a future release in favor of arviz integration.
"""
from beanmachine.ppl.diagnostics.diagnostics import Diagnostics
__all__ = ["Diagnostics"]
| beanmachine-main | src/beanmachine/ppl/diagnostics/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, List, NamedTuple, Tuple
import numpy as np
import plotly.graph_objs as go
import torch
from torch import Tensor
class SamplesSummary(NamedTuple):
num_chain: int
num_samples: int
single_sample_sz: Tensor
def _samples_info(query_samples: Tensor) -> SamplesSummary:
return SamplesSummary(
num_chain=query_samples.size(0),
num_samples=query_samples.size(1),
# pyre-fixme[6]: For 3rd param expected `Tensor` but got `Size`.
single_sample_sz=query_samples.size()[2:],
)
def trace_helper(
x: List[List[List[int]]], y: List[List[List[float]]], labels: List[str]
) -> Tuple[List[go.Scatter], List[str]]:
"""
    this function takes results prepared by a plot-related function and
    outputs a tuple containing the plotly objects and their corresponding legend labels.
"""
all_traces = []
num_chains = len(x)
num_indices = len(x[0])
for index in range(num_indices):
trace = []
for chain in range(num_chains):
trace.append(
go.Scatter(
x=x[chain][index],
y=y[chain][index],
mode="lines",
name="chain" + str(chain),
)
)
all_traces.append(trace)
return (all_traces, labels)
def plot_helper(
query_samples: Tensor, func: Callable
) -> Tuple[List[go.Scatter], List[str]]:
"""
    this function executes a plot-related function, passed as the input parameter func,
    and outputs a tuple containing the plotly objects and their corresponding legend labels.
"""
num_chain, num_samples, single_sample_sz = _samples_info(query_samples)
x_axis, y_axis, all_labels = [], [], []
for chain in range(num_chain):
flattened_data = query_samples[chain].reshape(num_samples, -1)
numel = flattened_data[0].numel()
x_axis_data, y_axis_data, labels = [], [], []
for i in range(numel):
index = np.unravel_index(i, single_sample_sz)
data = flattened_data[:, i]
partial_label = f" for {list(index)}"
x_data, y_data = func(data.detach())
x_axis_data.append(x_data)
y_axis_data.append(y_data)
labels.append(partial_label)
x_axis.append(x_axis_data)
y_axis.append(y_axis_data)
all_labels.append(labels)
return trace_helper(x_axis, y_axis, all_labels[0])
def autocorr(x: Tensor) -> Tuple[List[int], List[float]]:
def autocorr_calculation(x: Tensor, lag: int) -> Tensor:
y1 = x[: (len(x) - lag)]
y2 = x[lag:]
sum_product = (
(y1 - (x.mean(dim=0).expand(y1.size())))
* (y2 - (x.mean(dim=0).expand(y2.size())))
).sum(0)
return sum_product / ((len(x) - lag) * torch.var(x, dim=0))
max_lag = x.size(0)
y_axis_data = [autocorr_calculation(x, lag).item() for lag in range(max_lag)]
x_axis_data = list(range(max_lag))
return (x_axis_data, y_axis_data)
def trace_plot(x: Tensor) -> Tuple[List[int], Tensor]:
return (list(range(x.size(0))), x)
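

if __name__ == "__main__":
    # Illustrative only: build plotly traces for a synthetic query with two
    # chains using the helpers above; Diagnostics.plot() goes through the same
    # plot_helper code path.
    fake_query_samples = torch.randn(2, 200, 1)  # 2 chains, 200 draws, 1-dim
    autocorr_traces, autocorr_labels = plot_helper(fake_query_samples, autocorr)
    print(f"{len(autocorr_traces)} autocorrelation trace group(s), labels={autocorr_labels}")
    trace_traces, trace_labels = plot_helper(fake_query_samples, trace_plot)
    print(f"{len(trace_traces)} trace-plot group(s), labels={trace_labels}")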
| beanmachine-main | src/beanmachine/ppl/diagnostics/common_plots.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# flake8: noqa
"""Visual diagnostic tools for Bean Machine models."""
import sys
from pathlib import Path
if sys.version_info >= (3, 8):
# NOTE: We need to import NotRequired from typing_extensions until PEP 655 is
# accepted, see https://peps.python.org/pep-0655/. This is to follow the
# interface objects in JavaScript that allow keys to not be required using ?.
from typing import TypedDict
from typing_extensions import NotRequired
else:
from typing_extensions import NotRequired, TypedDict
TOOLS_DIR = Path(__file__).parent.resolve()
JS_DIR = TOOLS_DIR.joinpath("js")
JS_DIST_DIR = JS_DIR.joinpath("dist")
| beanmachine-main | src/beanmachine/ppl/diagnostics/tools/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Visual diagnostics tools for Bean Machine models."""
from __future__ import annotations
from functools import wraps
from typing import Callable, TypeVar
from beanmachine.ppl.diagnostics.tools.utils import accessor
from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
from typing_extensions import ParamSpec
P = ParamSpec("P")
R = TypeVar("R")
def _requires_dev_packages(f: Callable[P, R]) -> Callable[P, R]:
"""A utility decorator that allow us to lazily imports the plotting modules
and throw a useful error message when the required dependencies are not
installed."""
@wraps(f)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
try:
return f(*args, **kwargs)
except ModuleNotFoundError as e:
# The diagnostic tools uses packages that are not part of the core
# BM dependency, so we need to prompt users to manually install
# those
raise ModuleNotFoundError(
"Dev packages are required for the diagnostic widgets, which "
"can be installed with `pip install 'beanmachine[dev]'"
) from e
return wrapper
@accessor.register_mcs_accessor("diagnostics")
class DiagnosticsTools:
"""Accessor object for the visual diagnostics tools."""
def __init__(self: DiagnosticsTools, mcs: MonteCarloSamples) -> None:
"""Initialize."""
self.mcs = mcs
self.idata = self.mcs.to_inference_data()
@_requires_dev_packages
def marginal1d(self: DiagnosticsTools) -> None:
"""
Marginal 1D diagnostic tool for a Bean Machine model.
Returns:
None: Displays the tool directly in a Jupyter notebook.
"""
from beanmachine.ppl.diagnostics.tools.marginal1d.tool import Marginal1d
Marginal1d(self.mcs).show()
@_requires_dev_packages
def trace(self: DiagnosticsTools) -> None:
"""
Trace diagnostic tool for a Bean Machine model.
Returns:
None: Displays the tool directly in a Jupyter notebook.
"""
from beanmachine.ppl.diagnostics.tools.trace.tool import Trace
Trace(self.mcs).show()
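

# Example usage (sketch): `samples` stands for a MonteCarloSamples object from
# a completed inference run. Importing this module registers the accessor, so
# the tools can be launched directly from the samples object in a notebook:
#
#     samples.diagnostics.marginal1d()  # interactive marginal density tool
#     samples.diagnostics.trace()       # interactive trace / rank tool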
| beanmachine-main | src/beanmachine/ppl/diagnostics/tools/viz.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| beanmachine-main | src/beanmachine/ppl/diagnostics/tools/trace/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Methods used to generate the diagnostic tool."""
from typing import List
from beanmachine.ppl.diagnostics.tools.trace import typing
from beanmachine.ppl.diagnostics.tools.utils import plotting_utils
from bokeh.core.property.wrappers import PropertyValueList
from bokeh.models.annotations import Legend, LegendItem
from bokeh.models.glyphs import Circle, Line, Quad
from bokeh.models.layouts import Column, Row
from bokeh.models.sources import ColumnDataSource
from bokeh.models.tools import HoverTool
from bokeh.models.widgets.inputs import Select
from bokeh.models.widgets.markups import Div
from bokeh.models.widgets.panels import Panel, Tabs
from bokeh.models.widgets.sliders import Slider
from bokeh.plotting.figure import figure
PLOT_WIDTH = 400
PLOT_HEIGHT = 500
TRACE_PLOT_WIDTH = 600
FIGURE_NAMES = ["marginals", "forests", "traces", "ranks"]
# Define what the empty data object looks like in order to make the browser handle all
# computations.
EMPTY_DATA = {}
def create_empty_data(num_chains: int) -> typing.Data:
"""Create an empty data object for the tool.
We do not know a priori how many chains a model will have, so we use this method to
build an empty data object with the given number of chains.
Parameters
----------
num_chains : int
The number of chains from the model.
Returns
-------
typing.Data
An empty data object to be filled by JavaScript.
"""
output = {
"marginals": {},
"forests": {},
"traces": {},
"ranks": {},
}
for chain in range(num_chains):
chain_index = chain + 1
chain_name = f"chain{chain_index}"
marginal = {
"line": {"x": [], "y": []},
"chain": [],
"mean": [],
"bandwidth": [],
}
forest = {
"line": {"x": [], "y": []},
"circle": {"x": [], "y": []},
"chain": [],
"mean": [],
}
trace = {
"line": {"x": [], "y": []},
"chain": [],
"mean": [],
}
rank = {
"quad": {
"left": [],
"top": [],
"right": [],
"bottom": [],
"chain": [],
"draws": [],
"rank": [],
},
"line": {"x": [], "y": []},
"chain": [],
"rankMean": [],
"mean": [],
}
single_chain_data = [marginal, forest, trace, rank]
chain_data = dict(zip(FIGURE_NAMES, single_chain_data))
for figure_name in FIGURE_NAMES:
output[figure_name][chain_name] = chain_data[figure_name]
return output
def create_sources(num_chains: int) -> typing.Sources:
"""Create Bokeh sources from the given data that will be bound to glyphs.
Parameters
----------
num_chains : int
The number of chains from the model.
Returns
-------
typing.Sources
A dictionary of Bokeh ColumnDataSource objects.
"""
global EMPTY_DATA
if not EMPTY_DATA:
EMPTY_DATA = create_empty_data(num_chains=num_chains)
output = {}
for figure_name, figure_data in EMPTY_DATA.items():
output[figure_name] = {}
for chain_name, chain_data in figure_data.items():
output[figure_name][chain_name] = {}
if figure_name == "marginals":
output[figure_name][chain_name]["line"] = ColumnDataSource(
{
"x": chain_data["line"]["x"],
"y": chain_data["line"]["y"],
"chain": chain_data["chain"],
"mean": chain_data["mean"],
},
)
if figure_name == "forests":
output[figure_name][chain_name]["line"] = ColumnDataSource(
{
"x": chain_data["line"]["x"],
"y": chain_data["line"]["y"],
},
)
output[figure_name][chain_name]["circle"] = ColumnDataSource(
{
"x": chain_data["circle"]["x"],
"y": chain_data["circle"]["y"],
"chain": chain_data["chain"],
},
)
if figure_name == "traces":
output[figure_name][chain_name]["line"] = ColumnDataSource(
{
"x": chain_data["line"]["x"],
"y": chain_data["line"]["y"],
"chain": chain_data["chain"],
"mean": chain_data["mean"],
},
)
if figure_name == "ranks":
output[figure_name][chain_name]["line"] = ColumnDataSource(
{
"x": chain_data["line"]["x"],
"y": chain_data["line"]["y"],
"chain": chain_data["chain"],
"rankMean": chain_data["rankMean"],
},
)
output[figure_name][chain_name]["quad"] = ColumnDataSource(
{
"left": chain_data["quad"]["left"],
"top": chain_data["quad"]["top"],
"right": chain_data["quad"]["right"],
"bottom": chain_data["quad"]["bottom"],
"chain": chain_data["chain"],
"draws": chain_data["quad"]["draws"],
"rank": chain_data["quad"]["rank"],
},
)
return output
def create_figures(rv_name: str, num_chains: int) -> typing.Figures:
"""Create the Bokeh figures used for the tool.
Parameters
----------
rv_name : str
The string representation of the random variable data.
num_chains : int
The number of chains from the model.
Returns
-------
typing.Figures
A dictionary of Bokeh Figure objects.
"""
output = {}
for figure_name in FIGURE_NAMES:
fig = figure(
width=PLOT_WIDTH,
height=PLOT_HEIGHT,
outline_line_color="black",
sizing_mode="scale_both",
)
plotting_utils.style_figure(fig)
# NOTE: There are several figures where we do not want the x-axis to change its
# limits. This is why we set the x_range to an object from Bokeh called
# Range1d.
if figure_name == "marginals":
fig.title = "Marginal"
fig.xaxis.axis_label = rv_name
fig.yaxis.visible = False
elif figure_name == "forests":
fig.title = "Forest"
fig.xaxis.axis_label = rv_name
fig.yaxis.axis_label = "Chain"
fig.yaxis.minor_tick_line_color = None
fig.yaxis.ticker.desired_num_ticks = num_chains
elif figure_name == "traces":
fig.title = "Trace"
fig.xaxis.axis_label = "Draw from single chain"
fig.yaxis.axis_label = rv_name
fig.width = TRACE_PLOT_WIDTH
elif figure_name == "ranks":
fig.title = "Rank"
fig.xaxis.axis_label = "Rank from all chains"
fig.yaxis.axis_label = "Chain"
fig.width = TRACE_PLOT_WIDTH
fig.yaxis.minor_tick_line_color = None
fig.yaxis.ticker.desired_num_ticks = num_chains
output[figure_name] = fig
return output
def create_glyphs(num_chains: int) -> typing.Glyphs:
"""Create the glyphs used for the figures of the tool.
Parameters
----------
num_chains : int
The number of chains from the model.
Returns
-------
typing.Glyphs
A dictionary of Bokeh Glyphs objects.
"""
global EMPTY_DATA
if not EMPTY_DATA:
EMPTY_DATA = create_empty_data(num_chains=num_chains)
palette = plotting_utils.choose_palette(num_colors=num_chains)
output = {}
for figure_name, figure_data in EMPTY_DATA.items():
output[figure_name] = {}
for i, (chain_name, _) in enumerate(figure_data.items()):
output[figure_name][chain_name] = {}
color = palette[i]
if figure_name == "marginals":
output[figure_name][chain_name]["line"] = {
"glyph": Line(
x="x",
y="y",
line_color=color,
line_alpha=0.7,
line_width=2.0,
name=f"{figure_name}{chain_name.title()}LineGlyph",
),
"hover_glyph": Line(
x="x",
y="y",
line_color=color,
line_alpha=1.0,
line_width=2.0,
name=f"{figure_name}{chain_name.title()}LineHoverGlyph",
),
}
elif figure_name == "forests":
output[figure_name][chain_name] = {
"line": {
"glyph": Line(
x="x",
y="y",
line_color=color,
line_alpha=0.7,
line_width=2.0,
name=f"{figure_name}{chain_name.title()}LineGlyph",
),
"hover_glyph": Line(
x="x",
y="y",
line_color=color,
line_alpha=1.0,
line_width=2.0,
name=f"{figure_name}{chain_name.title()}LineHoverGlyph",
),
},
"circle": {
"glyph": Circle(
x="x",
y="y",
size=10,
fill_color=color,
fill_alpha=0.7,
line_color="white",
name=f"{figure_name}{chain_name.title()}CircleGlyph",
),
"hover_glyph": Circle(
x="x",
y="y",
size=10,
fill_color=color,
fill_alpha=1.0,
line_color="black",
name=f"{figure_name}{chain_name.title()}CircleHoverGlyph",
),
},
}
if figure_name == "traces":
output[figure_name][chain_name]["line"] = {
"glyph": Line(
x="x",
y="y",
line_color=color,
line_alpha=0.6,
line_width=0.6,
name=f"{figure_name}{chain_name.title()}LineGlyph",
),
"hover_glyph": Line(
x="x",
y="y",
line_color=color,
line_alpha=0.6,
line_width=1.0,
name=f"{figure_name}{chain_name.title()}LineHoverGlyph",
),
}
if figure_name == "ranks":
output[figure_name][chain_name] = {
"quad": {
"glyph": Quad(
left="left",
top="top",
right="right",
bottom="bottom",
fill_color=color,
fill_alpha=0.7,
line_color="white",
name=f"{figure_name}{chain_name.title()}QuadGlyph",
),
"hover_glyph": Quad(
left="left",
top="top",
right="right",
bottom="bottom",
fill_color=color,
fill_alpha=1.0,
line_color="black",
name=f"{figure_name}{chain_name.title()}QuadHoverGlyph",
),
},
"line": {
"glyph": Line(
x="x",
y="y",
line_color="grey",
line_alpha=0.7,
line_width=3.0,
line_dash="dashed",
name=f"{figure_name}{chain_name.title()}LineGlyph",
),
"hover_glyph": Line(
x="x",
y="y",
line_color="grey",
line_alpha=1.0,
line_width=3.0,
line_dash="solid",
name=f"{figure_name}{chain_name.title()}LineGlyph",
),
},
}
return output
def add_glyphs(
figures: typing.Figures,
glyphs: typing.Glyphs,
sources: typing.Sources,
) -> None:
"""Bind source data to glyphs and add the glyphs to the given figures.
Parameters
----------
figures : typing.Figures
A dictionary of Bokeh Figure objects.
glyphs : typing.Glyphs
A dictionary of Bokeh Glyphs objects.
sources : typing.Sources
A dictionary of Bokeh ColumnDataSource objects.
Returns
-------
None
Adds data bound glyphs to the given figures directly.
"""
for figure_name, figure_sources in sources.items():
fig = figures[figure_name]
for chain_name, source in figure_sources.items():
chain_glyphs = glyphs[figure_name][chain_name]
# NOTE: Every figure has a line glyph, so we always add it here.
fig.add_glyph(
source_or_glyph=source["line"],
glyph=chain_glyphs["line"]["glyph"],
hover_glyph=chain_glyphs["line"]["hover_glyph"],
name=chain_glyphs["line"]["glyph"].name,
)
# We want to keep the x-axis from moving when changing queries, so we add
# the bounds below from the marginal figure. All figures that need to keep
            # their range stable are linked to the marginal figure's range below.
if figure_name == "marginals":
pass
elif figure_name == "forests":
fig.add_glyph(
source_or_glyph=source["circle"],
glyph=chain_glyphs["circle"]["glyph"],
hover_glyph=chain_glyphs["circle"]["hover_glyph"],
name=chain_glyphs["circle"]["glyph"].name,
)
elif figure_name == "ranks":
fig.add_glyph(
source_or_glyph=source["quad"],
glyph=chain_glyphs["quad"]["glyph"],
hover_glyph=chain_glyphs["quad"]["hover_glyph"],
name=chain_glyphs["quad"]["glyph"].name,
)
# Link figure ranges together.
figures["forests"].x_range = figures["marginals"].x_range
def create_annotations(figures: typing.Figures, num_chains: int) -> typing.Annotations:
"""Create any annotations for the figures of the tool.
Parameters
----------
figures : typing.Figures
A dictionary of Bokeh Figure objects.
num_chains : int
The number of chains of the model.
Returns
-------
typing.Annotations
A dictionary of Bokeh Annotation objects.
"""
renderers = []
for _, fig in figures.items():
renderers.extend(PropertyValueList(fig.renderers))
legend_items = []
for chain in range(num_chains):
chain_index = chain + 1
chain_name = f"chain{chain_index}"
legend_items.append(
LegendItem(
renderers=[
renderer
for renderer in renderers
if chain_name in renderer.name.lower()
],
label=chain_name,
),
)
legend = Legend(
items=legend_items,
orientation="horizontal",
border_line_color="black",
click_policy="hide",
)
output = {"traces": {"legend": legend}, "ranks": {"legend": legend}}
return output
def add_annotations(figures: typing.Figures, annotations: typing.Annotations) -> None:
"""Add the given annotations to the given figures of the tool.
Parameters
----------
figures : typing.Figures
A dictionary of Bokeh Figure objects.
annotations : typing.Annotations
A dictionary of Bokeh Annotation objects.
Returns
-------
None
Adds annotations directly to the given figures.
"""
for figure_name, figure_annotations in annotations.items():
fig = figures[figure_name]
for _, annotation in figure_annotations.items():
fig.add_layout(annotation, "below")
def create_tooltips(
rv_name: str,
figures: typing.Figures,
num_chains: int,
) -> typing.Tooltips:
"""Create hover tools for the glyphs used in the figures of the tool.
Parameters
----------
rv_name : str
The string representation of the random variable data.
figures : typing.Figures
A dictionary of Bokeh Figure objects.
num_chains : int
The number of chains of the model.
Returns
-------
typing.Tooltips
A dictionary of Bokeh HoverTools objects.
"""
output = {}
for figure_name, fig in figures.items():
output[figure_name] = []
for chain in range(num_chains):
chain_index = chain + 1
chain_name = f"chain{chain_index}"
if figure_name == "marginals":
glyph_name = f"{figure_name}{chain_name.title()}LineGlyph"
output[figure_name].append(
HoverTool(
renderers=plotting_utils.filter_renderers(fig, glyph_name),
tooltips=[
("Chain", "@chain"),
("Mean", "@mean"),
(rv_name, "@x"),
],
),
)
if figure_name == "forests":
glyph_name = f"{figure_name}{chain_name.title()}CircleGlyph"
output[figure_name].append(
HoverTool(
renderers=plotting_utils.filter_renderers(fig, glyph_name),
tooltips=[
("Chain", "@chain"),
(rv_name, "@x"),
],
),
)
if figure_name == "traces":
glyph_name = f"{figure_name}{chain_name.title()}LineGlyph"
output[figure_name].append(
HoverTool(
renderers=plotting_utils.filter_renderers(fig, glyph_name),
tooltips=[
("Chain", "@chain"),
("Mean", "@mean"),
(rv_name, "@y"),
],
),
)
if figure_name == "ranks":
output[figure_name].append(
{
"line": HoverTool(
renderers=plotting_utils.filter_renderers(
fig,
f"{figure_name}{chain_name.title()}LineGlyph",
),
tooltips=[("Chain", "@chain"), ("Rank mean", "@rankMean")],
),
"quad": HoverTool(
renderers=plotting_utils.filter_renderers(
fig,
f"{figure_name}{chain_name.title()}QuadGlyph",
),
tooltips=[
("Chain", "@chain"),
("Draws", "@draws"),
("Rank", "@rank"),
],
),
},
)
return output
def add_tooltips(figures: typing.Figures, tooltips: typing.Tooltips) -> None:
"""Add the given tools to the figures.
Parameters
----------
figures : typing.Figures
A dictionary of Bokeh Figure objects.
tooltips : typing.Tooltips
A dictionary of Bokeh HoverTools objects.
Returns
-------
None
Adds the tooltips directly to the given figures.
"""
for figure_name, fig in figures.items():
for tips in tooltips[figure_name]:
if figure_name == "ranks":
for _, tips_ in tips.items():
fig.add_tools(tips_)
else:
fig.add_tools(tips)
def create_widgets(rv_names: List[str], rv_name: str) -> typing.Widgets:
"""Create the widgets used in the tool.
Parameters
----------
rv_names : List[str]
A list of all available random variable names.
rv_name : str
The string representation of the random variable data.
Returns
-------
typing.Widgets
A dictionary of Bokeh widget objects.
"""
output = {
"rv_select": Select(value=rv_name, options=rv_names, title="Query"),
"bw_factor_slider": Slider(
start=0.01,
end=2.00,
step=0.01,
value=1.0,
title="Bandwidth factor",
),
"hdi_slider": Slider(start=1, end=99, step=1, value=89, title="HDI"),
}
return output
def help_page() -> Div:
"""Help tab for the tool.
Returns
-------
Div
Bokeh Div widget containing the help tab information.
"""
text = """
<h2>Rank plots</h2>
<p style="margin-bottom: 10px">
      Rank plots are histograms of the samples over time. All samples across
      all chains are ranked, and then we plot the average rank for each chain at
      regular intervals. If the chains are mixing well, this histogram should
look roughly uniform. If it looks highly irregular that suggests chains
might be getting stuck and not adequately exploring the sample space.
See the paper by Vehtari <em>et al</em> for more information.
</p>
<h2>Trace plots</h2>
<p style="margin-bottom: 10px">
The more familiar trace plots are also included in this widget. You can
click on the legend to show/hide different chains and compare them to the
rank plots.
</p>
<ul>
<li>
Vehtari A, Gelman A, Simpson D, Carpenter B, Bürkner PC (2021)
<b>
Rank-normalization, folding, and localization: An improved \\(\\hat{R}\\)
for assessing convergence of MCMC (with discussion)
</b>.
<em>Bayesian Analysis</em> 16(2)
667–718.
<a
href=https://dx.doi.org/10.1214/20-BA1221
style="color: blue"
target="_blank"
>
doi: 10.1214/20-BA1221
</a>.
</li>
</ul>
"""
return Div(text=text, disable_math=False, min_width=PLOT_WIDTH)
def create_view(figures: typing.Figures, widgets: typing.Widgets) -> Tabs:
"""Create the tool view.
Parameters
----------
figures : typing.Figures
A dictionary of Bokeh Figure objects.
widgets : typing.Widgets
A dictionary of Bokeh widget objects.
Returns
-------
Tabs
Bokeh Tabs objects.
"""
toolbar = plotting_utils.create_toolbar(list(figures.values()))
help_panel = Panel(child=help_page(), title="Help", name="helpPanel")
marginal_panel = Panel(
child=Column(
children=[figures["marginals"], widgets["bw_factor_slider"]],
sizing_mode="scale_both",
),
title="Marginals",
)
forest_panel = Panel(
child=Column(
children=[figures["forests"], widgets["hdi_slider"]],
sizing_mode="scale_both",
),
title="HDIs",
)
left_panels = Tabs(tabs=[marginal_panel, forest_panel], sizing_mode="scale_both")
trace_panel = Panel(
child=Column(children=[figures["traces"]], sizing_mode="scale_both"),
title="Traces",
)
rank_panel = Panel(
child=Column(children=[figures["ranks"]], sizing_mode="scale_both"),
title="Ranks",
)
right_panels = Tabs(tabs=[trace_panel, rank_panel], sizing_mode="scale_both")
tool_panel = Panel(
child=Column(
children=[
widgets["rv_select"],
Row(
children=[left_panels, right_panels, toolbar],
sizing_mode="scale_both",
),
],
sizing_mode="scale_both",
),
title="Trace tool",
)
return Tabs(tabs=[tool_panel, help_panel], sizing_mode="scale_both")
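

if __name__ == "__main__":
    # Illustrative only: assemble an (empty) trace-tool layout for a model with
    # two chains and open it with Bokeh. The data sources created here are
    # deliberately empty; they are filled in by the JavaScript callbacks wired
    # up in tool.py.
    from bokeh.io import show

    num_chains = 2
    rv_names = ["alpha()", "beta()"]
    sources = create_sources(num_chains=num_chains)
    figures = create_figures(rv_name=rv_names[0], num_chains=num_chains)
    glyphs = create_glyphs(num_chains=num_chains)
    add_glyphs(figures=figures, glyphs=glyphs, sources=sources)
    annotations = create_annotations(figures=figures, num_chains=num_chains)
    add_annotations(figures=figures, annotations=annotations)
    tooltips = create_tooltips(
        rv_name=rv_names[0], figures=figures, num_chains=num_chains
    )
    add_tooltips(figures=figures, tooltips=tooltips)
    widgets = create_widgets(rv_names=rv_names, rv_name=rv_names[0])
    show(create_view(figures=figures, widgets=widgets))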
| beanmachine-main | src/beanmachine/ppl/diagnostics/tools/trace/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Trace diagnostic tool types for a Bean Machine model."""
from typing import Any, Dict, List, Union
from beanmachine.ppl.diagnostics.tools import NotRequired, TypedDict
from bokeh.models.annotations import Legend
from bokeh.models.glyphs import Circle, Line, Quad
from bokeh.models.sources import ColumnDataSource
from bokeh.models.tools import HoverTool
from bokeh.models.widgets.inputs import Select
from bokeh.models.widgets.sliders import Slider
from bokeh.plotting.figure import Figure
# NOTE: These are the types pyre gives us when using `reveal_type(...)` on the outputs
# of the methods.
Data = Dict[str, Dict[Any, Any]]
Sources = Dict[Any, Any]
Figures = Dict[Any, Any]
Glyphs = Dict[Any, Any]
Annotations = Dict[str, Dict[str, Legend]]
Tooltips = Dict[Any, Any]
Widgets = Dict[str, Union[Select, Slider]]
# NOTE: TypedDict objects are for reference only. Due to the way pyre accesses keys in
# dictionaries, and how NumPy casts arrays when using tolist(), we are unable to
# use them, but they provide semantic information for the different types. We must
# ignore a lot of lines due to the issue discussed here
# https://pyre-check.org/docs/errors/#13-uninitialized-attribute.
class _LineOrCircleGlyphData(TypedDict): # pyre-ignore
x: List[float]
y: List[float]
class _QuadGlyphData(TypedDict): # pyre-ignore
"""Follow the RankHistogram interface in stats/histogram.js."""
left: List[float]
top: List[float]
right: List[float]
bottom: List[float]
chain: List[int]
draws: List[str]
rank: List[float]
class _MarginalDataSingleChain(TypedDict): # pyre-ignore
line: _LineOrCircleGlyphData
chain: int
mean: float
bandwidth: float
class _ForestDataSingleChain(TypedDict): # pyre-ignore
line: _LineOrCircleGlyphData
circle: _LineOrCircleGlyphData
chain: int
mean: float
class _TraceDataSingleChain(TypedDict): # pyre-ignore
line: _LineOrCircleGlyphData
chain: int
mean: float
class _RankDataSingleChain(TypedDict): # pyre-ignore
quad: _QuadGlyphData
line: _LineOrCircleGlyphData
chain: List[int]
rankMean: List[float]
mean: List[float]
_MarginalDataAllChains = Dict[str, _MarginalDataSingleChain]
_ForestDataAllChains = Dict[str, _ForestDataSingleChain]
_TraceDataAllChains = Dict[str, _TraceDataSingleChain]
_RankDataAllChains = Dict[str, _RankDataSingleChain]
class _Data(TypedDict): # pyre-ignore
marginals: _MarginalDataAllChains
forests: _ForestDataAllChains
traces: _TraceDataAllChains
ranks: _RankDataAllChains
class _SourceSingleChain(TypedDict): # pyre-ignore
line: ColumnDataSource
circle: NotRequired[ColumnDataSource]
quad: NotRequired[ColumnDataSource]
_SourceAllChains = Dict[str, _SourceSingleChain]
class _Sources(TypedDict): # pyre-ignore
marginals: _SourceAllChains
forests: _SourceAllChains
traces: _SourceAllChains
ranks: _SourceAllChains
class _Figures(TypedDict): # pyre-ignore
marginals: Figure
forests: Figure
traces: Figure
ranks: Figure
class _RankTooltip(TypedDict): # pyre-ignore
line: HoverTool
quad: HoverTool
class _Tooltips(TypedDict): # pyre-ignore
marginals: List[HoverTool]
forests: List[HoverTool]
traces: List[HoverTool]
ranks: List[_RankTooltip]
class _Glyph(TypedDict): # pyre-ignore
glyph: Union[Circle, Line, Quad]
hover_glyph: Union[Circle, Line, Quad]
class _GlyphSingleChain(TypedDict): # pyre-ignore
line: _Glyph
circle: NotRequired[_Glyph]
quad: NotRequired[_Glyph]
_GlyphAllChains = Dict[str, _GlyphSingleChain]
class _Glyphs(TypedDict): # pyre-ignore
marginals: _GlyphAllChains
forests: _GlyphAllChains
traces: _GlyphAllChains
ranks: _GlyphAllChains
_Annotations = Dict[str, Dict[str, Legend]]
class _Widgets(TypedDict): # pyre-ignore
rv_select: Select
bw_factor_slider: Slider
hdi_slider: Slider
| beanmachine-main | src/beanmachine/ppl/diagnostics/tools/trace/typing.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Trace diagnostic tool for a Bean Machine model."""
from __future__ import annotations
from beanmachine.ppl.diagnostics.tools.trace import utils
from beanmachine.ppl.diagnostics.tools.utils.diagnostic_tool_base import (
DiagnosticToolBaseClass,
)
from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
from bokeh.models import Model
from bokeh.models.callbacks import CustomJS
class Trace(DiagnosticToolBaseClass):
"""Trace tool.
Args:
mcs (MonteCarloSamples): The return object from running a Bean Machine model.
Attributes:
data (Dict[str, List[List[float]]]): JSON serializable representation of the
given `mcs` object.
rv_names (List[str]): The list of random variables string names for the given
model.
num_chains (int): The number of chains of the model.
num_draws (int): The number of draws of the model for each chain.
palette (List[str]): A list of color values used for the glyphs in the figures.
The colors are specifically chosen from the Colorblind palette defined in
Bokeh.
        tool_js (str): The JavaScript callbacks needed to render the Bokeh tool
independently from a Python server.
"""
def __init__(self: Trace, mcs: MonteCarloSamples) -> None:
super(Trace, self).__init__(mcs)
def create_document(self: Trace) -> Model:
# Initialize widget values using Python.
rv_name = self.rv_names[0]
# NOTE: We are going to use Python and Bokeh to render the tool in the notebook
# output cell, however, we WILL NOT use Python to calculate any of the
# statistics displayed in the tool. We do this so we can make the BROWSER
# run all the calculations based on user interactions. If we did not
# employ this strategy, then the initial display a user would receive
# would be calculated by Python, and any subsequent updates would be
# calculated by JavaScript. The side-effect of having two backends
# calculate data could cause the figures to flicker, which would not be a
# good end user experience.
#
# Bokeh 3.0 is implementing an "on load" feature, which would nullify this
# requirement, and until that version is released, we have to employ this
# work-around.
# Create empty Bokeh sources using Python.
sources = utils.create_sources(num_chains=self.num_chains)
# Create empty figures for the tool using Python.
figures = utils.create_figures(rv_name=rv_name, num_chains=self.num_chains)
# Create empty glyphs and attach them to the figures using Python.
glyphs = utils.create_glyphs(num_chains=self.num_chains)
utils.add_glyphs(sources=sources, figures=figures, glyphs=glyphs)
# Create empty annotations and attach them to the figures using Python.
annotations = utils.create_annotations(
figures=figures,
num_chains=self.num_chains,
)
utils.add_annotations(figures=figures, annotations=annotations)
# Create empty tool tips and attach them to the figures using Python.
tooltips = utils.create_tooltips(
figures=figures,
rv_name=rv_name,
num_chains=self.num_chains,
)
utils.add_tooltips(figures=figures, tooltips=tooltips)
# Create the widgets for the tool using Python.
widgets = utils.create_widgets(rv_names=self.rv_names, rv_name=rv_name)
# Create the view of the tool and serialize it into HTML using static resources
# from Bokeh. Embedding the tool in this manner prevents external CDN calls for
# JavaScript resources, and prevents the user from having to know where the
# Bokeh server is.
tool_view = utils.create_view(figures=figures, widgets=widgets)
# Create callbacks for the tool using JavaScript.
callback_js = f"""
const rvName = widgets.rv_select.value;
const rvData = data[rvName];
let bw = 0.0;
// Remove the CSS classes that dim the tool output on initial load.
const toolTab = toolView.tabs[0];
const toolChildren = toolTab.child.children;
const dimmedComponent = toolChildren[1];
dimmedComponent.css_classes = [];
try {{
trace.update(
rvData,
rvName,
bwFactor,
hdiProbability,
sources,
figures,
tooltips,
);
}} catch (error) {{
{self.tool_js}
trace.update(
rvData,
rvName,
bwFactor,
hdiProbability,
sources,
figures,
tooltips,
);
}}
"""
# Each widget requires the following dictionary for the CustomJS method. Notice
# that the callback_js object above uses the names defined as keys in the below
# object with values defined by the Python objects.
callback_arguments = {
"data": self.data,
"widgets": widgets,
"sources": sources,
"figures": figures,
"tooltips": tooltips,
"toolView": tool_view,
}
# Each widget requires slightly different JS.
rv_select_js = f"""
const bwFactor = 1.0;
const hdiProbability = 0.89;
widgets.bw_factor_slider.value = bwFactor;
widgets.hdi_slider.value = 100 * hdiProbability;
{callback_js};
figures.marginals.reset.emit();
"""
slider_js = f"""
const bwFactor = widgets.bw_factor_slider.value;
const hdiProbability = widgets.hdi_slider.value / 100;
{callback_js};
"""
slider_callback = CustomJS(args=callback_arguments, code=slider_js)
rv_select_callback = CustomJS(args=callback_arguments, code=rv_select_js)
# Tell Python to use the JavaScript.
widgets["rv_select"].js_on_change("value", rv_select_callback)
widgets["bw_factor_slider"].js_on_change("value", slider_callback)
widgets["hdi_slider"].js_on_change("value", slider_callback)
return tool_view
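

# Example usage (sketch): `samples` stands for a MonteCarloSamples object from
# a completed inference run.
#
#     Trace(samples).show()  # renders the interactive tool in a notebook cell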
| beanmachine-main | src/beanmachine/ppl/diagnostics/tools/trace/tool.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| beanmachine-main | src/beanmachine/ppl/diagnostics/tools/marginal1d/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Methods used to generate the diagnostic tool."""
from typing import List
import numpy as np
from beanmachine.ppl.diagnostics.tools.marginal1d import typing
from beanmachine.ppl.diagnostics.tools.utils import plotting_utils
from bokeh.models.annotations import Band, LabelSet
from bokeh.models.glyphs import Circle, Line
from bokeh.models.layouts import Column, Row
from bokeh.models.sources import ColumnDataSource
from bokeh.models.tools import HoverTool
from bokeh.models.widgets.inputs import Select
from bokeh.models.widgets.markups import Div
from bokeh.models.widgets.panels import Panel, Tabs
from bokeh.models.widgets.sliders import Slider
from bokeh.plotting.figure import figure
PLOT_WIDTH = 500
PLOT_HEIGHT = 500
FIGURE_NAMES = ["marginal", "cumulative"]
# Define what the empty data object looks like in order to make the browser handle all
# computations.
EMPTY_DATA = {
"marginal": {
"distribution": {"x": [], "y": [], "bandwidth": np.NaN},
"hdi": {"base": [], "lower": [], "upper": []},
"stats": {"x": [], "y": [], "text": []},
"labels": {
"x": [],
"y": [],
"text": [],
"text_align": [],
"x_offset": [],
"y_offset": [],
},
},
"cumulative": {
"distribution": {"x": [], "y": [], "bandwidth": np.NaN},
"hdi": {"base": [], "lower": [], "upper": []},
"stats": {"x": [], "y": [], "text": []},
"labels": {
"x": [],
"y": [],
"text": [],
"text_align": [],
"x_offset": [],
"y_offset": [],
},
},
}
SIZING = {
"sizing_mode": "scale_both",
"max_height": PLOT_HEIGHT + 250, # drop down menus and tabs
"max_width": 2 * PLOT_WIDTH + 30, # tool bars
}
def create_sources() -> typing.Sources:
"""Create Bokeh sources from the given data that will be bound to glyphs.
Returns
-------
typing.Sources
A dictionary of Bokeh ColumnDataSource objects.
"""
output = {}
for figure_name, figure_data in EMPTY_DATA.items():
output[figure_name] = {}
for glyph_name, glyph_data in figure_data.items():
if "bandwidth" in list(glyph_data.keys()):
glyph_data.pop("bandwidth")
output[figure_name][glyph_name] = ColumnDataSource(data=glyph_data)
return output
def create_figures(rv_name: str) -> typing.Figures:
"""Create the Bokeh figures used for the tool.
Parameters
----------
rv_name : str
The string representation of the random variable data.
Returns
-------
typing.Figures
A dictionary of Bokeh Figure objects.
"""
output = {}
for figure_name in FIGURE_NAMES:
fig = figure(
max_width=PLOT_WIDTH,
max_height=PLOT_HEIGHT,
outline_line_color="black",
title=f"{figure_name} distribution",
x_axis_label=rv_name,
y_axis_label=None,
sizing_mode="scale_both",
)
fig.yaxis.visible = False
plotting_utils.style_figure(fig)
output[figure_name] = fig
output[FIGURE_NAMES[0]].x_range = output[FIGURE_NAMES[1]].x_range
output[FIGURE_NAMES[0]].y_range = output[FIGURE_NAMES[1]].y_range
return output
def create_glyphs() -> typing.Glyphs:
"""Create the glyphs used for the figures of the tool.
Returns
-------
typing.Glyphs
A dictionary of Bokeh Glyphs objects.
"""
palette = plotting_utils.choose_palette(num_colors=2)
output = {}
for figure_name, figure_data in EMPTY_DATA.items():
output[figure_name] = {}
for glyph_name, _ in figure_data.items():
if glyph_name in ["distribution", "stats"]:
if glyph_name == "distribution":
output[figure_name][glyph_name] = {
"glyph": Line(
x="x",
y="y",
line_color=palette[0],
line_alpha=0.7,
line_width=2.0,
name=f"{figure_name}DistributionGlyph",
),
"hover_glyph": Line(
x="x",
y="y",
line_color=palette[1],
line_alpha=1.0,
line_width=2.0,
name=f"{figure_name}DistributionHoverGlyph",
),
}
if glyph_name == "stats":
output[figure_name][glyph_name] = {
"glyph": Circle(
x="x",
y="y",
size=10,
fill_color=palette[0],
line_color="white",
fill_alpha=1.0,
name=f"{figure_name}StatsGlyph",
),
"hover_glyph": Circle(
x="x",
y="y",
size=10,
fill_color=palette[1],
line_color="black",
fill_alpha=1.0,
name=f"{figure_name}StatsHoverGlyph",
),
}
return output
def add_glyphs(
figures: typing.Figures,
glyphs: typing.Glyphs,
sources: typing.Sources,
) -> None:
"""Bind source data to glyphs and add the glyphs to the given figures.
Parameters
----------
figures : typing.Figures
A dictionary of Bokeh Figure objects.
glyphs : typing.Glyphs
A dictionary of Bokeh Glyphs objects.
sources : typing.Sources
A dictionary of Bokeh ColumnDataSource objects.
Returns
-------
None
Adds data bound glyphs to the given figures directly.
"""
for figure_name, figure_glyphs in glyphs.items():
fig = figures[figure_name]
figure_sources = sources[figure_name]
        for glyph_name, glyph_pair in figure_glyphs.items():
            glyph_source = figure_sources[glyph_name]
            fig.add_glyph(
                source_or_glyph=glyph_source,
                glyph=glyph_pair["glyph"],
                hover_glyph=glyph_pair["hover_glyph"],
                name=glyph_pair["glyph"].name,
            )
def create_annotations(sources: typing.Sources) -> typing.Annotations:
"""Create any annotations for the figures of the tool.
Parameters
----------
    sources : typing.Sources
A dictionary of Bokeh ColumnDataSource objects.
Returns
-------
typing.Annotations
A dictionary of Bokeh Annotation objects.
"""
palette = plotting_utils.choose_palette(num_colors=1)
output = {}
for figure_name, figure_sources in sources.items():
output[figure_name] = {}
for glyph_name, glyph_source in figure_sources.items():
if glyph_name == "hdi":
output[figure_name][glyph_name] = Band(
base="base",
lower="lower",
upper="upper",
source=glyph_source,
level="underlay",
fill_color=palette[0],
fill_alpha=0.2,
line_width=1.0,
line_color="white",
name=f"{figure_name}HdiAnnotation",
)
elif glyph_name == "labels":
output[figure_name][glyph_name] = LabelSet(
x="x",
y="y",
text="text",
x_offset="x_offset",
y_offset="y_offset",
text_align="text_align",
source=glyph_source,
background_fill_color="white",
background_fill_alpha=0.8,
name=f"{figure_name}LabelAnnotation",
)
return output
def add_annotations(figures: typing.Figures, annotations: typing.Annotations) -> None:
"""Add the given annotations to the given figures of the tool.
Parameters
----------
figures : typing.Figures
A dictionary of Bokeh Figure objects.
annotations : typing.Annotations
A dictionary of Bokeh Annotation objects.
Returns
-------
None
Adds annotations directly to the given figures.
"""
for figure_name, annotation_sources in annotations.items():
fig = figures[figure_name]
for _, annotation in annotation_sources.items():
fig.add_layout(annotation)
def create_tooltips(rv_name: str, figures: typing.Figures) -> typing.Tooltips:
"""Create hover tools for the glyphs used in the figures of the tool.
Parameters
----------
rv_name : str
The string representation of the random variable data.
figures : typing.Figures
A dictionary of Bokeh Figure objects.
Returns
-------
typing.Tooltips
A dictionary of Bokeh HoverTools objects.
"""
output = {}
for figure_name, fig in figures.items():
output[figure_name] = {
"distribution": HoverTool(
renderers=plotting_utils.filter_renderers(
figure=fig,
search="DistributionGlyph",
glyph_type="GlyphRenderer",
substring=True,
),
tooltips=[(rv_name, "@x")],
),
"stats": HoverTool(
renderers=plotting_utils.filter_renderers(
figure=fig,
search="StatsGlyph",
glyph_type="GlyphRenderer",
substring=True,
),
tooltips=[("", "@text")],
),
}
return output
def add_tooltips(figures: typing.Figures, tooltips: typing.Tooltips) -> None:
"""Add the given tools to the figures.
Parameters
----------
figures : typing.Figures
A dictionary of Bokeh Figure objects.
tooltips : typing.Tooltips
A dictionary of Bokeh HoverTools objects.
Returns
-------
None
Adds the tooltips directly to the given figures.
"""
for figure_name, figure_tooltips in tooltips.items():
fig = figures[figure_name]
for _, tooltip in figure_tooltips.items():
fig.add_tools(tooltip)
def create_widgets(
rv_name: str,
rv_names: List[str],
bw_factor: float,
bandwidth: float,
) -> typing.Widgets:
"""Create the widgets used in the tool.
Parameters
----------
rv_name : str
The string representation of the random variable data.
rv_names : List[str]
A list of all available random variable names.
bw_factor : float
Multiplicative factor used when calculating the kernel density estimate.
bandwidth : float
The bandwidth used to calculate the KDE.
Returns
-------
typing.Widgets
A dictionary of Bokeh widget objects.
"""
return {
"rv_select": Select(value=rv_name, options=rv_names, title="Query"),
"bw_factor_slider": Slider(
title="Bandwidth factor",
start=0.01,
end=2.00,
value=1.00,
step=0.01,
),
"bw_div": Div(text=f"Bandwidth: {bw_factor * bandwidth}"),
"hdi_slider": Slider(start=1, end=99, step=1, value=89, title="HDI"),
}
def help_page() -> Div:
"""Help tab for the tool.
Returns
-------
Div
Bokeh Div widget containing the help tab information.
"""
text = """
<h2>
Highest density interval
</h2>
<p style="margin-bottom: 10px">
      The highest density interval region is not equal-tailed like a typical
      interval that leaves 2.5% in each tail. Thus it will include the mode(s) of the
posterior distribution.
</p>
<p style="margin-bottom: 10px">
There is nothing particularly specific about having a default HDI of 89%.
      In fact, the only remarkable thing about defaulting to 89% is that it is
the highest prime number that does not exceed the unstable 95% threshold.
See the link to McElreath's book below for further discussion.
</p>
<ul>
<li>
McElreath R (2020)
<b>
Statistical Rethinking: A Bayesian Course with Examples in R and Stan
2nd edition.
</b>
<em>Chapman and Hall/CRC</em>
<a
href=https://dx.doi.org/10.1201/9780429029608
style="color: blue"
target="_blank"
>
doi: 10.1201/9780429029608
</a>.
</li>
</ul>
"""
return Div(text=text, disable_math=False, min_width=PLOT_WIDTH)
def create_figure_grid(figures: typing.Figures) -> Row:
"""Layout the given figures in a grid, and make one toolbar.
Parameters
----------
figures : typing.Figures
A dictionary of Bokeh Figure objects.
Returns
-------
Row
A Bokeh layout object.
"""
toolbar = plotting_utils.create_toolbar(figures=list(figures.values()))
return Row(children=[*list(figures.values()), toolbar], css_classes=["bk-loading"])
def create_view(widgets: typing.Widgets, figures: typing.Figures) -> Tabs:
"""Create the tool view.
Parameters
----------
widgets : typing.Widgets
A dictionary of Bokeh widget objects.
figures : typing.Figures
A dictionary of Bokeh Figure objects.
Returns
-------
Tabs
Bokeh Tabs objects.
"""
help_panel = Panel(child=help_page(), title="Help", name="helpPanel")
fig_child = Column(
children=[
create_figure_grid(figures),
widgets["bw_factor_slider"],
widgets["bw_div"],
widgets["hdi_slider"],
],
css_classes=["bm-tool-loading", "arcs"],
)
fig_child.update_from_json(SIZING)
tool_child = Column(children=[widgets["rv_select"], fig_child])
tool_child.update_from_json(SIZING)
tool_panel = Panel(
child=tool_child,
title="Marginal 1D",
name="toolPanel",
)
tabs = Tabs(tabs=[tool_panel, help_panel])
tabs.update_from_json(SIZING)
return tabs
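

if __name__ == "__main__":
    # Illustrative only: assemble an (empty) marginal-1D layout and open it
    # with Bokeh. As with the trace tool, the sources start empty and are
    # populated by the JavaScript callbacks wired up in tool.py.
    from bokeh.io import show

    rv_names = ["alpha()", "beta()"]
    sources = create_sources()
    figures = create_figures(rv_name=rv_names[0])
    glyphs = create_glyphs()
    add_glyphs(figures=figures, glyphs=glyphs, sources=sources)
    annotations = create_annotations(sources=sources)
    add_annotations(figures=figures, annotations=annotations)
    tooltips = create_tooltips(rv_name=rv_names[0], figures=figures)
    add_tooltips(figures=figures, tooltips=tooltips)
    widgets = create_widgets(
        rv_name=rv_names[0], rv_names=rv_names, bw_factor=1.0, bandwidth=0.0
    )
    show(create_view(widgets=widgets, figures=figures))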
| beanmachine-main | src/beanmachine/ppl/diagnostics/tools/marginal1d/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Marginal 1D diagnostic tool types for a Bean Machine model."""
from typing import Any, Dict, List, Union
from beanmachine.ppl.diagnostics.tools import TypedDict
from bokeh.models.annotations import Band, LabelSet
from bokeh.models.glyphs import Circle, Line
from bokeh.models.sources import ColumnDataSource
from bokeh.models.tools import HoverTool
from bokeh.models.widgets.inputs import Select
from bokeh.models.widgets.markups import Div
from bokeh.models.widgets.sliders import Slider
from bokeh.plotting.figure import Figure
# NOTE: These are the types pyre gives us when using `reveal_type(...)` on the outputs
# of the methods.
StatsAndLabelsData = Dict[str, Dict[str, Any]]
HDIData = Dict[str, Any]
Data = Dict[Any, Any]
Sources = Dict[Any, Any]
Figures = Dict[Any, Any]
Glyphs = Dict[Any, Any]
Annotations = Dict[Any, Any]
Tooltips = Dict[Any, Any]
Widgets = Dict[str, Union[Div, Select, Slider]]
# NOTE: TypedDict objects are for reference only. Due to the way pyre accesses keys in
# dictionaries, and how NumPy casts arrays when using tolist(), we are unable to
# use them, but they provide semantic information for the different types. We must
# ignore a lot of lines due to the issue discussed here
# https://pyre-check.org/docs/errors/#13-uninitialized-attribute.
class _DistributionData(TypedDict): # pyre-ignore
x: List[float]
y: List[float]
bandwidth: float
class _HDIData(TypedDict): # pyre-ignore
base: List[float]
lower: List[float]
upper: List[float]
class _StatsData(TypedDict): # pyre-ignore
x: List[float]
y: List[float]
text: List[str]
class _LabelsData(TypedDict): # pyre-ignore
x: List[float]
y: List[float]
text: List[str]
text_align: List[str]
x_offset: List[int]
y_offset: List[int]
class _GlyphData(TypedDict): # pyre-ignore
    distribution: _DistributionData
hdi: _HDIData
stats: _StatsData
labels: _LabelsData
class _Data(TypedDict): # pyre-ignore
marginal: _GlyphData
cumulative: _GlyphData
class _Source(TypedDict): # pyre-ignore
distribution: ColumnDataSource
hdi: ColumnDataSource
stats: ColumnDataSource
labels: ColumnDataSource
class _Sources(TypedDict): # pyre-ignore
marginal: _Source
cumulative: _Source
class _Figures(TypedDict): # pyre-ignore
marginal: Figure
cumulative: Figure
class _DistributionGlyph(TypedDict): # pyre-ignore
glyph: Line
hover_glyph: Line
class _StatsGlyph(TypedDict): # pyre-ignore
glyph: Circle
hover_glyph: Circle
class _FigureGlyphs(TypedDict): # pyre-ignore
distribution: _DistributionGlyph
stats: _StatsGlyph
class _Glyphs(TypedDict): # pyre-ignore
marginal: _FigureGlyphs
cumulative: _FigureGlyphs
class _FigureAnnotations(TypedDict): # pyre-ignore
hdi: Band
labels: LabelSet
class _Annotations(TypedDict): # pyre-ignore
marginal: _FigureAnnotations
cumulative: _FigureAnnotations
class _Tooltip(TypedDict): # pyre-ignore
distribution: HoverTool
stats: HoverTool
class _Tooltips(TypedDict): # pyre-ignore
marginal: _Tooltip
cumulative: _Tooltip
class _Widgets(TypedDict): # pyre-ignore
rv_select: Select
bw_factor_slider: Slider
bw_div: Div
hdi_slider: Slider
| beanmachine-main | src/beanmachine/ppl/diagnostics/tools/marginal1d/typing.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Marginal 1D diagnostic tool for a Bean Machine model."""
from __future__ import annotations
from beanmachine.ppl.diagnostics.tools.marginal1d import utils
from beanmachine.ppl.diagnostics.tools.utils.diagnostic_tool_base import (
DiagnosticToolBaseClass,
)
from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
from bokeh.models import Model
from bokeh.models.callbacks import CustomJS
class Marginal1d(DiagnosticToolBaseClass):
"""
Marginal 1D diagnostic tool.
Args:
mcs (MonteCarloSamples): The return object from running a Bean Machine model.
Attributes:
data (Dict[str, List[List[float]]]): JSON serializable representation of the
given `mcs` object.
rv_names (List[str]): The list of random variables string names for the given
model.
num_chains (int): The number of chains of the model.
num_draws (int): The number of draws of the model for each chain.
palette (List[str]): A list of color values used for the glyphs in the figures.
The colors are specifically chosen from the Colorblind palette defined in
Bokeh.
        tool_js (str): The JavaScript callbacks needed to render the Bokeh tool
independently from a Python server.
"""
def __init__(self: Marginal1d, mcs: MonteCarloSamples) -> None:
super(Marginal1d, self).__init__(mcs)
def create_document(self: Marginal1d) -> Model:
# Initialize widget values using Python.
rv_name = self.rv_names[0]
bw_factor = 1.0
bandwidth = 1.0
# NOTE: We are going to use Python and Bokeh to render the tool in the notebook
# output cell, however, we WILL NOT use Python to calculate any of the
# statistics displayed in the tool. We do this so we can make the BROWSER
# run all the calculations based on user interactions. If we did not
# employ this strategy, then the initial display a user would receive
# would be calculated by Python, and any subsequent updates would be
# calculated by JavaScript. The side-effect of having two backends
# calculate data could cause the figures to flicker, which would not be a
# good end user experience.
#
# Bokeh 3.0 is implementing an "on load" feature, which would nullify this
# requirement, and until that version is released, we have to employ this
# work-around.
# Create empty Bokeh sources using Python.
sources = utils.create_sources()
# Create empty figures for the tool using Python.
figures = utils.create_figures(rv_name=rv_name)
# Create empty glyphs and attach them to the figures using Python.
glyphs = utils.create_glyphs()
utils.add_glyphs(sources=sources, figures=figures, glyphs=glyphs)
# Create empty annotations and attach them to the figures using Python.
annotations = utils.create_annotations(sources=sources)
utils.add_annotations(figures=figures, annotations=annotations)
# Create empty tool tips and attach them to the figures using Python.
tooltips = utils.create_tooltips(figures=figures, rv_name=rv_name)
utils.add_tooltips(figures=figures, tooltips=tooltips)
# Create the widgets for the tool using Python.
widgets = utils.create_widgets(
rv_names=self.rv_names,
rv_name=rv_name,
bandwidth=bandwidth,
bw_factor=bw_factor,
)
# Create the view of the tool and serialize it into HTML using static resources
# from Bokeh. Embedding the tool in this manner prevents external CDN calls for
# JavaScript resources, and prevents the user from having to know where the
# Bokeh server is.
tool_view = utils.create_view(figures=figures, widgets=widgets)
# Create callbacks for the tool using JavaScript.
callback_js = f"""
const rvName = widgets.rv_select.value;
const rvData = data[rvName].flat();
let bw = 0.0;
// Remove the CSS classes that dim the tool output on initial load.
const toolTab = toolView.tabs[0];
const toolChildren = toolTab.child.children;
const dimmedComponent = toolChildren[1];
dimmedComponent.css_classes = [];
try {{
bw = marginal1d.update(
rvData,
rvName,
bwFactor,
hdiProbability,
sources,
figures,
tooltips,
);
}} catch (error) {{
{self.tool_js}
bw = marginal1d.update(
rvData,
rvName,
bwFactor,
hdiProbability,
sources,
figures,
tooltips,
);
}}
"""
# Each widget requires the following dictionary for the CustomJS method. Notice
# that the callback_js object above uses the names defined as keys in the below
# object with values defined by the Python objects.
callback_arguments = {
"data": self.data,
"widgets": widgets,
"sources": sources,
"figures": figures,
"tooltips": tooltips,
"toolView": tool_view,
}
# Each widget requires slightly different JS, except for the sliders.
rv_select_js = f"""
const bwFactor = 1.0;
const hdiProbability = 0.89;
widgets.bw_factor_slider.value = bwFactor;
widgets.hdi_slider.value = 100 * hdiProbability;
{callback_js};
widgets.bw_div.text = `Bandwidth: ${{bwFactor * bw}}`;
figures.marginal.reset.emit();
"""
slider_js = f"""
const bwFactor = widgets.bw_factor_slider.value;
const hdiProbability = widgets.hdi_slider.value / 100;
{callback_js};
widgets.bw_div.text = `Bandwidth: ${{bwFactor * bw}}`;
"""
rv_select_callback = CustomJS(args=callback_arguments, code=rv_select_js)
slider_callback = CustomJS(args=callback_arguments, code=slider_js)
# Tell Python to use the JavaScript.
widgets["rv_select"].js_on_change("value", rv_select_callback)
widgets["bw_factor_slider"].js_on_change("value", slider_callback)
widgets["hdi_slider"].js_on_change("value", slider_callback)
return tool_view
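# Illustrative usage sketch (assumes `samples` is a MonteCarloSamples object
# returned by one of Bean Machine's inference methods; the tool renders in a
# notebook output cell):
#
#     from beanmachine.ppl.diagnostics.tools.marginal1d.tool import Marginal1d
#
#     Marginal1d(samples).show()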
| beanmachine-main | src/beanmachine/ppl/diagnostics/tools/marginal1d/tool.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Plotting utilities for the diagnostic tools."""
from typing import List
from bokeh.core.property.nullable import Nullable
from bokeh.core.property.primitive import Null
from bokeh.core.property.wrappers import PropertyValueList
from bokeh.models.layouts import Column, Row
from bokeh.models.renderers import GlyphRenderer
from bokeh.models.tools import ProxyToolbar, ResetTool, SaveTool, ToolbarBox
from bokeh.palettes import Colorblind
from bokeh.plotting.figure import Figure
def style_figure(figure: Figure) -> None:
"""
Style the given Bokeh `figure`.
Args:
figure (Figure): A Bokeh `Figure` object.
Returns:
None: Styles the given figure without copying.
"""
figure.grid.grid_line_alpha = 0.3
figure.grid.grid_line_color = "grey"
figure.grid.grid_line_width = 0.3
figure.xaxis.minor_tick_line_color = "grey"
figure.yaxis.minor_tick_line_color = "grey"
def choose_palette(num_colors: int) -> List[str]:
"""
Determine which palette from Bokeh's Colorblind to use.
Args:
num_colors (int): The number of colors to use for the palette.
Returns:
List[str]: A list of colors to be used as the palette for a figure.
"""
palette_indices = [key for key in Colorblind.keys() if num_colors <= key]
if not palette_indices:
palette_index = max(Colorblind.keys())
else:
palette_index = min(palette_indices)
return Colorblind[palette_index]
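# Worked example: Bokeh's Colorblind palettes are keyed by palette size
# (3 through 8 in current Bokeh releases), so choose_palette(2) falls back to
# the smallest palette, Colorblind[3], while choose_palette(10) exceeds every
# key and returns the largest palette, Colorblind[8].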
def create_toolbar(figures: List[Figure]) -> ToolbarBox:
"""
Create a single toolbar for the given list of figures.
This method ignores all `HoverTool` entries in the final toolbar object. The
    rationale for ignoring them is to prevent the final toolbar from having too much
visual clutter.
Args:
figures (List[Figure]): A list of Bokeh `Figure` objects that all have their own
toolbars that will be merged into one.
Returns:
ToolbarBox: The merged toolbar.
"""
toolbars = []
for figure in figures:
toolbars.append(figure.toolbar)
figure.toolbar_location = Nullable(Null)._default
tools = []
for toolbar in toolbars:
tools.extend(toolbar.tools)
tools = [tool for tool in tools if type(tool).__name__ != "HoverTool"]
if len(tools) == 0:
tools = [SaveTool(), ResetTool()]
return ToolbarBox(
toolbar=ProxyToolbar(toolbars=toolbars, tools=tools),
toolbar_location="right",
)
def create_figure_grid(figures: List[Figure], num_figure_columns: int) -> Row:
"""
Similar to Bokeh's `grid_plot` method, except we merge toolbars in this method.
Args:
figures (List[Figure]): A list of Bokeh `Figure` objects.
num_figure_columns (int): The number of columns for the grid.
Returns:
Row: Returns a single Bokeh `Row` object that contains all the given figures.
"""
toolbar = create_toolbar(figures)
figure_rows = []
while len(figures):
figs = figures[:num_figure_columns]
for i, fig in enumerate(figs):
if i != 0:
fig.yaxis.axis_label = None
figure_rows.append(figs)
for fig in figs:
figures.pop(figures.index(fig))
for i, figure_row in enumerate(figure_rows):
if i != len(figure_rows) - 1:
for fig in figure_row:
fig.xaxis.axis_label = None
figure_layout = []
for i in range(len(figure_rows)):
figure_layout.append(Row(children=figure_rows[i]))
return Row(children=[Column(children=figure_layout), toolbar])
def filter_renderers(
figure: Figure,
search: str,
glyph_type: str = "GlyphRenderer",
substring: bool = False,
) -> List[GlyphRenderer]:
"""
Find renderers in the given figure using the `search` string.
Filters renderers from the given figure based on renderer names that match the given
search parameters.
Args:
figure (Figure): A Bokeh `Figure` object.
search (str): A string to filter renderer names with.
glyph_type (:obj:`str`, optional): The type of renderer to search for in the
figure. Default is `GlyphRenderer`.
substring (:obj:`bool`, optional): Flag to indicate if the given `search` string
should be used as a substring search. Default is `False`.
Returns:
List[GlyphRenderer]: A list of renderers that match the search parameters.
"""
output = []
renderers = PropertyValueList(figure.renderers)
for renderer in renderers:
if renderer.name is not None and type(renderer).__name__ == glyph_type:
if substring and search in renderer.name:
output.append(renderer)
if not substring and renderer.name == search:
output.append(renderer)
return output
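# Illustrative usage sketch (assumes a figure whose renderers were given names
# such as "marginal" and "marginal_hover" when they were created):
#
#     exact = filter_renderers(figure, search="marginal")
#     fuzzy = filter_renderers(figure, search="marginal", substring=True)
#
# `exact` contains only the renderer named "marginal", while `fuzzy` also picks
# up "marginal_hover".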
| beanmachine-main | src/beanmachine/ppl/diagnostics/tools/utils/plotting_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Accessor definition for extending Bean Machine `MonteCarloSamples` objects.
These methods are heavily influenced by the implementations by pandas and xarray.
- `pandas`: https://github.com/pandas-dev/pandas/blob/main/pandas/core/accessor.py
- `xarray`: https://github.com/pydata/xarray/blob/main/xarray/core/extensions.py
"""
from __future__ import annotations
import contextlib
import warnings
from typing import Callable
from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
class CachedAccessor:
"""
A descriptor for caching accessors.
Args:
name (str): Namespace for the accessor.
accessor (object): Class that defines the extension methods.
Attributes:
_name (str): Namespace for the accessor.
_accessor (object): Class that defines the extension methods.
Raises:
RuntimeError: Returned if attempting to overwrite an existing accessor on the
object.
"""
def __init__(self: CachedAccessor, name: str, accessor: object) -> None:
self._name = name
self._accessor = accessor
def __get__(self: CachedAccessor, obj: object, cls: object) -> object:
"""
Method to retrieve the accessor namespace.
Args:
obj (object): Object that the accessor is attached to.
cls (object): Needed for registering the accessor.
Returns:
object: The accessor object.
"""
# Accessing an attribute of the class.
if obj is None:
return self._accessor
try:
cache = obj._cache # type: ignore
except AttributeError:
cache = obj._cache = {}
try:
return cache[self._name]
except KeyError:
contextlib.suppress(KeyError)
try:
accessor_obj = self._accessor(obj) # type: ignore
except Exception as error:
msg = f"error initializing {self._name!r} accessor."
raise RuntimeError(msg) from error
cache[self._name] = accessor_obj
return accessor_obj # noqa: R504 (unnecessary variable assignment)
def _register_accessor(name: str, cls: object) -> Callable:
"""
Method used for registering an accessor to a given object.
Args:
name (str): The name for the accessor.
cls (object): The object the accessor should be attached to.
Returns:
Callable: A decorator for creating accessors.
Raises:
RuntimeError: Returned if attempting to overwrite an existing accessor on the
object.
"""
def decorator(accessor: object) -> object:
if hasattr(cls, name):
warnings.warn(
f"registration of accessor {repr(accessor)} under name "
f"{repr(name)} for type {repr(cls)} is overriding a preexisting "
f"attribute with the same name.",
UserWarning,
stacklevel=2,
)
setattr(cls, name, CachedAccessor(name, accessor))
return accessor
return decorator
def register_mcs_accessor(name: str) -> Callable:
"""
Register an accessor object for `MonteCarloSamples` objects.
Args:
name (str): The name for the accessor.
Returns:
Callable: A decorator for creating the `MonteCarloSamples` accessor.
Raises:
RuntimeError: Returned if attempting to overwrite an existing accessor on the
object.
Example:
>>> from __future__ import annotations
>>> from typing import Dict, List
>>>
>>> import beanmachine.ppl as bm
>>> import numpy as np
>>> import torch.distributions as dist
>>> from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
>>> from beanmachine.ppl.diagnostics.tools.utils import accessor
>>> from torch import tensor
>>>
>>> @bm.random_variable
>>> def alpha():
>>> return dist.Normal(0, 1)
>>>
>>> @bm.random_variable
>>> def beta():
>>> return dist.Normal(0, 1)
>>>
>>> @accessor.register_mcs_accessor("magic")
>>> class MagicAccessor:
>>> def __init__(self: MagicAccessor, mcs: MonteCarloSamples) -> None:
>>> self.mcs = mcs
>>> def show_me(self: MagicAccessor) -> Dict[str, List[List[float]]]:
>>> # Return a JSON serializable object from a MonteCarloSamples object.
>>> return dict(
>>> sorted(
>>> {
>>> str(key): value.tolist()
>>> for key, value in self.mcs.items()
>>> }.items(),
>>> key=lambda item: item[0],
>>> ),
>>> )
>>>
>>> chain_results = {
        >>> beta(): tensor([[4, 3], [2, 1]]),
>>> alpha(): tensor([[1, 2], [3, 4]]),
>>> }
>>> samples = MonteCarloSamples(chain_results=chain_results)
>>> samples.magic.show_me()
{'alpha()': [[1, 2], [3, 4]], 'beta()': [[4, 3], [2, 1]]}
"""
return _register_accessor(name, MonteCarloSamples)
| beanmachine-main | src/beanmachine/ppl/diagnostics/tools/utils/accessor.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Base class for diagnostic tools of a Bean Machine model."""
from __future__ import annotations
import re
from abc import ABC, abstractmethod
from typing import Any, Mapping
from beanmachine.ppl.diagnostics.tools import JS_DIST_DIR
from beanmachine.ppl.diagnostics.tools.utils import plotting_utils
from beanmachine.ppl.diagnostics.tools.utils.model_serializers import serialize_bm
from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
from bokeh.embed import file_html, json_item
from bokeh.models import Model
from bokeh.resources import INLINE
class DiagnosticToolBaseClass(ABC):
"""
Base class for visual diagnostic tools.
Args:
mcs (MonteCarloSamples): The return object from running a Bean Machine model.
Attributes:
data (Dict[str, List[List[float]]]): JSON serializable representation of the
given `mcs` object.
rv_names (List[str]): The list of random variables string names for the given
model.
num_chains (int): The number of chains of the model.
num_draws (int): The number of draws of the model for each chain.
palette (List[str]): A list of color values used for the glyphs in the figures.
The colors are specifically chosen from the Colorblind palette defined in
Bokeh.
        tool_js (str): The JavaScript callbacks needed to render the Bokeh tool
independently from a Python server.
"""
@abstractmethod
def __init__(self: DiagnosticToolBaseClass, mcs: MonteCarloSamples) -> None:
self.data = serialize_bm(mcs)
self.rv_names = ["Select a random variable..."] + list(self.data.keys())
self.num_chains = mcs.num_chains
self.num_draws = mcs.get_num_samples()
self.palette = plotting_utils.choose_palette(self.num_chains)
self.tool_js = self.load_tool_js()
def load_tool_js(self: DiagnosticToolBaseClass) -> str:
"""
Load the JavaScript for the diagnostic tool.
Tools must be built by `yarn` in order for this method to find the appropriate
file. If no file is found, then the tools will not function, and an error will
be shown to the user.
Returns:
str: A string containing all the JavaScript necessary to run the tool in a
notebook.
Raises:
FileNotFoundError: Raised if the diagnostic tool has not been built by
`yarn` prior to its use.
"""
name = self.__class__.__name__
name_tokens = re.findall(r"[A-Z][^A-Z]*", name)
name = "_".join(name_tokens)
path = JS_DIST_DIR.joinpath(f"{name.lower()}.js")
with path.open() as f:
tool_js = f.read()
return tool_js
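    # For example, the class name "Marginal1d" is tokenized by the regular
    # expression above into ["Marginal1d"], so this method looks for
    # "marginal1d.js" in JS_DIST_DIR; a hypothetical class name such as
    # "TraceTool" would map to "trace_tool.js".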
def show(self: DiagnosticToolBaseClass) -> None:
"""
Show the diagnostic tool in the notebook.
This method uses IPython's `display` and `HTML` methods in order to display the
diagnostic tool in a notebook. The Bokeh `Model` object returned by the
`create_document` method is converted to HTML using Bokeh's `file_html` method.
The advantage of encapsulating the tool in this manner is that it allows all the
JavaScript needed to render the tool to exist in the output cell of the
notebook. Doing so will allow the Bokeh Application to render in Google's Colab
or Meta's Bento notebooks, which do not allow calls to third party JavaScript to
be loaded and executed. The disadvantage is that it embeds duplicate JavaScript
if more than one tool is used in a notebook.
"""
# import Ipython only when we need to render the plot, so that we don't
# need to have jupyter notebook as one of our dependencies
from IPython.display import display, HTML
doc = self.create_document()
html = file_html(doc, resources=INLINE, template=self.html_template())
display(HTML(html))
def html_template(self: DiagnosticToolBaseClass) -> str:
"""
HTML template object used to inject CSS styles for Bokeh Applications.
We inject CSS into the output for the diagnostic tools because we need users to
interact with the tool before it renders any statistics. The reason for this is
due to the lack of a callback between Bokeh and BokehJS for an "on load" event.
The necessary callback for an "on load" event is being worked on for the Bokeh
3.0 release. Until Bokeh 3.0 is released, this is a work-around that makes the
user interact with the tool before any rendering occurs.
Returns:
str: Template for injecting CSS in the HTML returned by `create_document`.
"""
return """
{% block postamble %}
<style>
.bk.bm-tool-loading {
overflow: hidden;
}
.bk.bm-tool-loading:before {
position: absolute;
height: 100%;
width: 100%;
content: '';
z-index: 1000;
background-color: rgb(255, 255, 255, 0.75);
border-color: lightgray;
background-repeat: no-repeat;
background-position: center;
background-size: auto 50%;
border-width: 1px;
                cursor: progress;
}
.bk.bm-tool-loading.arcs:hover:before {
content: "Please select a Query from the Select menu above.";
font: x-large Arial, sans-serif;
color: black;
cursor: progress;
display: flex;
justify-content: center;
align-items: center;
}
</style>
{% endblock %}
"""
@abstractmethod
def create_document(self: DiagnosticToolBaseClass) -> Model:
"""To be implemented by the inheriting class."""
...
def _tool_json(self: DiagnosticToolBaseClass) -> Mapping[Any, Any]:
"""
Debugging method used primarily when creating a new diagnostic tool.
Returns:
Dict[Any, Any]: Creates a JSON serializable object using Bokeh's `json_item`
method and the output from `create_document`.
"""
doc = self.create_document()
json_data = json_item(doc)
return json_data
| beanmachine-main | src/beanmachine/ppl/diagnostics/tools/utils/diagnostic_tool_base.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| beanmachine-main | src/beanmachine/ppl/diagnostics/tools/utils/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Collection of serializers for the diagnostics tool use."""
from typing import Dict, List
from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
def serialize_bm(samples: MonteCarloSamples) -> Dict[str, List[List[float]]]:
"""
Convert Bean Machine models to a JSON serializable object.
Args:
samples (MonteCarloSamples): Output of a model from Bean Machine.
    Returns:
Dict[str, List[List[float]]]: The JSON serializable object for use in the
diagnostics tools.
"""
rv_identifiers = list(samples.keys())
reshaped_data = {}
for rv_identifier in rv_identifiers:
rv_data = samples[rv_identifier]
rv_shape = rv_data.shape
num_rv_chains = rv_shape[0]
reshaped_data[f"{str(rv_identifier)}"] = []
for rv_chain in range(num_rv_chains):
chain_data = rv_data[rv_chain, :]
chain_shape = chain_data.shape
if len(chain_shape) > 3 and 1 not in list(chain_shape):
                msg = (
                    "Unable to handle data with dimensionality larger than "
                    "m x n x k x 1."
                )
raise ValueError(msg)
elif len(chain_shape) == 3 and 1 in list(chain_shape):
                if chain_shape[1] == 1:
reshape_dimensions = chain_shape[2]
else:
reshape_dimensions = chain_shape[1]
for i, reshape_dimension in enumerate(range(reshape_dimensions)):
data = rv_data[rv_chain, :, reshape_dimension].reshape(-1)
if f"{str(rv_identifier)}[{i}]" not in reshaped_data:
reshaped_data[f"{str(rv_identifier)}[{i}]"] = []
reshaped_data[f"{str(rv_identifier)}[{i}]"].append(data.tolist())
elif len(chain_shape) == 1:
reshaped_data[f"{str(rv_identifier)}"].append(
rv_data[rv_chain, :].tolist(),
)
model = dict(sorted(reshaped_data.items(), key=lambda item: item[0]))
return model
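# Illustrative sketch of the serialized layout (hypothetical model): a scalar
# random variable "alpha()" sampled over 2 chains with 100 draws per chain
# becomes
#
#     {"alpha()": [[...100 floats...], [...100 floats...]]}
#
# and a variable whose per-chain data has a singleton dimension, e.g. overall
# shape (2, 100, 1, 3), is split into the keys "theta()[0]", "theta()[1]", and
# "theta()[2]".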
| beanmachine-main | src/beanmachine/ppl/diagnostics/tools/utils/model_serializers.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from beanmachine.ppl.inference.proposer.single_site_uniform_proposer import (
SingleSiteUniformProposer,
)
from beanmachine.ppl.inference.single_site_inference import SingleSiteInference
class SingleSiteUniformMetropolisHastings(SingleSiteInference):
"""
Single site uniform Metropolis-Hastings. This single site algorithm proposes
from a uniform distribution (uniform Categorical for discrete variables).
"""
def __init__(self):
super().__init__(SingleSiteUniformProposer)
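# Illustrative usage sketch (hypothetical discrete model function `z`, e.g. one
# with a Categorical prior):
#
#     samples = SingleSiteUniformMetropolisHastings().infer(
#         queries=[z()], observations={}, num_samples=1000, num_chains=1
#     )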
| beanmachine-main | src/beanmachine/ppl/inference/single_site_uniform_mh.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (
SingleSiteAncestralProposer,
)
from beanmachine.ppl.inference.single_site_inference import SingleSiteInference
class SingleSiteAncestralMetropolisHastings(SingleSiteInference):
def __init__(self):
super().__init__(SingleSiteAncestralProposer)
| beanmachine-main | src/beanmachine/ppl/inference/single_site_ancestral_mh.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional, Union
import torch
from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
from beanmachine.ppl.inference.single_site_ancestral_mh import (
SingleSiteAncestralMetropolisHastings,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import init_from_prior, RVDict, World
from torch import Tensor
from torch.distributions import Categorical
from tqdm.auto import trange
def _concat_rv_dicts(rvdict: List) -> Dict:
out_dict = {}
keys = list(rvdict[0].keys())
for k in keys:
t = []
for x in rvdict:
t.append(x[k])
out_dict[k] = torch.cat(t, -1).squeeze(0)
return out_dict
class Predictive(object):
"""
Class for the posterior predictive distribution.
"""
@staticmethod
def _extract_values_from_world(
world: World, queries: List[RVIdentifier]
) -> Dict[RVIdentifier, Tensor]:
query_dict = {query: [] for query in queries}
# Extract samples
for query in queries:
raw_val = world.call(query)
if not isinstance(raw_val, torch.Tensor):
raise TypeError(
"The value returned by a queried function must be a tensor."
)
query_dict[query].append(raw_val)
query_dict = {node: torch.stack(val) for node, val in query_dict.items()}
return query_dict
@staticmethod # noqa: C901
def simulate( # noqa: C901
queries: List[RVIdentifier],
posterior: Optional[Union[MonteCarloSamples, RVDict]] = None,
num_samples: Optional[int] = None,
vectorized: Optional[bool] = False,
progress_bar: Optional[bool] = True,
) -> MonteCarloSamples:
"""
Generates predictives from a generative model.
For example::
            obs_queries = [likelihood(i) for i in range(10)]
            posterior = SingleSiteHamiltonianMonteCarlo(10, 0.1).infer(...)
# generates one sample per world (same shape as `posterior` samples)
predictives = simulate(obs_queries, posterior=posterior)
To generate prior predictives::
queries = [prior(), likelihood()] # specify the full generative model
# Monte carlo samples of shape (num_samples, sample_shape)
predictives = simulate(queries, num_samples=1000)
        :param queries: list of `random_variable`'s corresponding to the observations.
        :param posterior: Optional `MonteCarloSamples` or `RVDict` of the latent variables.
        :param num_samples: Number of prior predictive samples to generate. Should
            not be specified if `posterior` is specified.
:returns: `MonteCarloSamples` of the generated predictives.
"""
assert (
(posterior is not None) + (num_samples is not None)
) == 1, "Only one of posterior or num_samples should be set."
inference = SingleSiteAncestralMetropolisHastings()
if posterior is not None:
if isinstance(posterior, dict):
posterior = MonteCarloSamples([posterior])
obs = dict(posterior)
if vectorized:
sampler = inference.sampler(
queries, obs, num_samples, initialize_fn=init_from_prior
)
query_dict = Predictive._extract_values_from_world(
next(sampler), queries
)
for rvid, rv in query_dict.items():
if rv.dim() > 2:
query_dict[rvid] = rv.squeeze(0)
post_pred = MonteCarloSamples(
query_dict,
default_namespace="posterior_predictive",
)
post_pred.add_groups(posterior)
return post_pred
else:
# predictives are sequentially sampled
preds = []
for c in range(posterior.num_chains):
rv_dicts = []
for i in trange(
posterior.get_num_samples(),
desc="Samples collected",
disable=not progress_bar,
):
obs = {rv: posterior.get_chain(c)[rv][i] for rv in posterior}
sampler = inference.sampler(
queries, obs, num_samples, initialize_fn=init_from_prior
)
rv_dicts.append(
Predictive._extract_values_from_world(
next(sampler), queries
)
)
preds.append(_concat_rv_dicts(rv_dicts))
post_pred = MonteCarloSamples(
preds,
default_namespace="posterior_predictive",
)
post_pred.add_groups(posterior)
return post_pred
else:
obs = {}
predictives = []
for _ in trange(
# pyre-fixme[6]: For 1st param expected `int` but got `Optional[int]`.
num_samples,
desc="Samples collected",
disable=not progress_bar,
):
sampler = inference.sampler(
queries, obs, num_samples, initialize_fn=init_from_prior
)
query_dict = Predictive._extract_values_from_world(
next(sampler), queries
)
predictives.append(query_dict)
rv_dict = {}
for k in predictives:
for rvid, rv in k.items():
if rvid not in rv_dict:
rv_dict[rvid] = []
if rv.dim() < 2:
rv = rv.unsqueeze(0)
rv_dict[rvid].append(rv)
for k, v in rv_dict.items():
rv_dict[k] = torch.cat(v, dim=1)
prior_pred = MonteCarloSamples(
rv_dict,
default_namespace="prior_predictive",
)
return prior_pred
@staticmethod
def empirical(
queries: List[RVIdentifier],
samples: MonteCarloSamples,
num_samples: Optional[int] = 1,
) -> MonteCarloSamples:
"""
Samples from the empirical (marginal) distribution of the queried variables.
:param queries: list of `random_variable`'s to be sampled.
:param samples: `MonteCarloSamples` of the distribution.
:param num_samples: Number of samples to sample (with replacement). Defaults to 1.
:returns: `MonteCarloSamples` object containing the sampled random variables.
"""
rv_dict = {}
num_chains = samples.num_chains
total_num_samples = samples.get_num_samples()
chain_indices = Categorical(torch.ones(num_chains)).sample((num_samples,))
sample_indices = Categorical(torch.ones(total_num_samples)).sample(
(num_samples,)
)
for q in queries:
rv_dict[q] = samples.get_variable(q, include_adapt_steps=False)[
chain_indices, sample_indices
]
return MonteCarloSamples([rv_dict])
simulate = Predictive.simulate
empirical = Predictive.empirical
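# Illustrative usage sketch (hypothetical model functions `theta` and `obs`,
# hypothetical observed tensor `data`, and `import beanmachine.ppl as bm`):
#
#     posterior = bm.GlobalNoUTurnSampler().infer(
#         queries=[theta()], observations={obs(): data}, num_samples=500
#     )
#     post_pred = simulate([obs()], posterior=posterior)
#     marginal = empirical([theta()], posterior, num_samples=100)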
| beanmachine-main | src/beanmachine/ppl/inference/predictive.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
from collections import defaultdict
from typing import (
Callable,
cast,
Dict,
List,
Optional,
Set,
Tuple,
TYPE_CHECKING,
Union,
)
from beanmachine.ppl.experimental.torch_jit_backend import get_backend
from beanmachine.ppl.inference.base_inference import BaseInference
from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer
from beanmachine.ppl.inference.proposer.nuts_proposer import NUTSProposer
from beanmachine.ppl.inference.proposer.sequential_proposer import SequentialProposer
from beanmachine.ppl.inference.proposer.single_site_uniform_proposer import (
SingleSiteUniformProposer,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import World
if TYPE_CHECKING:
from enum import Enum
class EllipsisClass(Enum):
Ellipsis = "..."
def __iter__(self):
pass
Ellipsis: EllipsisClass = EllipsisClass.Ellipsis
else:
EllipsisClass = type(Ellipsis)
class _DefaultInference(BaseInference):
"""
Mixed inference class that handles both discrete and continuous RVs
"""
def __init__(self, nnc_compile: bool = True):
self._disc_proposers = {}
self._cont_proposer = None
self._continuous_rvs = set()
self._jit_backend = get_backend(nnc_compile, False)
def get_proposers(
self,
world: World,
target_rvs: Set[RVIdentifier],
num_adaptive_sample: int,
) -> List[BaseProposer]:
proposers = []
for node in target_rvs:
if node not in self._disc_proposers:
support = world.get_variable(node).distribution.support
if not support.is_discrete:
self._continuous_rvs.add(node)
continue
else:
self._disc_proposers[node] = SingleSiteUniformProposer(node)
proposers.append(self._disc_proposers[node])
if self._cont_proposer is not None:
if len(self._cont_proposer._target_rvs) != len(self._continuous_rvs):
raise ValueError(
"Graph has changed between iterations. NUTS requires a"
" static model."
)
proposers.append(self._cont_proposer)
else:
if len(self._continuous_rvs):
continuous_proposer = NUTSProposer(
world,
self._continuous_rvs,
num_adaptive_sample,
jit_backend=self._jit_backend,
)
self._cont_proposer = continuous_proposer
proposers.append(self._cont_proposer)
return proposers
def _get_rv_family(rv_wrapper: Callable) -> Callable:
"""A helper function that return the unbounded function for a give random variable
wrapper"""
if inspect.ismethod(rv_wrapper):
# For methods, we'll need to use the unbounded function instead of the
# bounded method to determine which proposer to apply
return cast(Callable, rv_wrapper.__func__)
else:
return rv_wrapper
def _get_nodes_for_rv_family(
rv_families: Union[Callable, Tuple[Callable, ...]],
rv_family_to_node: Dict[Callable, Set[RVIdentifier]],
) -> Set[RVIdentifier]:
"""A helper function that returns a list of nodes that belong to a particular RV
family (or a particular tuple of RV families)"""
# collect all nodes that belong to rv_families
families = {rv_families} if isinstance(rv_families, Callable) else set(rv_families)
nodes = set().union(*(rv_family_to_node.get(family, set()) for family in families))
return nodes
class CompositionalInference(BaseInference):
"""
The ``CompositionalInference`` class enables combining multiple inference algorithms
and blocking random variables together. By default, continuous variables will be
blocked together and use the ``GlobalNoUTurnProposer``. Discrete variables will
be proposed independently with ``SingleSiteUniformProposer``.
To override the default behavior, you can pass an ``inference_dict``. To learn more
about Compositional Inference, please see the `Compositional Inference
<https://beanmachine.org/docs/compositional_inference/>`_ page on our website.
Example 0 (use different inference method for different random variable families)::
CompositionalInference({
model.foo: bm.SingleSiteAncestralMetropolisHastings(),
model.bar: bm.SingleSiteNewtonianMonteCarlo(),
})
Example 1 (override default inference method)::
CompositionalInference({...: bm.SingleSiteAncestralMetropolisHastings()})
Example 2 (block inference (jointly propose) ``model.foo`` and ``model.bar``)::
CompositionalInference({(model.foo, model.bar): bm.GlobalNoUTurnSampler()})
.. warning::
When using the default inference behavior, graphs (i.e. the number of latent variables)
must be static and cannot change between iterations.
Args:
inference_dict: an optional inference configuration as shown above.
nnc_compile: where available, use NNC to compile proposers.
"""
def __init__(
self,
inference_dict: Optional[
Dict[
Union[Callable, Tuple[Callable, ...], EllipsisClass],
Union[BaseInference, Tuple[BaseInference, ...], EllipsisClass],
]
] = None,
nnc_compile: bool = True,
):
self.config: Dict[Union[Callable, Tuple[Callable, ...]], BaseInference] = {}
# create a set for the RV families that are being covered in the config; this is
# useful in get_proposers to determine which RV needs to be handle by the
# default inference method
self._covered_rv_families = set()
default_inference = _DefaultInference(nnc_compile=nnc_compile)
if inference_dict is not None:
default_inference = inference_dict.pop(Ellipsis, default_inference)
assert isinstance(default_inference, BaseInference)
# preprocess inference dict
for rv_families, inference in inference_dict.items():
# parse key
if isinstance(rv_families, Callable):
config_key = _get_rv_family(rv_families)
self._covered_rv_families.add(config_key)
else:
# key is a tuple/block of families
config_key = tuple(map(_get_rv_family, rv_families))
self._covered_rv_families.update(config_key)
# parse value
if isinstance(inference, BaseInference):
config_val = inference
elif inference == Ellipsis:
config_val = default_inference
else:
# value is a tuple of inferences
assert isinstance(inference, tuple)
# there should be a one to one relationship between key and value
assert isinstance(config_key, tuple) and len(config_key) == len(
inference
)
# convert to an equivalent nested compositional inference
config_val = CompositionalInference(
{
rv_family: algorithm
for rv_family, algorithm in zip(config_key, inference)
}
)
self.config[config_key] = config_val
self._default_inference = default_inference
def _get_default_num_adaptive_samples(self, num_samples: int) -> int:
"""Returns the default number of adaptive samples for CompositionalInference,
which equals to the maximum number of adaptive samples recommended by each
algorithm in the inference config."""
num_adaptive_per_algorithm = [
self._default_inference._get_default_num_adaptive_samples(num_samples)
]
for inference in self.config.values():
num_adaptive_per_algorithm.append(
inference._get_default_num_adaptive_samples(num_samples)
)
return max(num_adaptive_per_algorithm)
def get_proposers(
self,
world: World,
target_rvs: Set[RVIdentifier],
num_adaptive_sample: int,
) -> List[BaseProposer]:
# create a RV family to RVIdentifier lookup map
rv_family_to_node = defaultdict(set)
for node in target_rvs:
rv_family_to_node[node.wrapper].add(node)
all_proposers = []
for target_families, inference in self.config.items():
nodes = _get_nodes_for_rv_family(target_families, rv_family_to_node)
if len(nodes) > 0:
proposers = inference.get_proposers(world, nodes, num_adaptive_sample)
if isinstance(target_families, tuple):
# tuple of RVs == block into a single accept/reject step
proposers = [SequentialProposer(proposers)]
all_proposers.extend(proposers)
# apply default proposers on nodes whose family are not covered by any of the
# proposers listed in the config
remaining_families = rv_family_to_node.keys() - self._covered_rv_families
remaining_nodes = _get_nodes_for_rv_family(
tuple(remaining_families), rv_family_to_node
)
if len(remaining_nodes) > 0:
proposers = self._default_inference.get_proposers(
world, remaining_nodes, num_adaptive_sample
)
all_proposers.extend(proposers)
return all_proposers
| beanmachine-main | src/beanmachine/ppl/inference/compositional_infer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import warnings
from abc import ABCMeta, abstractmethod
from functools import partial
from typing import List, Optional, Set, Tuple
import torch
from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer
from beanmachine.ppl.inference.sampler import Sampler
from beanmachine.ppl.inference.utils import (
_execute_in_new_thread,
_verify_queries_and_observations,
seed as set_seed,
VerboseLevel,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import init_to_uniform, InitializeFn, RVDict, World
from torch import multiprocessing as mp
from tqdm.auto import tqdm
from tqdm.notebook import tqdm as notebook_tqdm
from typing_extensions import Literal
class BaseInference(metaclass=ABCMeta):
"""
Abstract class all inference methods should inherit from.
"""
# maximum value of a seed
_MAX_SEED_VAL: int = 2**32 - 1
@abstractmethod
def get_proposers(
self,
world: World,
target_rvs: Set[RVIdentifier],
num_adaptive_sample: int,
) -> List[BaseProposer]:
"""
Returns the proposer(s) corresponding to every non-observed variable
in target_rvs. Should be implemented by the specific inference algorithm.
"""
raise NotImplementedError
def _get_default_num_adaptive_samples(self, num_samples: int) -> int:
"""
Returns a reasonable default number of adaptive samples for the algorithm.
"""
return 0
def _single_chain_infer(
self,
queries: List[RVIdentifier],
observations: RVDict,
num_samples: int,
num_adaptive_samples: int,
show_progress_bar: bool,
initialize_fn: InitializeFn,
max_init_retries: int,
chain_id: int,
seed: Optional[int] = None,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
"""
Run a single chain of inference. Return a list of samples (in the same order as
the queries) and a list of log likelihood on observations
Args:
queries: A list of queries.
observations: A dictionary of observations.
num_samples: Number of samples.
num_adaptive_samples: Number of adaptive samples.
show_progress_bar: Whether to display the progress bar.
initialize_fn: A callable that takes in a distribution and returns a Tensor.
max_init_retries: The number of attempts to make to initialize values for an
inference before throwing an error.
chain_id: The index of the current chain.
seed: If provided, the seed will be used to initialize the state of the
random number generators for the current chain
"""
if seed is not None:
set_seed(seed)
# A hack to fix the issue where tqdm doesn't render progress bar correctly in
# subprocess in Jupyter notebook (https://github.com/tqdm/tqdm/issues/485)
if show_progress_bar and issubclass(tqdm, notebook_tqdm):
print(" ", end="", flush=True)
sampler = self.sampler(
queries,
observations,
num_samples,
num_adaptive_samples,
initialize_fn,
max_init_retries,
)
samples = [[] for _ in queries]
log_likelihoods = [[] for _ in observations]
# Main inference loop
for world in tqdm(
sampler,
total=num_samples + num_adaptive_samples,
desc="Samples collected",
disable=not show_progress_bar,
position=chain_id,
):
for idx, obs in enumerate(observations):
log_likelihoods[idx].append(world.log_prob([obs]))
# Extract samples
for idx, query in enumerate(queries):
raw_val = world.call(query)
if not isinstance(raw_val, torch.Tensor):
raise TypeError(
"The value returned by a queried function must be a tensor."
)
samples[idx].append(raw_val)
samples = [torch.stack(val) for val in samples]
log_likelihoods = [torch.stack(val) for val in log_likelihoods]
return samples, log_likelihoods
def infer(
self,
queries: List[RVIdentifier],
observations: RVDict,
num_samples: int,
num_chains: int = 4,
num_adaptive_samples: Optional[int] = None,
show_progress_bar: bool = True,
initialize_fn: InitializeFn = init_to_uniform,
max_init_retries: int = 100,
run_in_parallel: bool = False,
mp_context: Optional[Literal["fork", "spawn", "forkserver"]] = None,
verbose: Optional[VerboseLevel] = None,
) -> MonteCarloSamples:
"""
Performs inference and returns a ``MonteCarloSamples`` object with samples from the posterior.
Args:
queries: List of queries
observations: Observations as an RVDict keyed by RVIdentifier
num_samples: Number of samples.
num_chains: Number of chains to run, defaults to 4.
num_adaptive_samples: Number of adaptive samples. If not provided, BM will
fall back to algorithm-specific default value based on num_samples.
show_progress_bar: Whether to display the progress bar, defaults to True.
initialize_fn: A callable that takes in a distribution and returns a Tensor.
The default behavior is to sample from Uniform(-2, 2) then biject to
the support of the distribution.
max_init_retries: The number of attempts to make to initialize values for an
inference before throwing an error (default to 100).
run_in_parallel: Whether to run multiple chains in parallel (with multiple
processes).
mp_context: The `multiprocessing context <https://docs.python.org/3.8/library/multiprocessing.html#contexts-and-start-methods>`_
to used for parallel inference.
verbose: (Deprecated) Whether to display the progress bar. This option
is deprecated, please use ``show_progress_bar`` instead.
"""
if verbose is not None:
warnings.warn(
"The `verbose` argument and `VerboseLevel` are "
"deprecated and will be removed in the next release of Bean "
"Machine. Please use `show_progress_bar` instead.",
DeprecationWarning,
stacklevel=2, # show the caller rather than this line
)
show_progress_bar = bool(verbose)
_verify_queries_and_observations(
queries, observations, observations_must_be_rv=True
)
if num_adaptive_samples is None:
num_adaptive_samples = self._get_default_num_adaptive_samples(num_samples)
single_chain_infer = partial(
self._single_chain_infer,
queries,
observations,
num_samples,
num_adaptive_samples,
show_progress_bar,
initialize_fn,
max_init_retries,
)
if not run_in_parallel:
chain_results = map(single_chain_infer, range(num_chains))
else:
ctx = mp.get_context(mp_context)
# We'd like to explicitly set a different seed for each process to avoid
# duplicating the same RNG state for all chains
first_seed = torch.randint(self._MAX_SEED_VAL, ()).item()
seeds = [
(first_seed + 31 * chain_id) % self._MAX_SEED_VAL
for chain_id in range(num_chains)
]
# run single chain inference in a new thread in subprocesses to avoid
# forking corrupted internal states
# (https://github.com/pytorch/pytorch/issues/17199)
single_chain_infer = partial(_execute_in_new_thread, single_chain_infer)
with ctx.Pool(
processes=num_chains, initializer=tqdm.set_lock, initargs=(ctx.Lock(),)
) as p:
chain_results = p.starmap(single_chain_infer, enumerate(seeds))
        all_samples, all_log_likelihoods = zip(*chain_results)
        # the hash of RVIdentifier can change when it is being sent to another process,
        # so we have to rely on the order of the returned list to determine which samples
        # correspond to which RVIdentifier
        all_samples = [dict(zip(queries, samples)) for samples in all_samples]
        # in Python the order of keys in a dict is fixed, so we can rely on it
        all_log_likelihoods = [
            dict(zip(observations.keys(), log_likelihoods))
            for log_likelihoods in all_log_likelihoods
        ]
        return MonteCarloSamples(
            all_samples,
            num_adaptive_samples,
            all_log_likelihoods,
observations,
)
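    # Illustrative usage sketch (hypothetical model functions `theta` and `x`,
    # assuming `import beanmachine.ppl as bm` and `import torch`): any concrete
    # subclass exposes this entry point, e.g.
    #
    #     samples = bm.SingleSiteAncestralMetropolisHastings().infer(
    #         queries=[theta()],
    #         observations={x(): torch.tensor(1.0)},
    #         num_samples=1000,
    #         num_chains=2,
    #     )
    #     samples[theta()]  # posterior draws, one row per chain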
def sampler(
self,
queries: List[RVIdentifier],
observations: RVDict,
num_samples: Optional[int] = None,
num_adaptive_samples: Optional[int] = None,
initialize_fn: InitializeFn = init_to_uniform,
max_init_retries: int = 100,
) -> Sampler:
"""
Returns a generator that returns a new world (represents a new state of the
graph) each time it is iterated. If num_samples is not provided, this method
will return an infinite generator.
Args:
queries: List of queries
observations: Observations as an RVDict keyed by RVIdentifier
num_samples: Number of samples, defaults to None for an infinite sampler.
num_adaptive_samples: Number of adaptive samples. If not provided, BM will
fall back to algorithm-specific default value based on num_samples. If
num_samples is not provided either, then defaults to 0.
initialize_fn: A callable that takes in a distribution and returns a Tensor.
The default behavior is to sample from Uniform(-2, 2) then biject to
the support of the distribution.
max_init_retries: The number of attempts to make to initialize values for an
inference before throwing an error (default to 100).
"""
_verify_queries_and_observations(
queries, observations, observations_must_be_rv=True
)
if num_adaptive_samples is None:
if num_samples is None:
num_adaptive_samples = 0
else:
num_adaptive_samples = self._get_default_num_adaptive_samples(
num_samples
)
world = World.initialize_world(
queries,
observations,
initialize_fn,
max_init_retries,
)
# start inference with a copy of self to ensure that multi-chain or multi
# inference runs all start with the same pristine state
kernel = copy.deepcopy(self)
sampler = Sampler(kernel, world, num_samples, num_adaptive_samples)
return sampler
| beanmachine-main | src/beanmachine/ppl/inference/base_inference.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List, Set
import torch.distributions as dist
from beanmachine.ppl.inference.base_inference import BaseInference
from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer
from beanmachine.ppl.inference.proposer.nmc import (
SingleSiteHalfSpaceNMCProposer,
SingleSiteRealSpaceNMCProposer,
SingleSiteSimplexSpaceNMCProposer,
)
from beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (
SingleSiteAncestralProposer,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import World
from beanmachine.ppl.world.utils import BetaDimensionTransform, is_constraint_eq
LOGGER = logging.getLogger("beanmachine")
class SingleSiteNewtonianMonteCarlo(BaseInference):
"""
Single site Newtonian Monte Carlo [1]. This algorithm selects a proposer
based on the support of the random variable. Valid supports include real, positive real, and simplex.
Each site is proposed independently.
[1] Arora, Nim, et al. `Newtonian Monte Carlo: single-site MCMC meets second-order gradient methods`
Args:
real_space_alpha: alpha value for real space as specified in [1], defaults to 10.0
real_space_beta: beta value for real space as specified in [1], defaults to 1.0
"""
def __init__(
self,
real_space_alpha: float = 10.0,
real_space_beta: float = 1.0,
):
self._proposers = {}
self.alpha = real_space_alpha
self.beta = real_space_beta
def get_proposers(
self,
world: World,
target_rvs: Set[RVIdentifier],
num_adaptive_sample: int,
) -> List[BaseProposer]:
proposers = []
for node in target_rvs:
if node not in self._proposers:
self._proposers[node] = self._init_nmc_proposer(node, world)
proposers.append(self._proposers[node])
return proposers
def _init_nmc_proposer(self, node: RVIdentifier, world: World) -> BaseProposer:
"""
A helper function that initialize a NMC proposer for the given node. The type
of NMC proposer will be chosen based on a node's support.
"""
distribution = world.get_variable(node).distribution
support = distribution.support
if is_constraint_eq(support, dist.constraints.real):
return SingleSiteRealSpaceNMCProposer(node, self.alpha, self.beta)
elif any(
is_constraint_eq(
support,
(dist.constraints.greater_than, dist.constraints.greater_than_eq),
)
):
return SingleSiteHalfSpaceNMCProposer(node)
elif is_constraint_eq(support, dist.constraints.simplex) or (
isinstance(support, dist.constraints.independent)
and (support.base_constraint == dist.constraints.unit_interval)
):
return SingleSiteSimplexSpaceNMCProposer(node)
elif isinstance(distribution, dist.Beta):
return SingleSiteSimplexSpaceNMCProposer(
node, transform=BetaDimensionTransform()
)
else:
LOGGER.warning(
f"Node {node} has unsupported constraints. "
+ "Proposer falls back to SingleSiteAncestralProposer.\n"
)
return SingleSiteAncestralProposer(node)
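# Proposer selection sketch (hypothetical examples): a random variable with a
# Normal prior has real support and gets the real-space NMC proposer, a Gamma
# prior (positive support) gets the half-space proposer, and a Dirichlet prior
# (simplex support) gets the simplex proposer; anything else falls back to the
# single-site ancestral proposer.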
| beanmachine-main | src/beanmachine/ppl/inference/single_site_nmc.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from beanmachine.ppl.inference.bmg_inference import BMGInference
from beanmachine.ppl.inference.compositional_infer import CompositionalInference
from beanmachine.ppl.inference.hmc_inference import (
GlobalHamiltonianMonteCarlo,
SingleSiteHamiltonianMonteCarlo,
)
from beanmachine.ppl.inference.nuts_inference import (
GlobalNoUTurnSampler,
SingleSiteNoUTurnSampler,
)
from beanmachine.ppl.inference.predictive import empirical, simulate
from beanmachine.ppl.inference.single_site_ancestral_mh import (
SingleSiteAncestralMetropolisHastings,
)
from beanmachine.ppl.inference.single_site_nmc import SingleSiteNewtonianMonteCarlo
from beanmachine.ppl.inference.single_site_random_walk import SingleSiteRandomWalk
from beanmachine.ppl.inference.single_site_uniform_mh import (
SingleSiteUniformMetropolisHastings,
)
from beanmachine.ppl.inference.utils import seed, VerboseLevel
__all__ = [
"BMGInference",
"CompositionalInference",
"GlobalHamiltonianMonteCarlo",
"GlobalNoUTurnSampler",
"SingleSiteAncestralMetropolisHastings",
"SingleSiteHamiltonianMonteCarlo",
"SingleSiteNewtonianMonteCarlo",
"SingleSiteNoUTurnSampler",
"SingleSiteRandomWalk",
"SingleSiteUniformMetropolisHastings",
"VerboseLevel",
"empirical",
"seed",
"simulate",
]
| beanmachine-main | src/beanmachine/ppl/inference/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Set
from beanmachine.ppl.experimental.torch_jit_backend import get_backend
from beanmachine.ppl.inference.base_inference import BaseInference
from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer
from beanmachine.ppl.inference.proposer.hmc_proposer import HMCProposer
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import World
class GlobalHamiltonianMonteCarlo(BaseInference):
"""
Global (multi-site) Hamiltonian Monte Carlo [1] sampler. This global sampler blocks
all of the target random_variables in the World together and proposes them jointly.
[1] Neal, Radford. `MCMC Using Hamiltonian Dynamics`.
Args:
trajectory_length (float): Length of single trajectory.
initial_step_size (float): Defaults to 1.0.
adapt_step_size (bool): Whether to adapt the step size, Defaults to True,
adapt_mass_matrix (bool): Whether to adapt the mass matrix. Defaults to True,
target_accept_prob (float): Target accept prob. Increasing this value would lead
to smaller step size. Defaults to 0.8.
nnc_compile: If True, NNC compiler will be used to accelerate the
inference.
experimental_inductor_compile: If True, TorchInductor will be used to
accelerate the inference.
"""
def __init__(
self,
trajectory_length: float,
initial_step_size: float = 1.0,
adapt_step_size: bool = True,
adapt_mass_matrix: bool = True,
full_mass_matrix: bool = False,
target_accept_prob: float = 0.8,
nnc_compile: bool = False,
experimental_inductor_compile: bool = False,
):
self.trajectory_length = trajectory_length
self.initial_step_size = initial_step_size
self.adapt_step_size = adapt_step_size
self.adapt_mass_matrix = adapt_mass_matrix
self.full_mass_matrix = full_mass_matrix
self.target_accept_prob = target_accept_prob
self.jit_backend = get_backend(nnc_compile, experimental_inductor_compile)
self._proposer = None
def _get_default_num_adaptive_samples(self, num_samples: int) -> int:
return num_samples // 2
def get_proposers(
self,
world: World,
target_rvs: Set[RVIdentifier],
num_adaptive_sample: int,
) -> List[BaseProposer]:
if self._proposer is None:
self._proposer = HMCProposer(
world,
target_rvs,
num_adaptive_sample,
self.trajectory_length,
self.initial_step_size,
self.adapt_step_size,
self.adapt_mass_matrix,
self.full_mass_matrix,
self.target_accept_prob,
self.jit_backend,
)
return [self._proposer]
class SingleSiteHamiltonianMonteCarlo(BaseInference):
"""
Single site Hamiltonian Monte Carlo [1] sampler. During inference, each random
variable is proposed through its own leapfrog trajectory while fixing the rest of
World as constant.
[1] Neal, Radford. `MCMC Using Hamiltonian Dynamics`.
Args:
trajectory_length (float): Length of single trajectory.
initial_step_size (float): Defaults to 1.0.
adapt_step_size (bool): Whether to adapt the step size, Defaults to True,
adapt_mass_matrix (bool): Whether to adapt the mass matrix. Defaults to True,
target_accept_prob (float): Target accept prob. Increasing this value would lead
to smaller step size. Defaults to 0.8.
nnc_compile: If True, NNC compiler will be used to accelerate the
inference.
experimental_inductor_compile: If True, TorchInductor will be used to
accelerate the inference.
"""
def __init__(
self,
trajectory_length: float,
initial_step_size: float = 1.0,
adapt_step_size: bool = True,
adapt_mass_matrix: bool = True,
full_mass_matrix: bool = False,
target_accept_prob: float = 0.8,
nnc_compile: bool = True,
experimental_inductor_compile: bool = False,
):
self.trajectory_length = trajectory_length
self.initial_step_size = initial_step_size
self.adapt_step_size = adapt_step_size
self.adapt_mass_matrix = adapt_mass_matrix
self.full_mass_matrix = full_mass_matrix
self.target_accept_prob = target_accept_prob
self.jit_backend = get_backend(nnc_compile, experimental_inductor_compile)
self._proposers = {}
def _get_default_num_adaptive_samples(self, num_samples: int) -> int:
return num_samples // 2
def get_proposers(
self,
world: World,
target_rvs: Set[RVIdentifier],
num_adaptive_sample: int,
) -> List[BaseProposer]:
proposers = []
for node in target_rvs:
if node not in self._proposers:
self._proposers[node] = HMCProposer(
world,
{node},
num_adaptive_sample,
self.trajectory_length,
self.initial_step_size,
self.adapt_step_size,
self.adapt_mass_matrix,
self.full_mass_matrix,
self.target_accept_prob,
self.jit_backend,
)
proposers.append(self._proposers[node])
return proposers
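if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module: a minimal
    # Normal-Normal model run through GlobalHamiltonianMonteCarlo. The model,
    # the observed value and the sampler settings below are assumptions made
    # up for this example; only the inference API defined above is relied on.
    import beanmachine.ppl as bm
    import torch
    import torch.distributions as dist
    @bm.random_variable
    def mu():
        return dist.Normal(0.0, 1.0)
    @bm.random_variable
    def x():
        return dist.Normal(mu(), 1.0)
    samples = GlobalHamiltonianMonteCarlo(trajectory_length=1.0).infer(
        queries=[mu()],
        observations={x(): torch.tensor(0.5)},
        num_samples=200,
        num_chains=2,
    )
    print(samples[mu()].mean())  # posterior mean, roughly 0.25 for this model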
| beanmachine-main | src/beanmachine/ppl/inference/hmc_inference.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Set, Type
from beanmachine.ppl.inference.base_inference import BaseInference
from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer
from beanmachine.ppl.inference.proposer.base_single_site_mh_proposer import (
BaseSingleSiteMHProposer,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import World
class SingleSiteInference(BaseInference):
"""
Base class for single site inference algorithms.
Args:
proposer_class: Class of proposer to initialize with
"""
def __init__(self, proposer_class: Type[BaseSingleSiteMHProposer], **kwargs):
self.proposer_class = proposer_class
self.inference_args = kwargs
self._proposers = {}
def get_proposers(
self,
world: World,
target_rvs: Set[RVIdentifier],
num_adaptive_sample: int,
) -> List[BaseProposer]:
proposers = []
for node in target_rvs:
if node not in self._proposers:
self._proposers[node] = self.proposer_class( # pyre-ignore [45]
node, **self.inference_args
)
proposers.append(self._proposers[node])
return proposers
| beanmachine-main | src/beanmachine/ppl/inference/single_site_inference.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Set
from beanmachine.ppl.experimental.torch_jit_backend import get_backend
from beanmachine.ppl.inference.base_inference import BaseInference
from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer
from beanmachine.ppl.inference.proposer.nuts_proposer import NUTSProposer
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import World
class GlobalNoUTurnSampler(BaseInference):
"""
Global No U-turn sampler [1]. This sampler blocks multiple variables together in the
World and samples them jointly. This sampler adaptively sets the hyperparameters of
the HMC kernel.
[1] Hoffman and Gelman. `The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo`.
[2] Betancourt, Michael. `A Conceptual Introduction to Hamiltonian Monte Carlo`.
Args:
max_tree_depth (int): Maximum tree depth, defaults to 10.
max_delta_energy (float): Maximum delta energy (for numerical stability),
defaults to 1000.
initial_step_size (float): Defaults to 1.0.
adapt_step_size (bool): Whether to adapt step size with Dual averaging as
suggested in [1], defaults to True.
        adapt_mass_matrix (bool): Whether to adapt the mass matrix using a Welford
            scheme, defaults to True.
multinomial_sampling (bool): Whether to use multinomial sampling as in [2],
defaults to True.
target_accept_prob (float): Target accept probability. Increasing this would
lead to smaller step size. Defaults to 0.8.
nnc_compile: If True, NNC compiler will be used to accelerate the
inference.
experimental_inductor_compile: If True, TorchInductor will be used to
accelerate the inference.
"""
def __init__(
self,
max_tree_depth: int = 10,
max_delta_energy: float = 1000.0,
initial_step_size: float = 1.0,
adapt_step_size: bool = True,
adapt_mass_matrix: bool = True,
full_mass_matrix: bool = False,
multinomial_sampling: bool = True,
target_accept_prob: float = 0.8,
nnc_compile: bool = True,
experimental_inductor_compile: bool = False,
):
self.max_tree_depth = max_tree_depth
self.max_delta_energy = max_delta_energy
self.initial_step_size = initial_step_size
self.adapt_step_size = adapt_step_size
self.adapt_mass_matrix = adapt_mass_matrix
self.full_mass_matrix = full_mass_matrix
self.multinomial_sampling = multinomial_sampling
self.target_accept_prob = target_accept_prob
self.jit_backend = get_backend(nnc_compile, experimental_inductor_compile)
self._proposer = None
def _get_default_num_adaptive_samples(self, num_samples: int) -> int:
return num_samples // 2
def get_proposers(
self,
world: World,
target_rvs: Set[RVIdentifier],
num_adaptive_sample: int,
) -> List[BaseProposer]:
if self._proposer is None:
self._proposer = NUTSProposer(
world,
target_rvs,
num_adaptive_sample,
self.max_tree_depth,
self.max_delta_energy,
self.initial_step_size,
self.adapt_step_size,
self.adapt_mass_matrix,
self.full_mass_matrix,
self.multinomial_sampling,
self.target_accept_prob,
self.jit_backend,
)
return [self._proposer]
class SingleSiteNoUTurnSampler(BaseInference):
"""
    Single site No U-turn sampler [1]. This sampler proposes a value for each random
variable in the World one at a time. This sampler adaptively sets the
hyperparameters of the HMC kernel.
[1] Hoffman and Gelman. `The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo`.
[2] Betancourt, Michael. `A Conceptual Introduction to Hamiltonian Monte Carlo`.
Args:
max_tree_depth (int): Maximum tree depth, defaults to 10.
max_delta_energy (float): Maximum delta energy (for numerical stability),
defaults to 1000.
initial_step_size (float): Defaults to 1.0.
adapt_step_size (bool): Whether to adapt step size with Dual averaging as
suggested in [1], defaults to True.
        adapt_mass_matrix (bool): Whether to adapt the mass matrix using a Welford
            scheme, defaults to True.
multinomial_sampling (bool): Whether to use multinomial sampling as in [2],
defaults to True.
target_accept_prob (float): Target accept probability. Increasing this would
lead to smaller step size. Defaults to 0.8.
nnc_compile: If True, NNC compiler will be used to accelerate the
inference.
experimental_inductor_compile: If True, TorchInductor will be used to
accelerate the inference.
"""
def __init__(
self,
max_tree_depth: int = 10,
max_delta_energy: float = 1000.0,
initial_step_size: float = 1.0,
adapt_step_size: bool = True,
adapt_mass_matrix: bool = True,
full_mass_matrix: bool = False,
multinomial_sampling: bool = True,
target_accept_prob: float = 0.8,
nnc_compile: bool = False,
experimental_inductor_compile: bool = False,
):
self.max_tree_depth = max_tree_depth
self.max_delta_energy = max_delta_energy
self.initial_step_size = initial_step_size
self.adapt_step_size = adapt_step_size
self.adapt_mass_matrix = adapt_mass_matrix
self.full_mass_matrix = full_mass_matrix
self.multinomial_sampling = multinomial_sampling
self.target_accept_prob = target_accept_prob
self.jit_backend = get_backend(nnc_compile, experimental_inductor_compile)
self._proposers = {}
def _get_default_num_adaptive_samples(self, num_samples: int) -> int:
return num_samples // 2
def get_proposers(
self,
world: World,
target_rvs: Set[RVIdentifier],
num_adaptive_sample: int,
) -> List[BaseProposer]:
proposers = []
for node in target_rvs:
if node not in self._proposers:
self._proposers[node] = NUTSProposer(
world,
{node},
num_adaptive_sample,
self.max_tree_depth,
self.max_delta_energy,
self.initial_step_size,
self.adapt_step_size,
self.adapt_mass_matrix,
self.full_mass_matrix,
self.multinomial_sampling,
self.target_accept_prob,
self.jit_backend,
)
proposers.append(self._proposers[node])
return proposers
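if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module: a small
    # Beta-Bernoulli model run through GlobalNoUTurnSampler. The model, the
    # observed coin flips and the sampler settings are assumptions made up for
    # this example; nnc_compile is disabled to keep the sketch dependency-light.
    import beanmachine.ppl as bm
    import torch
    import torch.distributions as dist
    @bm.random_variable
    def theta():
        return dist.Beta(2.0, 2.0)
    @bm.random_variable
    def flips():
        return dist.Bernoulli(theta().expand((5,)))
    samples = GlobalNoUTurnSampler(nnc_compile=False).infer(
        queries=[theta()],
        observations={flips(): torch.tensor([1.0, 0.0, 1.0, 1.0, 0.0])},
        num_samples=300,
        num_chains=2,
    )
    print(samples[theta()].mean())  # posterior mean of the coin bias, ~5/9 here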
| beanmachine-main | src/beanmachine/ppl/inference/nuts_inference.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Iterator, List, Mapping, NamedTuple, Optional, Union
import arviz as az
import torch
import xarray as xr
from beanmachine.ppl.inference.utils import detach_samples, merge_dicts
from beanmachine.ppl.model.rv_identifier import RVIdentifier
RVDict = Dict[RVIdentifier, torch.Tensor]
class Samples(NamedTuple):
samples: RVDict
adaptive_samples: RVDict
class MonteCarloSamples(Mapping[RVIdentifier, torch.Tensor]):
"""
    Represents a view of the data produced by inference.
    If no chain is specified, the data across all chains is accessible.
    If a chain is specified, only the data from that chain is accessible.
"""
def __init__(
self,
chain_results: Union[List[RVDict], RVDict],
num_adaptive_samples: int = 0,
logll_results: Optional[Union[List[RVDict], RVDict]] = None,
observations: Optional[RVDict] = None,
stack_not_cat: bool = True,
default_namespace: str = "posterior",
):
self.namespaces = {}
self.default_namespace = default_namespace
if default_namespace not in self.namespaces:
self.namespaces[default_namespace] = {}
if isinstance(chain_results, list):
self.num_chains = len(chain_results)
chain_results = merge_dicts(chain_results, 0, stack_not_cat)
else:
self.num_chains = next(iter(chain_results.values())).shape[0]
self.num_adaptive_samples = num_adaptive_samples
self.namespaces[default_namespace] = Samples({}, {})
for rv, val in chain_results.items():
self.adaptive_samples[rv] = val[:, :num_adaptive_samples]
self.samples[rv] = val[:, num_adaptive_samples:]
if logll_results is not None:
if isinstance(logll_results, list):
logll = merge_dicts(logll_results, 0, stack_not_cat)
else:
logll = logll_results
self.log_likelihoods = {}
self.adaptive_log_likelihoods = {}
for rv, val in logll.items():
self.adaptive_log_likelihoods[rv] = val[:, :num_adaptive_samples]
self.log_likelihoods[rv] = val[:, num_adaptive_samples:]
else:
self.log_likelihoods = None
self.adaptive_log_likelihoods = None
self.observations = observations
# single_chain_view is only set when self.get_chain is called
self.single_chain_view = False
@property
def samples(self):
return self.namespaces[self.default_namespace].samples
@property
def adaptive_samples(self):
return self.namespaces[self.default_namespace].adaptive_samples
def __getitem__(self, rv: RVIdentifier) -> torch.Tensor:
"""
:param rv: random variable to view values of
:results: samples drawn during inference for the specified variable
"""
return self.get_variable(rv, include_adapt_steps=False)
def __iter__(self) -> Iterator[RVIdentifier]:
return iter(self.samples)
def __len__(self) -> int:
return len(self.samples)
def __str__(self) -> str:
return str(self.samples)
def get_chain(self, chain: int = 0) -> "MonteCarloSamples":
"""
Return a MonteCarloSamples with restricted view to a specified chain
        :param chain: specific chain to view.
        :returns: view of the data restricted to the specified chain
"""
if self.single_chain_view:
raise ValueError(
"The current MonteCarloSamples object has already been"
" restricted to a single chain"
)
elif chain < 0 or chain >= self.num_chains:
raise IndexError("Please specify a valid chain")
samples = {rv: self.get_variable(rv, True)[[chain]] for rv in self}
if self.log_likelihoods is None:
logll = None
else:
logll = {
rv: self.get_log_likelihoods(rv, True)[[chain]]
for rv in self.log_likelihoods
}
new_mcs = MonteCarloSamples(
chain_results=samples,
num_adaptive_samples=self.num_adaptive_samples,
logll_results=logll,
observations=self.observations,
default_namespace=self.default_namespace,
)
new_mcs.single_chain_view = True
return new_mcs
def get_variable(
self,
rv: RVIdentifier,
include_adapt_steps: bool = False,
thinning: int = 1,
namespace: Optional[str] = None,
) -> torch.Tensor:
"""
Let C be the number of chains,
S be the number of samples
If include_adapt_steps, S' = S.
Else, S' = S - num_adaptive_samples.
if no chain specified:
samples[var] returns a Tensor of (C, S', (shape of Var))
if a chain is specified:
samples[var] returns a Tensor of (S', (shape of Var))
:param rv: random variable to see samples
        :param include_adapt_steps: Indicates whether the adaptive steps at the
            beginning of the chain should be included with the returned samples.
:returns: samples drawn during inference for the specified variable
"""
if not isinstance(rv, RVIdentifier):
raise TypeError(
"The key is required to be a random variable "
+ f"but is of type {type(rv).__name__}."
)
if namespace is None:
namespace = self.default_namespace
samples = self.namespaces[namespace].samples[rv]
if include_adapt_steps:
samples = torch.cat(
[self.namespaces[namespace].adaptive_samples[rv], samples],
dim=1,
)
if thinning > 1:
samples = samples[:, ::thinning]
if self.single_chain_view:
samples = samples.squeeze(0)
return samples
def get_log_likelihoods(
self,
rv: RVIdentifier,
include_adapt_steps: bool = False,
) -> torch.Tensor:
"""
:returns: log_likelihoods computed during inference for the specified variable
"""
if not isinstance(rv, RVIdentifier):
raise TypeError(
"The key is required to be a random variable "
+ f"but is of type {type(rv).__name__}."
)
logll = self.log_likelihoods[rv]
if include_adapt_steps:
logll = torch.cat([self.adaptive_log_likelihoods[rv], logll], dim=1)
if self.single_chain_view:
logll = logll.squeeze(0)
return logll
def get(
self,
rv: RVIdentifier,
default: Any = None,
chain: Optional[int] = None,
include_adapt_steps: bool = False,
thinning: int = 1,
):
"""
Return the value of the random variable if rv is in the dictionary, otherwise
return the default value. This method is analogous to Python's dict.get(). The
chain and include_adapt_steps parameters serve the same purpose as in get_chain
and get_variable.
"""
if rv not in self.samples:
return default
if chain is None:
samples = self
else:
samples = self.get_chain(chain)
return samples.get_variable(rv, include_adapt_steps, thinning)
def get_num_samples(self, include_adapt_steps: bool = False) -> int:
"""
:returns: the number of samples run during inference
"""
num_samples = next(iter(self.samples.values())).shape[1]
if include_adapt_steps:
return num_samples + self.num_adaptive_samples
return num_samples
def to_xarray(self, include_adapt_steps: bool = False) -> xr.Dataset:
"""
Return an xarray.Dataset from MonteCarloSamples.
"""
inference_data = self.to_inference_data(include_adapt_steps)
if not include_adapt_steps:
return inference_data["posterior"]
else:
return xr.concat(
[inference_data["warmup_posterior"], inference_data["posterior"]],
dim="draw",
)
def add_groups(self, mcs: "MonteCarloSamples"):
if self.observations is None:
self.observations = mcs.observations
if self.log_likelihoods is None:
self.log_likelihoods = mcs.log_likelihoods
if self.adaptive_log_likelihoods is None:
self.adaptive_log_likelihoods = mcs.adaptive_log_likelihoods
for n in mcs.namespaces:
if n not in self.namespaces:
self.namespaces[n] = mcs.namespaces[n]
def to_inference_data(self, include_adapt_steps: bool = False) -> az.InferenceData:
"""
Return an az.InferenceData from MonteCarloSamples.
"""
if "posterior" in self.namespaces:
posterior = detach_samples(self.namespaces["posterior"].samples)
if self.num_adaptive_samples > 0:
warmup_posterior = detach_samples(
self.namespaces["posterior"].adaptive_samples
)
else:
warmup_posterior = None
else:
posterior = None
warmup_posterior = None
if self.num_adaptive_samples > 0:
warmup_log_likelihood = self.adaptive_log_likelihoods
if warmup_log_likelihood is not None:
warmup_log_likelihood = detach_samples(warmup_log_likelihood)
else:
warmup_log_likelihood = None
if "posterior_predictive" in self.namespaces:
posterior_predictive = detach_samples(
self.namespaces["posterior_predictive"].samples
)
if self.num_adaptive_samples > 0:
warmup_posterior_predictive = detach_samples(
self.namespaces["posterior"].adaptive_samples
)
else:
warmup_posterior_predictive = None
else:
posterior_predictive = None
warmup_posterior_predictive = None
if "prior_predictive" in self.namespaces:
prior_predictive = detach_samples(
self.namespaces["prior_predictive"].samples
)
else:
prior_predictive = None
if self.log_likelihoods is not None:
log_likelihoods = detach_samples(self.log_likelihoods)
else:
log_likelihoods = None
if self.observations is not None:
observed_data = detach_samples(self.observations)
else:
observed_data = None
return az.from_dict(
posterior=posterior,
warmup_posterior=warmup_posterior,
posterior_predictive=posterior_predictive,
warmup_posterior_predictive=warmup_posterior_predictive,
prior_predictive=prior_predictive,
save_warmup=include_adapt_steps,
warmup_log_likelihood=warmup_log_likelihood,
log_likelihood=log_likelihoods,
observed_data=observed_data,
)
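if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: constructing a
    # MonteCarloSamples object directly from raw chain tensors to show the
    # (chain, sample, ...) indexing conventions documented above. The toy
    # random variable and the tensor values are made up for this example.
    import beanmachine.ppl as bm
    import torch.distributions as dist
    @bm.random_variable
    def mu():
        return dist.Normal(0.0, 1.0)
    # two chains of five draws each; the first draw of every chain is adaptive
    chain_results = {mu(): torch.arange(10.0).reshape(2, 5)}
    mcs = MonteCarloSamples(chain_results, num_adaptive_samples=1)
    assert mcs[mu()].shape == (2, 4)  # adaptive draws are dropped by default
    assert mcs.get_variable(mu(), include_adapt_steps=True).shape == (2, 5)
    assert mcs.get_chain(0)[mu()].shape == (4,)  # single-chain view drops the chain dim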
| beanmachine-main | src/beanmachine/ppl/inference/monte_carlo_samples.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
from concurrent.futures import ThreadPoolExecutor
from enum import Enum
from typing import Any, Callable, Dict, List
import numpy as np
import numpy.random
import torch
from beanmachine.ppl.model.rv_identifier import RVIdentifier
RVDict = Dict[RVIdentifier, torch.Tensor]
# Detect and report if a user fails to meet the inference contract.
def _verify_queries(queries: List[RVIdentifier]) -> None:
if not isinstance(queries, list):
t = type(queries).__name__
raise TypeError(
f"Parameter 'queries' is required to be a list but is of type {t}."
)
for query in queries:
if not isinstance(query, RVIdentifier):
t = type(query).__name__
raise TypeError(
f"A query is required to be a random variable but is of type {t}."
)
for arg in query.arguments:
if isinstance(arg, RVIdentifier):
raise TypeError(
"The arguments to a query must not be random variables."
)
def _verify_observations(
observations: Dict[RVIdentifier, torch.Tensor], must_be_rv: bool
) -> None:
if not isinstance(observations, dict):
t = type(observations).__name__
raise TypeError(
f"Parameter 'observations' is required to be a dictionary but is of type {t}."
)
for rv, value in observations.items():
if not isinstance(rv, RVIdentifier):
t = type(rv).__name__
raise TypeError(
f"An observation is required to be a random variable but is of type {t}."
)
if not isinstance(value, torch.Tensor):
t = type(value).__name__
raise TypeError(
f"An observed value is required to be a tensor but is of type {t}."
)
if must_be_rv and rv.is_functional:
raise TypeError(
"An observation must observe a random_variable, not a functional."
)
for arg in rv.arguments:
if isinstance(arg, RVIdentifier):
raise TypeError(
"The arguments to an observation must not be random variables."
)
def _verify_queries_and_observations(
queries: List[RVIdentifier],
observations: Dict[RVIdentifier, torch.Tensor],
observations_must_be_rv: bool,
) -> None:
_verify_queries(queries)
_verify_observations(observations, observations_must_be_rv)
class VerboseLevel(Enum):
"""
Enum class which is used to set how much output is printed during inference.
LOAD_BAR enables tqdm for full inference loop.
"""
OFF = 0
LOAD_BAR = 1
def safe_log_prob_sum(distrib, value: torch.Tensor) -> torch.Tensor:
"Computes log_prob, converting out of support exceptions to -Infinity."
try:
return distrib.log_prob(value).sum()
except (RuntimeError, ValueError) as e:
if not distrib.support.check(value).all():
return torch.tensor(float("-Inf")).to(value.device)
else:
raise e
def merge_dicts(
dicts: List[RVDict], dim: int = 0, stack_not_cat: bool = True
) -> RVDict:
"""
    A helper function that merges multiple dicts of samples into a single dictionary,
    stacking (or concatenating) them along the given dimension.
"""
rv_keys = set().union(*(rv_dict.keys() for rv_dict in dicts))
for idx, d in enumerate(dicts):
if not rv_keys.issubset(d.keys()):
raise ValueError(f"{rv_keys - d.keys()} are missing in dict {idx}")
if stack_not_cat:
return {rv: torch.stack([d[rv] for d in dicts], dim=dim) for rv in rv_keys}
else:
return {rv: torch.cat([d[rv] for d in dicts], dim=dim) for rv in rv_keys}
def seed(seed: int) -> None:
torch.manual_seed(seed)
random.seed(seed)
numpy.random.seed(seed)
def _execute_in_new_thread(f: Callable, *args, **kwargs) -> Any:
"""A helper function to execute the given function in a new thread. This is used
to resolve the deadlock issue with fork-based multiprocessing (see this PyTorch
issue for details
<https://github.com/pytorch/pytorch/issues/17199#issuecomment-833226969>_)"""
with ThreadPoolExecutor() as executor:
return executor.submit(f, *args, **kwargs).result()
def detach_samples(
samples: Dict[RVIdentifier, torch.Tensor],
) -> Dict[RVIdentifier, np.ndarray]:
"""Detach pytorch tensors.
Args:
samples (Dict[RVIdentifier, torch.Tensor]): Dictionary of RVIdentifiers with
original torch tensors.
Returns:
Dict[RVIdentifier, np.ndarray]: Dictionary of RVIdentifiers with converted
NumPy arrays.
"""
return {key: value.detach().numpy() for key, value in samples.items()}
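if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: combining
    # per-chain sample dictionaries with merge_dicts. The toy random variable
    # and tensors below are made up for this example.
    import beanmachine.ppl as bm
    from torch import distributions as dist
    @bm.random_variable
    def mu():
        return dist.Normal(0.0, 1.0)
    chain_0 = {mu(): torch.zeros(3)}
    chain_1 = {mu(): torch.ones(3)}
    stacked = merge_dicts([chain_0, chain_1], dim=0, stack_not_cat=True)
    assert stacked[mu()].shape == (2, 3)  # new leading chain dimension
    concatenated = merge_dicts([chain_0, chain_1], dim=0, stack_not_cat=False)
    assert concatenated[mu()].shape == (6,)  # chains joined along dim 0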
| beanmachine-main | src/beanmachine/ppl/inference/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import random
import warnings
from types import TracebackType
from typing import Generator, NoReturn, Optional, Type, TYPE_CHECKING
import torch
if TYPE_CHECKING:
from beanmachine.ppl.inference.base_inference import BaseInference
from beanmachine.ppl.world import World
class Sampler(Generator[World, Optional[World], None]):
"""
    Samplers are generators of Worlds that generate samples from the joint distribution.
    They are used to generate Monte Carlo samples during MCMC inference.
    At each iteration, the proposer(s) propose values for the random variables, which
    are then accepted or rejected according to the MH ratio. The next world is then returned.
Args:
kernel (BaseInference): Inference class to get proposers from.
initial_world (World): Optional initial world to initialize from.
num_samples (int, Optional): Number of samples. If none is specified, num_samples = inf.
num_adaptive_samples (int, Optional): Number of adaptive samples, defaults to 0.
"""
def __init__(
self,
kernel: BaseInference,
initial_world: World,
num_samples: Optional[int] = None,
num_adaptive_samples: int = 0,
):
self.kernel = kernel
self.world = initial_world
self._num_samples_remaining = (
float("inf") if num_samples is None else num_samples
)
self._num_samples_remaining += num_adaptive_samples
self._num_adaptive_sample_remaining = num_adaptive_samples
def send(self, world: Optional[World] = None) -> World:
"""
At each iteration, the following is executed:
1. Shuffle all the proposers in the world.
2. For each proposer, propose a world and accept/reject it based on MH ratio.
3. Run adaptation method if applicable.
4. Update the new current world as `self.world`.
Args:
world: Optional World to use to propose. If none is provided, `self.world` is used.
"""
if world is None:
world = self.world
if self._num_samples_remaining <= 0:
raise StopIteration
proposers = self.kernel.get_proposers(
world, world.latent_nodes, self._num_adaptive_sample_remaining
)
random.shuffle(proposers)
for proposer in proposers:
try:
new_world, accept_log_prob = proposer.propose(world)
accept_log_prob = accept_log_prob.clamp(max=0.0)
accepted = torch.rand_like(accept_log_prob).log() < accept_log_prob
if accepted:
world = new_world
except RuntimeError as e:
if "singular U" in str(e) or "input is not positive-definite" in str(e):
# since it's normal to run into cholesky error during GP, instead of
# throwing an error, we simply skip current proposer (which is
# equivalent to a rejection) and will retry in the next iteration
warnings.warn(f"Proposal rejected: {e}", RuntimeWarning)
accepted = False
accept_log_prob = -torch.inf
else:
raise e
if self._num_adaptive_sample_remaining > 0:
proposer.do_adaptation(
world=world, accept_log_prob=accept_log_prob, is_accepted=accepted
)
if self._num_adaptive_sample_remaining == 1:
# we just reach the end of adaptation period
proposer.finish_adaptation()
# update attributes at last, so that exceptions during inference won't leave
# self in an invalid state
self.world = world
if self._num_adaptive_sample_remaining > 0:
self._num_adaptive_sample_remaining -= 1
self._num_samples_remaining -= 1
return self.world
def throw(
self,
typ: Type[BaseException],
val: Optional[BaseException] = None,
tb: Optional[TracebackType] = None,
) -> NoReturn:
"""Use the default error handling behavior (thorw Exception as-is)"""
# pyre-fixme[7]: Function declared non-returnable, but got `None`.
super().throw(typ, val, tb)
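if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: driving inference
    # by hand with Sampler as a generator of Worlds. The toy model and the
    # observed value are assumptions made up for this example; the kernel is
    # the single-site random walk sampler shipped elsewhere in this package.
    import beanmachine.ppl as bm
    import torch.distributions as dist
    from beanmachine.ppl.inference.single_site_random_walk import SingleSiteRandomWalk
    from beanmachine.ppl.world import World
    @bm.random_variable
    def mu():
        return dist.Normal(0.0, 1.0)
    @bm.random_variable
    def x():
        return dist.Normal(mu(), 1.0)
    world = World.initialize_world(
        queries=[mu()], observations={x(): torch.tensor(0.5)}
    )
    sampler = Sampler(
        kernel=SingleSiteRandomWalk(), initial_world=world, num_samples=10
    )
    trace = [w[mu()].item() for w in sampler]  # one value of mu per iteration
    print(sum(trace) / len(trace))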
| beanmachine-main | src/beanmachine/ppl/inference/sampler.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""An inference engine which uses Bean Machine Graph to make
inferences on Bean Machine models."""
from typing import Dict, List, Optional, Set, Tuple
import beanmachine.ppl.compiler.performance_report as pr
import beanmachine.ppl.compiler.profiler as prof
import graphviz
import torch
from beanmachine.graph import Graph, InferConfig, InferenceType
from beanmachine.ppl.compiler.bm_graph_builder import rv_to_query
from beanmachine.ppl.compiler.fix_problems import default_skip_optimizations
from beanmachine.ppl.compiler.gen_bm_python import to_bm_python
from beanmachine.ppl.compiler.gen_bmg_cpp import to_bmg_cpp
from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph
from beanmachine.ppl.compiler.gen_bmg_python import to_bmg_python
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.compiler.gen_mini import to_mini
from beanmachine.ppl.compiler.performance_report import PerformanceReport
from beanmachine.ppl.compiler.runtime import BMGRuntime
from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
from beanmachine.ppl.inference.utils import _verify_queries_and_observations
from beanmachine.ppl.model.rv_identifier import RVIdentifier
# TODO[Walid]: At some point, to facilitate checking the idea that this works pretty
# much like any other BM inference, we should probably make this class a subclass of
# AbstractMCInference.
class BMGInference:
"""
Interface to Bean Machine Graph (BMG) Inference,
an experimental framework for high-performance implementations of
inference algorithms.
Internally, BMGInference consists of a compiler
and C++ runtime implementations of various inference algorithms. Currently,
only Newtonian Monte Carlo (NMC) inference is supported, and is the
algorithm used by default.
    Please note that this is a highly experimental implementation under active
    development, and that the supported subset of Bean Machine models is limited.
    Limitations include that the runtime graph should be static (meaning, it does
    not change during inference) and that the set of supported primitive
    distributions is currently limited.
"""
_fix_observe_true: bool = False
_pd: Optional[prof.ProfilerData] = None
def __init__(self):
pass
def _begin(self, s: str) -> None:
pd = self._pd
if pd is not None:
pd.begin(s)
def _finish(self, s: str) -> None:
pd = self._pd
if pd is not None:
pd.finish(s)
def _accumulate_graph(
self,
queries: List[RVIdentifier],
observations: Dict[RVIdentifier, torch.Tensor],
) -> BMGRuntime:
_verify_queries_and_observations(queries, observations, True)
rt = BMGRuntime()
rt._pd = self._pd
bmg = rt.accumulate_graph(queries, observations)
# TODO: Figure out a better way to pass this flag around
bmg._fix_observe_true = self._fix_observe_true
return rt
def _transpose_samples(self, raw):
self._begin(prof.transpose_samples)
samples = []
num_samples = len(raw)
bmg_query_count = len(raw[0])
# Suppose we have two queries and three samples;
# the shape we get from BMG is:
#
# [
# [s00, s01],
# [s10, s11],
# [s20, s21]
# ]
#
# That is, each entry in the list has values from both queries.
# But what we need in the final dictionary is:
#
# {
# RV0: tensor([[s00, s10, s20]]),
# RV1: tensor([[s01, s11, s21]])
# }
transposed = [torch.tensor([x]) for x in zip(*raw)]
assert len(transposed) == bmg_query_count
assert len(transposed[0]) == 1
assert len(transposed[0][0]) == num_samples
# We now have
#
# [
# tensor([[s00, s10, s20]]),
# tensor([[s01, s11, s21]])
# ]
#
# which looks like what we need. But we have an additional problem:
        # if the sample is a matrix then it is in columns but we need it in rows.
#
# If an element of transposed is (1 x num_samples x rows x 1) then we
# will just reshape it to (1 x num_samples x rows).
#
# If it is (1 x num_samples x rows x columns) for columns > 1 then
# we transpose it to (1 x num_samples x columns x rows)
#
# If it is any other shape we leave it alone.
for i in range(len(transposed)):
t = transposed[i]
if len(t.shape) == 4:
if t.shape[3] == 1:
assert t.shape[0] == 1
assert t.shape[1] == num_samples
samples.append(t.reshape(1, num_samples, t.shape[2]))
else:
samples.append(t.transpose(2, 3))
else:
samples.append(t)
assert len(samples) == bmg_query_count
assert len(samples[0]) == 1
assert len(samples[0][0]) == num_samples
self._finish(prof.transpose_samples)
return samples
def _build_mcsamples(
self,
rv_to_query,
samples,
query_to_query_id,
num_samples: int,
num_chains: int,
num_adaptive_samples: int,
) -> MonteCarloSamples:
self._begin(prof.build_mcsamples)
assert len(samples) == num_chains
results = []
for chain_num in range(num_chains):
result: Dict[RVIdentifier, torch.Tensor] = {}
for (rv, query) in rv_to_query.items():
query_id = query_to_query_id[query]
result[rv] = samples[chain_num][query_id]
results.append(result)
# MonteCarloSamples almost provides just what we need here,
# but it requires the input to be of a different type in the
# cases of num_chains==1 and !=1 respectively. Furthermore,
# we had to tweak it to support the right operator for merging
        # sample values when num_chains!=1.
if num_chains == 1:
mcsamples = MonteCarloSamples(
results[0], num_adaptive_samples, stack_not_cat=True
)
else:
mcsamples = MonteCarloSamples(
results, num_adaptive_samples, stack_not_cat=False
)
self._finish(prof.build_mcsamples)
return mcsamples
def _infer(
self,
queries: List[RVIdentifier],
observations: Dict[RVIdentifier, torch.Tensor],
num_samples: int,
num_chains: int = 1,
num_adaptive_samples: int = 0,
inference_type: InferenceType = InferenceType.NMC,
produce_report: bool = True,
skip_optimizations: Set[str] = default_skip_optimizations,
) -> Tuple[MonteCarloSamples, PerformanceReport]:
if produce_report:
self._pd = prof.ProfilerData()
rt = self._accumulate_graph(queries, observations)
bmg = rt._bmg
report = pr.PerformanceReport()
self._begin(prof.infer)
generated_graph = to_bmg_graph(bmg, skip_optimizations)
g = generated_graph.graph
query_to_query_id = generated_graph.query_to_query_id
samples = []
# BMG requires that we have at least one query.
if len(query_to_query_id) != 0:
g.collect_performance_data(produce_report)
self._begin(prof.graph_infer)
default_config = InferConfig()
default_config.num_warmup = num_adaptive_samples
num_adaptive_samples = 0
# TODO[Walid]: In the following we were previously silently using the default seed
# specified in pybindings.cpp (and not passing the local one in). In the current
# code we are explicitly passing in the same default value used in that file (5123401).
# We really need a way to defer to the value defined in pybindings.py here.
try:
raw = g.infer(
num_samples, inference_type, 5123401, num_chains, default_config
)
except RuntimeError as e:
raise RuntimeError(
"Error during BMG inference\n"
+ "Note: the runtime error from BMG may not be interpretable.\n"
) from e
self._finish(prof.graph_infer)
if produce_report:
self._begin(prof.deserialize_perf_report)
js = g.performance_report()
report = pr.json_to_perf_report(js)
self._finish(prof.deserialize_perf_report)
assert len(raw) == num_chains
assert all([len(r) == num_samples for r in raw])
samples = [self._transpose_samples(r) for r in raw]
# TODO: Make _rv_to_query public. Add it to BMGraphBuilder?
mcsamples = self._build_mcsamples(
rv_to_query(generated_graph.bmg),
samples,
query_to_query_id,
num_samples,
num_chains,
num_adaptive_samples,
)
self._finish(prof.infer)
if produce_report:
report.profiler_report = self._pd.to_report() # pyre-ignore
return mcsamples, report
def infer(
self,
queries: List[RVIdentifier],
observations: Dict[RVIdentifier, torch.Tensor],
num_samples: int,
num_chains: int = 4,
num_adaptive_samples: int = 0,
inference_type: InferenceType = InferenceType.NMC,
skip_optimizations: Set[str] = default_skip_optimizations,
) -> MonteCarloSamples:
"""
Perform inference by (runtime) compilation of Python source code associated
with its parameters, constructing a BMG graph, and then calling the
BMG implementation of a particular inference method on this graph.
Args:
queries: queried random variables
observations: observations dict
num_samples: number of samples in each chain
num_chains: number of chains generated
num_adaptive_samples: number of burn in samples to discard
inference_type: inference method, currently only NMC is supported
skip_optimizations: list of optimization to disable in this call
Returns:
MonteCarloSamples: The requested samples
"""
# TODO: Add verbose level
# TODO: Add logging
samples, _ = self._infer(
queries,
observations,
num_samples,
num_chains,
num_adaptive_samples,
inference_type,
False,
skip_optimizations,
)
return samples
def to_dot(
self,
queries: List[RVIdentifier],
observations: Dict[RVIdentifier, torch.Tensor],
after_transform: bool = True,
label_edges: bool = False,
skip_optimizations: Set[str] = default_skip_optimizations,
) -> str:
"""Produce a string containing a program in the GraphViz DOT language
representing the graph deduced from the model."""
node_types = False
node_sizes = False
edge_requirements = False
bmg = self._accumulate_graph(queries, observations)._bmg
return to_dot(
bmg,
node_types,
node_sizes,
edge_requirements,
after_transform,
label_edges,
skip_optimizations,
)
def _to_mini(
self,
queries: List[RVIdentifier],
observations: Dict[RVIdentifier, torch.Tensor],
indent=None,
) -> str:
"""Internal test method for Neal's MiniBMG prototype."""
bmg = self._accumulate_graph(queries, observations)._bmg
return to_mini(bmg, indent)
def to_graphviz(
self,
queries: List[RVIdentifier],
observations: Dict[RVIdentifier, torch.Tensor],
after_transform: bool = True,
label_edges: bool = False,
skip_optimizations: Set[str] = default_skip_optimizations,
) -> graphviz.Source:
"""Small wrapper to generate an actual graphviz object"""
s = self.to_dot(
queries, observations, after_transform, label_edges, skip_optimizations
)
return graphviz.Source(s)
def to_cpp(
self,
queries: List[RVIdentifier],
observations: Dict[RVIdentifier, torch.Tensor],
) -> str:
"""Produce a string containing a C++ program fragment which
produces the graph deduced from the model."""
bmg = self._accumulate_graph(queries, observations)._bmg
return to_bmg_cpp(bmg).code
def to_python(
self,
queries: List[RVIdentifier],
observations: Dict[RVIdentifier, torch.Tensor],
) -> str:
"""Produce a string containing a Python program fragment which
produces the graph deduced from the model."""
bmg = self._accumulate_graph(queries, observations)._bmg
return to_bmg_python(bmg).code
def to_bm_python(
self,
queries: List[RVIdentifier],
observations: Dict[RVIdentifier, torch.Tensor],
) -> str:
"""Produce a string containing a BM Python program from the graph."""
bmg = self._accumulate_graph(queries, observations)._bmg
return to_bm_python(bmg)
def to_graph(
self,
queries: List[RVIdentifier],
observations: Dict[RVIdentifier, torch.Tensor],
) -> Tuple[Graph, Dict[RVIdentifier, int]]:
"""Produce a BMG graph and a map from queried RVIdentifiers to the corresponding
indices of the inference results."""
rt = self._accumulate_graph(queries, observations)
bmg = rt._bmg
generated_graph = to_bmg_graph(bmg)
g = generated_graph.graph
query_to_query_id = generated_graph.query_to_query_id
rv_to_query_map = rv_to_query(generated_graph.bmg)
rv_to_query_id = {rv: query_to_query_id[rv_to_query_map[rv]] for rv in queries}
return g, rv_to_query_id
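if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module: compiling a
    # tiny Normal-Normal model to BMG and running the default NMC algorithm
    # through BMGInference. The model and the observed value are assumptions
    # made up for this example.
    import beanmachine.ppl as bm
    import torch.distributions as dist
    @bm.random_variable
    def mu():
        return dist.Normal(0.0, 1.0)
    @bm.random_variable
    def x():
        return dist.Normal(mu(), 1.0)
    queries = [mu()]
    observations = {x(): torch.tensor(0.5)}
    # Inspect the compiled graph without running inference ...
    print(BMGInference().to_dot(queries, observations))
    # ... or draw posterior samples with the default NMC algorithm.
    samples = BMGInference().infer(queries, observations, num_samples=100, num_chains=2)
    print(samples[mu()].mean())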
| beanmachine-main | src/beanmachine/ppl/inference/bmg_inference.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Set
from beanmachine.ppl.inference.base_inference import BaseInference
from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer
from beanmachine.ppl.inference.proposer.single_site_random_walk_proposer import (
SingleSiteRandomWalkProposer,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import World
class SingleSiteRandomWalk(BaseInference):
"""
Single Site random walk Metropolis-Hastings. This single site algorithm uses a Normal distribution
proposer.
Args:
step_size: Step size, defaults to 1.0
"""
def __init__(self, step_size: float = 1.0):
self.step_size = step_size
self._proposers = {}
def get_proposers(
self,
world: World,
target_rvs: Set[RVIdentifier],
num_adaptive_sample: int,
) -> List[BaseProposer]:
proposers = []
for node in target_rvs:
if node not in self._proposers:
self._proposers[node] = SingleSiteRandomWalkProposer(
node, self.step_size
)
proposers.append(self._proposers[node])
return proposers
| beanmachine-main | src/beanmachine/ppl/inference/single_site_random_walk.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABCMeta, abstractmethod
from typing import Iterable
import torch
from beanmachine import ppl as bm
from beanmachine.ppl.distributions.delta import Delta
from beanmachine.ppl.inference.vi.variational_infer import VariationalInfer
from beanmachine.ppl.inference.vi.variational_world import VariationalWorld
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import RVDict
from torch import distributions as dist
from torch.distributions.constraint_registry import biject_to, transform_to
from torch.nn.functional import softplus
class AutoGuideVI(VariationalInfer, metaclass=ABCMeta):
"""VI with guide distributions automatically generated."""
def __init__(
self,
queries: Iterable[RVIdentifier],
observations: RVDict,
**kwargs,
):
queries_to_guides = {}
# runs all queries to discover their dimensions
world = VariationalWorld(
observations=observations,
params={},
queries_to_guides=queries_to_guides,
)
# automatically instantiate `queries_to_guides`
for query in queries:
world.call(query)
if query.is_random_variable:
distrib = world.get_variable(query).distribution
queries_to_guides[query] = self.get_guide(query, distrib)
super().__init__(
queries_to_guides=queries_to_guides,
observations=observations,
**kwargs,
)
@staticmethod
@abstractmethod
def get_guide(query, distrib) -> RVIdentifier:
pass
class ADVI(AutoGuideVI):
"""Automatic Differentiation Variational Inference (ADVI).
ADVI automates construction of guides by initializing variational distributions
as Gaussians and possibly bijecting them so the supports match.
See https://arxiv.org/abs/1506.03431.
"""
@staticmethod
def get_guide(query, distrib):
@bm.param
def param_loc():
return (
torch.rand_like(biject_to(distrib.support).inv(distrib.sample())) * 4.0
- 2.0
)
@bm.param
def param_scale():
return (
0.01
+ torch.rand_like(biject_to(distrib.support).inv(distrib.sample()))
* 4.0
- 2.0
)
def f():
loc = param_loc()
scale = softplus(param_scale())
q = dist.Normal(loc, scale)
if distrib.support != dist.constraints.real:
if distrib.support == dist.constraints.positive:
# override exp transform with softplus
q = dist.TransformedDistribution(
q, [dist.transforms.SoftplusTransform()]
)
else:
q = dist.TransformedDistribution(q, [biject_to(distrib.support)])
return q
f.__name__ = "guide_" + str(query)
return bm.random_variable(f)()
class MAP(AutoGuideVI):
"""Maximum A Posteriori (MAP) Inference.
Uses ``Delta`` distributions to perform a point estimate
of the posterior mode.
"""
@staticmethod
def get_guide(query, distrib):
@bm.param
def param_loc():
# TODO: use event shape
return (
torch.rand_like(transform_to(distrib.support).inv(distrib.sample()))
* 4.0
- 2.0
)
def f():
loc = param_loc()
if distrib.support != dist.constraints.real:
if distrib.support == dist.constraints.positive:
loc = dist.transforms.SoftplusTransform()(loc)
else:
loc = transform_to(distrib.support)(loc)
q = Delta(loc)
return q
f.__name__ = "guide_" + str(query)
return bm.random_variable(f)()
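if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module: fitting an
    # automatically generated ADVI guide to a Normal-Normal model. The model,
    # the observed value and the number of optimization steps are assumptions
    # made up for this example.
    @bm.random_variable
    def mu():
        return dist.Normal(0.0, 1.0)
    @bm.random_variable
    def x():
        return dist.Normal(mu(), 1.0)
    advi = ADVI(queries=[mu()], observations={x(): torch.tensor(0.5)})
    fitted_world = advi.infer(num_steps=100)
    print(fitted_world.get_guide_distribution(mu()))  # the fitted Normal guide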
| beanmachine-main | src/beanmachine/ppl/inference/vi/autoguide.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .autoguide import ADVI, MAP
from .variational_infer import VariationalInfer
__all__ = [
"ADVI",
"MAP",
"VariationalInfer",
]
| beanmachine-main | src/beanmachine/ppl/inference/vi/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Mapping, MutableMapping, Optional
import torch
import torch.distributions as dist
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import init_from_prior, World
from beanmachine.ppl.world.initialize_fn import InitializeFn
from beanmachine.ppl.world.world import RVDict
class VariationalWorld(World):
"""A World which also contains (variational) parameters."""
def __init__(
self,
observations: Optional[RVDict] = None,
initialize_fn: InitializeFn = init_from_prior,
params: Optional[MutableMapping[RVIdentifier, torch.Tensor]] = None,
queries_to_guides: Optional[Mapping[RVIdentifier, RVIdentifier]] = None,
) -> None:
self._params = params or {}
self._queries_to_guides = queries_to_guides or {}
super().__init__(observations, initialize_fn)
def copy(self):
world_copy = VariationalWorld(
observations=self.observations.copy(),
initialize_fn=self._initialize_fn,
params=self._params.copy(),
queries_to_guides=self._queries_to_guides.copy(),
)
world_copy._variables = self._variables.copy()
return world_copy
# TODO: distinguish params vs random_variables at the type-level
def get_param(self, param: RVIdentifier) -> torch.Tensor:
"""Gets a parameter or initializes it if not found."""
if param not in self._params:
init_value = param.function(*param.arguments)
assert isinstance(init_value, torch.Tensor)
self._params[param] = init_value
self._params[param].requires_grad = True
return self._params[param]
def set_params(self, params: MutableMapping[RVIdentifier, torch.Tensor]):
"""Sets the parameters in this World to specified values."""
self._params = params
def get_guide_distribution(self, rv: RVIdentifier) -> dist.Distribution:
guide_rv = self._queries_to_guides[rv]
return self.get_variable(guide_rv).distribution
def update_graph(self, node: RVIdentifier) -> torch.Tensor:
"""
Initialize a new node using its guide if available and
the prior otherwise.
Args:
node (RVIdentifier): RVIdentifier of node to update in the graph.
Returns:
The value of the node stored in world (in original space).
"""
if node in self._queries_to_guides:
node = self._queries_to_guides[node]
return super().update_graph(node)
| beanmachine-main | src/beanmachine/ppl/inference/vi/variational_world.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"Gradient estimators of f-divergences."
from typing import Callable, Mapping
import torch
from beanmachine.ppl.inference.vi.variational_world import VariationalWorld
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import RVDict, World
_CPU_DEVICE = torch.device("cpu")
DiscrepancyFn = Callable[[torch.Tensor], torch.Tensor]
# NOTE: right now it is either all reparameterizable
# or all score function gradient estimators. We should
# be able to support both depending on the guide used.
def monte_carlo_approximate_reparam(
observations: RVDict,
num_samples: int,
discrepancy_fn: DiscrepancyFn,
params: Mapping[RVIdentifier, torch.Tensor],
queries_to_guides: Mapping[RVIdentifier, RVIdentifier],
subsample_factor: float = 1.0,
device: torch.device = _CPU_DEVICE,
) -> torch.Tensor:
"""The pathwise derivative / reparameterization trick
(https://arxiv.org/abs/1312.6114) gradient estimator."""
loss = torch.zeros(1).to(device)
for _ in range(num_samples):
variational_world = VariationalWorld.initialize_world(
queries=queries_to_guides.values(),
observations=observations,
initialize_fn=lambda d: d.rsample(),
params=params,
queries_to_guides=queries_to_guides,
)
world = World.initialize_world(
queries=[],
observations={
**{
query: variational_world[guide]
for query, guide in queries_to_guides.items()
},
**observations,
},
)
# form log density ratio logu = logp - logq
# We want to avoid using world.latent_nodes/world.observations.
# The preceding World.initialize_world puts everything into observations, which results in latent_nodes being empty.
# That results in everything being scaled by the scaling factor (we don't want that)
logu = (
world.log_prob(queries_to_guides.keys())
+ (1.0 / subsample_factor) * world.log_prob(observations.keys())
- variational_world.log_prob(queries_to_guides.values())
)
loss += discrepancy_fn(logu) # reparameterized estimator
return loss / num_samples
def monte_carlo_approximate_sf(
observations: RVDict,
num_samples: int,
discrepancy_fn: DiscrepancyFn,
params: Mapping[RVIdentifier, torch.Tensor],
queries_to_guides: Mapping[RVIdentifier, RVIdentifier],
subsample_factor: float = 1,
device: torch.device = _CPU_DEVICE,
) -> torch.Tensor:
"""The score function / log derivative trick surrogate loss
(https://arxiv.org/pdf/1506.05254) gradient estimator."""
loss = torch.zeros(1).to(device)
for _ in range(num_samples):
variational_world = VariationalWorld.initialize_world(
queries=queries_to_guides.values(),
observations=observations,
initialize_fn=lambda d: d.sample(),
params=params,
queries_to_guides=queries_to_guides,
)
world = World.initialize_world(
queries=[],
observations={
**{
query: variational_world[guide]
for query, guide in queries_to_guides.items()
},
**observations,
},
)
# form log density ratio logu = logp - logq
# We want to avoid using world.latent_nodes/world.observations.
# The preceding World.initialize_world puts everything into observations, which results in latent_nodes being empty.
# That results in everything being scaled by the scaling factor (we don't want that)
logq = variational_world.log_prob(queries_to_guides.values())
logu = (
world.log_prob(queries_to_guides.keys())
+ (1.0 / subsample_factor) * world.log_prob(observations.keys())
- logq
)
# score function estimator surrogate loss
loss += discrepancy_fn(logu).detach().clone() * logq + discrepancy_fn(logu)
return loss / num_samples
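if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: these estimators
    # are normally selected through VariationalInfer via its `mc_approx`
    # argument rather than called directly. The model, guide and observed
    # value below are assumptions made up for this example; the latent
    # variable is discrete, so the score-function estimator is the
    # appropriate choice here.
    from beanmachine import ppl as bm
    from beanmachine.ppl.inference.vi import VariationalInfer
    import torch.distributions as dist
    @bm.random_variable
    def z():
        return dist.Bernoulli(0.5)  # discrete, hence not reparameterizable
    @bm.random_variable
    def x():
        return dist.Normal(z(), 1.0)
    @bm.param
    def logit():
        return torch.tensor(0.0)
    @bm.random_variable
    def q_z():
        return dist.Bernoulli(logits=logit())
    vi = VariationalInfer(
        queries_to_guides={z(): q_z()},
        observations={x(): torch.tensor(1.0)},
    )
    vi.infer(num_steps=50, mc_approx=monte_carlo_approximate_sf)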
| beanmachine-main | src/beanmachine/ppl/inference/vi/gradient_estimator.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import logging
from typing import Callable, Dict, Optional
import torch
import torch.optim as optim
from beanmachine.ppl.inference.vi.discrepancy import kl_reverse
from beanmachine.ppl.inference.vi.gradient_estimator import (
monte_carlo_approximate_reparam,
)
from beanmachine.ppl.inference.vi.variational_world import VariationalWorld
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world.world import RVDict
from tqdm.auto import tqdm
_CPU_DEVICE = torch.device("cpu")
class VariationalInfer:
def __init__(
self,
queries_to_guides: Dict[RVIdentifier, RVIdentifier],
observations: RVDict,
optimizer: Callable[
[torch.Tensor], optim.Optimizer
] = lambda params: optim.Adam(params, lr=1e-2),
device: torch.device = _CPU_DEVICE,
):
"""
Performs variational inference using reparameterizable guides.
Args:
queries_to_guides: Pairing between random variables and their variational guide/surrogate
observations: Observations as an RVDict keyed by RVIdentifier
optimizer: A function returning a ``torch.Optimizer`` to use for optimizing variational parameters.
device: a ``torch.device`` to use for pytorch tensors
"""
super().__init__()
self.observations = observations
self.queries_to_guides = queries_to_guides
# runs all guides to reify `param`s for `optimizer`
# NOTE: assumes `params` is static and same across all worlds, consider MultiOptimizer (see Pyro)
# TODO: what happens if not all the params are encountered
# in this execution pass, eg an if/else, consider MultiOptimizer
world = VariationalWorld(
observations=observations,
params={},
queries_to_guides=queries_to_guides,
)
for guide in queries_to_guides.values():
world.call(guide)
self.params = world._params
self._optimizer = optimizer(self.params.values())
self._device = device
def infer(
self,
num_steps: int,
num_samples: int = 1,
discrepancy_fn=kl_reverse,
mc_approx=monte_carlo_approximate_reparam, # TODO: support both reparam and SF in same guide
step_callback: Optional[
Callable[[int, torch.Tensor, VariationalInfer], None]
] = None,
subsample_factor: float = 1,
) -> VariationalWorld:
"""
        Perform variational inference.
Args:
num_steps: number of optimizer steps
num_samples: number of samples per Monte-Carlo gradient estimate of E[f(logp - logq)]
discrepancy_fn: discrepancy function f, use ``kl_reverse`` to minimize negative ELBO
mc_approx: Monte-Carlo gradient estimator to use
step_callback: callback function invoked each optimizer step
            subsample_factor: fraction of the data represented by the observations; observation log-probabilities are scaled by 1/subsample_factor to avoid over-shrinking towards the prior
Returns:
VariationalWorld: A world with variational guide distributions
initialized with optimized parameters
"""
assert subsample_factor > 0 and subsample_factor <= 1
for it in tqdm(range(num_steps)):
loss = self.step(num_samples, discrepancy_fn, mc_approx, subsample_factor)
if step_callback:
step_callback(it, loss, self)
return self.initialize_world()
def step(
self,
num_samples: int = 1,
discrepancy_fn=kl_reverse,
mc_approx=monte_carlo_approximate_reparam, # TODO: support both reparam and SF in same guide
subsample_factor: float = 1,
) -> torch.Tensor:
"""
        Perform one step of variational inference.
Args:
num_samples: number of samples per Monte-Carlo gradient estimate of E[f(logp - logq)]
discrepancy_fn: discrepancy function f, use ``kl_reverse`` to minimize negative ELBO
mc_approx: Monte-Carlo gradient estimator to use
            subsample_factor: fraction of the data represented by the observations; observation log-probabilities are scaled by 1/subsample_factor to avoid over-shrinking towards the prior
Returns:
torch.Tensor: the loss value (before the step)
"""
self._optimizer.zero_grad()
loss = mc_approx(
self.observations,
num_samples,
discrepancy_fn,
self.params,
self.queries_to_guides,
subsample_factor=subsample_factor,
device=self._device,
)
if not torch.isnan(loss) and not torch.isinf(loss):
loss.backward()
self._optimizer.step()
else:
            logging.warning("Encountered NaN/inf loss, skipping step.")
return loss
def initialize_world(self) -> VariationalWorld:
"""
Initializes a `VariationalWorld` using samples from guide distributions
evaluated at the current parameter values.
Returns:
VariationalWorld: a `World` where guide samples and distributions
have replaced their corresponding queries
"""
return VariationalWorld.initialize_world(
queries=self.queries_to_guides.values(),
observations=self.observations,
params=self.params,
queries_to_guides=self.queries_to_guides,
initialize_fn=lambda d: d.sample(),
)
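if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module: variational
    # inference with a hand-written reparameterizable guide and a
    # `step_callback` that logs the loss. The model, guide and observed value
    # are assumptions made up for this example.
    from beanmachine import ppl as bm
    import torch.distributions as dist
    from torch.nn.functional import softplus
    @bm.random_variable
    def mu():
        return dist.Normal(0.0, 1.0)
    @bm.random_variable
    def x():
        return dist.Normal(mu(), 1.0)
    @bm.param
    def loc():
        return torch.tensor(0.0)
    @bm.param
    def raw_scale():
        return torch.tensor(0.0)
    @bm.random_variable
    def q_mu():
        return dist.Normal(loc(), softplus(raw_scale()))
    vi = VariationalInfer(
        queries_to_guides={mu(): q_mu()},
        observations={x(): torch.tensor(0.5)},
    )
    fitted = vi.infer(
        num_steps=100,
        step_callback=lambda it, loss, _vi: print(it, loss.item()) if it % 25 == 0 else None,
    )
    print(fitted.get_guide_distribution(mu()))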
| beanmachine-main | src/beanmachine/ppl/inference/vi/variational_infer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"Csiszar f-functions in log-space."
import torch
def kl_reverse(logu: torch.Tensor) -> torch.Tensor:
"""
Log-space Csiszar function for reverse KL-divergence D_f(p,q) = KL(q||p).
Also known as the exclusive KL-divergence and negative ELBO, minimizing
results in zero-forcing / mode-seeking behavior.
Args:
logu (torch.Tensor): ``p.log_prob``s evaluated at samples from q.
"""
return -logu
def kl_forward(logu: torch.Tensor) -> torch.Tensor:
"""
Log-space Csiszar function for forward KL-divergence D_f(p,q) = KL(p||q).
Also known as the inclusive KL-divergence, minimizing results in
zero-avoiding / mass-covering behavior.
Args:
logu (torch.Tensor): ``p.log_prob``s evaluated at samples from q.
"""
return torch.exp(logu) * logu
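if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: a quick Monte
    # Carlo check that averaging these f-functions over samples from q
    # recovers the corresponding KL divergences. The two Normal distributions
    # below are made up for this example.
    from torch import distributions as dist
    p = dist.Normal(0.0, 1.0)
    q = dist.Normal(1.0, 1.0)
    z = q.sample((100_000,))
    logu = p.log_prob(z) - q.log_prob(z)
    print(kl_reverse(logu).mean())  # approximately KL(q || p) = 0.5
    print(kl_forward(logu).mean())  # approximately KL(p || q) = 0.5
    print(dist.kl_divergence(q, p))  # exact value for comparison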
| beanmachine-main | src/beanmachine/ppl/inference/vi/discrepancy.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import NamedTuple, Set, Tuple
import torch
from beanmachine.ppl.experimental.torch_jit_backend import jit_compile, TorchJITBackend
from beanmachine.ppl.inference.proposer.hmc_proposer import HMCProposer
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import World
class _TreeNode(NamedTuple):
positions: torch.Tensor
momentums: torch.Tensor
pe_grad: torch.Tensor
class _Tree(NamedTuple):
left: _TreeNode
right: _TreeNode
proposal: torch.Tensor
pe: torch.Tensor
pe_grad: torch.Tensor
log_weight: torch.Tensor
sum_momentums: torch.Tensor
sum_accept_prob: torch.Tensor
num_proposals: torch.Tensor
turned_or_diverged: torch.Tensor
class _TreeArgs(NamedTuple):
log_slice: torch.Tensor
direction: torch.Tensor
step_size: torch.Tensor
initial_energy: torch.Tensor
mass_inv: torch.Tensor
class NUTSProposer(HMCProposer):
"""
The No-U-Turn Sampler (NUTS) as described in [1]. Unlike vanilla HMC, it does not
require users to specify a trajectory length. The current implementation roughly
follows Algorithm 6 of [1]. If multinomial_sampling is True, then the next state
will be drawn from a multinomial distribution (weighted by acceptance probability,
    as introduced in Appendix 2 of [2]) instead of being drawn uniformly.
Reference:
[1] Matthew Hoffman and Andrew Gelman. "The No-U-Turn Sampler: Adaptively
Setting Path Lengths in Hamiltonian Monte Carlo" (2014).
https://arxiv.org/abs/1111.4246
[2] Michael Betancourt. "A Conceptual Introduction to Hamiltonian Monte Carlo"
(2017). https://arxiv.org/abs/1701.02434
Args:
initial_world: Initial world to propose from.
target_rvs: Set of RVIdentifiers to indicate which variables to propose.
num_adaptive_samples: Number of adaptive samples to run.
max_tree_depth: Maximum tree depth, defaults to 10.
max_delta_energy: Maximum delta energy (for numerical stability), defaults to 1000.
initial_step_size: Defaults to 1.0.
adapt_step_size: Whether to adapt step size with Dual averaging as suggested in [1], defaults to True.
adapt_mass_matrix: Whether to adapt mass matrix using Welford Scheme, defaults to True.
multinomial_sampling: Whether to use multinomial sampling as in [2], defaults to True.
target_accept_prob: Target accept probability. Increasing this would lead to smaller step size. Defaults to 0.8.
nnc_compile: If True, NNC compiler will be used to accelerate the
inference.
"""
def __init__(
self,
initial_world: World,
target_rvs: Set[RVIdentifier],
num_adaptive_sample: int,
max_tree_depth: int = 10,
max_delta_energy: float = 1000.0,
initial_step_size: float = 1.0,
adapt_step_size: bool = True,
adapt_mass_matrix: bool = True,
full_mass_matrix: bool = False,
multinomial_sampling: bool = True,
target_accept_prob: float = 0.8,
jit_backend: TorchJITBackend = TorchJITBackend.NNC,
):
# note that trajectory_length is not used in NUTS
super().__init__(
initial_world,
target_rvs,
num_adaptive_sample,
trajectory_length=0.0,
initial_step_size=initial_step_size,
adapt_step_size=adapt_step_size,
adapt_mass_matrix=adapt_mass_matrix,
full_mass_matrix=full_mass_matrix,
target_accept_prob=target_accept_prob,
jit_backend=TorchJITBackend.NONE, # we will use NNC at NUTS level, not at HMC level
)
self._max_tree_depth = max_tree_depth
self._max_delta_energy = max_delta_energy
self._multinomial_sampling = multinomial_sampling
# pyre-ignore[8]
self._build_tree_base_case = jit_compile(
self._build_tree_base_case, jit_backend
)
def _is_u_turning(
self,
mass_inv: torch.Tensor,
left_momentums: torch.Tensor,
right_momentums: torch.Tensor,
sum_momentums: torch.Tensor,
) -> torch.Tensor:
"""The generalized U-turn condition, as described in [2] Appendix 4.2"""
rho = self._scale_r(sum_momentums, mass_inv)
return (torch.dot(left_momentums, rho) <= 0) or (
torch.dot(right_momentums, rho) <= 0
)
def _build_tree_base_case(self, root: _TreeNode, args: _TreeArgs) -> _Tree:
"""Base case of the recursive tree building algorithm: take a single leapfrog
step in the specified direction and return a subtree."""
positions, momentums, pe, pe_grad = self._leapfrog_step(
root.positions,
root.momentums,
args.step_size * args.direction,
args.mass_inv,
root.pe_grad,
)
new_energy = torch.nan_to_num(
self._hamiltonian(positions, momentums, args.mass_inv, pe),
float("inf"),
)
# initial_energy == -L(\theta^{m-1}) + 1/2 r_0^2 in Algorithm 6 of [1]
delta_energy = new_energy - args.initial_energy
if self._multinomial_sampling:
log_weight = -delta_energy
else:
# slice sampling as introduced in the original NUTS paper [1]
log_weight = (args.log_slice <= -new_energy).log()
tree_node = _TreeNode(positions=positions, momentums=momentums, pe_grad=pe_grad)
return _Tree(
left=tree_node,
right=tree_node,
proposal=positions,
pe=pe,
pe_grad=pe_grad,
log_weight=log_weight,
sum_momentums=momentums,
sum_accept_prob=torch.clamp(torch.exp(-delta_energy), max=1.0),
num_proposals=torch.tensor(1),
turned_or_diverged=args.log_slice >= self._max_delta_energy - new_energy,
)
def _build_tree(self, root: _TreeNode, tree_depth: int, args: _TreeArgs) -> _Tree:
"""Build the binary tree by recursively build the left and right subtrees and
combine the two."""
if tree_depth == 0:
return self._build_tree_base_case(root, args)
# build the first half of the tree
sub_tree = self._build_tree(root, tree_depth - 1, args)
if sub_tree.turned_or_diverged:
return sub_tree
# build the other half of the tree
other_sub_tree = self._build_tree(
root=sub_tree.left if args.direction == -1 else sub_tree.right,
tree_depth=tree_depth - 1,
args=args,
)
return self._combine_tree(
sub_tree, other_sub_tree, args.direction, args.mass_inv, biased=False
)
def _combine_tree(
self,
old_tree: _Tree,
new_tree: _Tree,
direction: torch.Tensor,
mass_inv: torch.Tensor,
biased: bool,
) -> _Tree:
"""Combine the old tree and the new tree into a single (large) tree. The new
tree will be add to the left of the old tree if direction is -1, otherwise it
will be add to the right. If biased is True, then we will prefer choosing from
new tree (which is away from the starting location) than old tree when sampling
the next state from the trajectory. This function assumes old_tree is not
turned or diverged."""
        # if old tree has turned or diverged, then we shouldn't build the new tree in
# the first place
assert not old_tree.turned_or_diverged
# log of the sum of the weights from both trees
log_weight = torch.logaddexp(old_tree.log_weight, new_tree.log_weight)
if new_tree.turned_or_diverged:
selected_subtree = old_tree
else:
# progressively sample from the trajectory
if biased:
# biased progressive sampling (Appendix 3.2 of [2])
log_tree_prob = new_tree.log_weight - old_tree.log_weight
else:
# uniform progressive sampling (Appendix 3.1 of [2])
log_tree_prob = new_tree.log_weight - log_weight
if torch.rand_like(log_tree_prob).log() < log_tree_prob:
selected_subtree = new_tree
else:
selected_subtree = old_tree
if direction == -1:
left_tree, right_tree = new_tree, old_tree
else:
left_tree, right_tree = old_tree, new_tree
sum_momentums = left_tree.sum_momentums + right_tree.sum_momentums
turned_or_diverged = new_tree.turned_or_diverged or self._is_u_turning(
mass_inv,
left_tree.left.momentums,
right_tree.right.momentums,
sum_momentums,
)
# More robust U-turn condition
# https://discourse.mc-stan.org/t/nuts-misses-u-turns-runs-in-circles-until-max-treedepth/9727
if not turned_or_diverged and right_tree.num_proposals > 1:
extended_sum_momentums = left_tree.sum_momentums + right_tree.left.momentums
turned_or_diverged = self._is_u_turning(
mass_inv,
left_tree.left.momentums,
right_tree.left.momentums,
extended_sum_momentums,
)
if not turned_or_diverged and left_tree.num_proposals > 1:
extended_sum_momentums = (
right_tree.sum_momentums + left_tree.right.momentums
)
turned_or_diverged = self._is_u_turning(
mass_inv,
left_tree.right.momentums,
right_tree.right.momentums,
extended_sum_momentums,
)
return _Tree(
left=left_tree.left,
right=right_tree.right,
proposal=selected_subtree.proposal,
pe=selected_subtree.pe,
pe_grad=selected_subtree.pe_grad,
log_weight=log_weight,
sum_momentums=sum_momentums,
sum_accept_prob=old_tree.sum_accept_prob + new_tree.sum_accept_prob,
num_proposals=old_tree.num_proposals + new_tree.num_proposals,
turned_or_diverged=turned_or_diverged,
)
def propose(self, world: World) -> Tuple[World, torch.Tensor]:
if world is not self.world:
# re-compute cached values since world was modified by other sources
self.world = world
self._positions = self._dict2vec.to_vec(
self._to_unconstrained({node: world[node] for node in self._target_rvs})
)
self._pe, self._pe_grad = self._potential_grads(self._positions)
momentums = self._initialize_momentums(self._positions)
current_energy = self._hamiltonian(
self._positions, momentums, self._mass_inv, self._pe
)
if self._multinomial_sampling:
# log slice is only used to check the divergence
log_slice = -current_energy
else:
# this is a more stable way to sample from log(Uniform(0, exp(-current_energy)))
log_slice = torch.log1p(-torch.rand_like(current_energy)) - current_energy
tree_node = _TreeNode(self._positions, momentums, self._pe_grad)
tree = _Tree(
left=tree_node,
right=tree_node,
proposal=self._positions,
pe=self._pe,
pe_grad=self._pe_grad,
# log accept prob of staying at current state
log_weight=torch.zeros_like(log_slice),
sum_momentums=momentums,
sum_accept_prob=torch.zeros_like(log_slice),
num_proposals=torch.tensor(0),
turned_or_diverged=torch.tensor(False),
)
for j in range(self._max_tree_depth):
direction = torch.tensor(1 if torch.rand(()) > 0.5 else -1)
tree_args = _TreeArgs(
log_slice,
direction,
self.step_size,
current_energy,
self._mass_inv,
)
if direction == -1:
new_tree = self._build_tree(tree.left, j, tree_args)
else:
new_tree = self._build_tree(tree.right, j, tree_args)
tree = self._combine_tree(
tree, new_tree, direction, self._mass_inv, biased=True
)
if tree.turned_or_diverged:
break
if tree.proposal is not self._positions:
positions_dict = self._dict2vec.to_dict(tree.proposal)
self.world = self.world.replace(self._to_unconstrained.inv(positions_dict))
self._positions, self._pe, self._pe_grad = (
tree.proposal,
tree.pe,
tree.pe_grad,
)
self._alpha = tree.sum_accept_prob / tree.num_proposals
return self.world, torch.zeros_like(self._alpha)
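# A hedged numeric illustration (not library code) of the generalized U-turn check in
# _is_u_turning with a unit diagonal (inverse) mass matrix: momentums that keep moving
# in the same direction do not trigger a U-turn, while opposing momentums do.
if __name__ == "__main__":
    mass_inv = torch.ones(2)
    left_momentums = torch.tensor([1.0, 0.2])
    rho = mass_inv * (left_momentums + torch.tensor([0.8, 0.1]))  # scaled sum of momentums
    print(torch.dot(left_momentums, rho) <= 0)             # False: no U-turn
    print(torch.dot(torch.tensor([-1.0, 0.2]), rho) <= 0)  # True: U-turn detected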
| beanmachine-main | src/beanmachine/ppl/inference/proposer/nuts_proposer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional, Tuple, Union
import torch
import torch.distributions as dist
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.utils import tensorops
from beanmachine.ppl.world import World
from torch import Tensor, tensor
from torch.autograd import grad
def is_scalar(val: Union[float, Tensor]) -> bool:
"""
:returns: whether val is a scalar
"""
return isinstance(val, float) or (isinstance(val, Tensor) and not val.shape)
def is_valid(vec: Tensor) -> bool:
"""
:returns: whether a tensor is valid or not (not nan and not inf)
"""
return not (torch.isnan(vec).any() or torch.isinf(vec).any())
def zero_grad(node_val: Tensor) -> None:
"""
Zeros the gradient.
"""
if node_val.is_leaf and hasattr(node_val, "grad") and node_val.grad is not None:
node_val.grad.zero_()
def compute_first_gradient(
score: Tensor,
node_val: Tensor,
create_graph: bool = False,
retain_graph: Optional[bool] = None,
) -> Tuple[bool, Tensor]:
"""
Computes the first gradient.
:param score: the score to compute the gradient of
:param node_val: the value to compute the gradient against
    :returns: a tuple of whether the gradient is valid and the first gradient
"""
if not node_val.requires_grad:
raise ValueError("requires_grad_ needs to be set for node values")
# pyre expects attributes to be defined in constructors or at class
# top levels and doesn't support attributes that get dynamically added.
elif node_val.is_leaf and node_val.grad is not None:
node_val.grad.zero_()
first_gradient = grad(
score, node_val, create_graph=create_graph, retain_graph=retain_graph
)[0]
return is_valid(first_gradient), first_gradient
def compute_hessian(first_gradient: Tensor, node_val: Tensor) -> Tuple[bool, Tensor]:
"""
Computes the hessian
:param first_gradient: the first gradient of score with respect to
node_val
:param node_val: the value to compute the hessian against
    :returns: a tuple of whether the hessian is valid and the hessian
"""
hessian = None
size = first_gradient.shape[0]
for i in range(size):
second_gradient = (
grad(
first_gradient.index_select(0, tensor([i])),
node_val,
create_graph=True,
)[0]
).reshape(-1)
hessian = (
torch.cat((hessian, (second_gradient).unsqueeze(0)), 0)
if hessian is not None
else (second_gradient).unsqueeze(0)
)
if hessian is None:
raise ValueError("Something went wrong with gradient computation")
if not is_valid(hessian):
return False, tensor(0.0)
return True, hessian
def soft_abs_inverse(neg_hessian: Tensor, alpha: float = 1e6) -> Tuple[Tensor, Tensor]:
"""
    Compute the SoftAbs-regularized inverse of a symmetric matrix via its eigen
    decomposition.
:param neg_hessian: the value that we'd like to compute the inverse of
:param alpha: the hardness parameter alpha for the SoftAbs map, see
(https://arxiv.org/pdf/1212.4693.pdf)
    :returns: the eigenvectors and the (SoftAbs-mapped) inverse eigenvalues of the matrix
"""
eig_vals, eig_vecs = torch.linalg.eigh(neg_hessian)
# pyre-fixme[6]: For 1st param expected `Tensor` but got `float`.
inverse_eig_vals = torch.tanh(alpha * eig_vals) / eig_vals
return eig_vecs, inverse_eig_vals
def compute_eigvals_eigvecs(
score: Tensor, node_val: Tensor
) -> Tuple[bool, Tensor, Tensor, Tensor]:
"""
Compute hessian and returns eigen values and eigen vectors of the negative
hessian inverse.
:param score: the score function
:param node_val: the value to compute the hessian against
:returns: first gradient, eigen values and eigen vectors of the negative
hessian inverse
"""
first_gradient, hessian = tensorops.gradients(score, node_val)
    is_valid_first_grad_and_hessian = is_valid(first_gradient) and is_valid(hessian)
if not is_valid_first_grad_and_hessian:
return False, tensor(0.0), tensor(0.0), tensor(0.0)
neg_hessian = -1 * hessian.detach()
eig_vecs, eig_vals = soft_abs_inverse(neg_hessian)
return True, first_gradient, eig_vecs, eig_vals
def hessian_of_log_prob(
world: World,
node: RVIdentifier,
transformed_node_val: torch.Tensor,
hessian_fn: Callable,
transform: dist.Transform = dist.identity_transform,
) -> Tuple[torch.Tensor, torch.Tensor]:
y = transformed_node_val.detach().clone()
y.requires_grad = True
x = transform.inv(y)
world_with_grad = world.replace({node: x})
children = world_with_grad.get_variable(node).children
score = (
world_with_grad.log_prob(children | {node})
- transform.log_abs_det_jacobian(x, y).sum()
)
return hessian_fn(score, y)
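# A small self-contained sketch (illustration only, not part of the library): using
# compute_first_gradient and compute_hessian on a quadratic score, whose exact
# Hessian is -2 * I.
if __name__ == "__main__":
    x = torch.tensor([1.0, 2.0], requires_grad=True)
    score = -(x * x).sum()  # unnormalized Gaussian log density
    grad_ok, first_grad = compute_first_gradient(score, x, create_graph=True)
    hess_ok, hessian = compute_hessian(first_grad, x)
    print(grad_ok and hess_ok, hessian)  # expect True and [[-2, 0], [0, -2]]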
| beanmachine-main | src/beanmachine/ppl/inference/proposer/newtonian_monte_carlo_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.base_single_site_mh_proposer import (
BaseSingleSiteMHProposer,
)
from beanmachine.ppl.world import World
class SingleSiteAncestralProposer(BaseSingleSiteMHProposer):
def get_proposal_distribution(self, world: World) -> dist.Distribution:
"""Propose a new value for self.node using the prior distribution."""
return world.get_variable(self.node).distribution
| beanmachine-main | src/beanmachine/ppl/inference/proposer/single_site_ancestral_proposer.py |
| beanmachine-main | src/beanmachine/ppl/inference/proposer/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from abc import abstractmethod
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer
from beanmachine.ppl.inference.utils import safe_log_prob_sum
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import World
class BaseSingleSiteMHProposer(BaseProposer):
def __init__(self, target_rv: RVIdentifier):
self.node = target_rv
def propose(self, world: World):
"""
Propose a new value for self.node with `Metropolis-Hasting algorithm
<https://en.wikipedia.org/wiki/Metropolis%E2%80%93Hastings_algorithm#Formal_derivation>`_
Classes that inherit this proposer should override `get_proposal_distribution`
to define the algorithm-specific way to sample the next state.
Args:
world: World to calculate proposal for.
"""
proposal_dist = forward_dist = self.get_proposal_distribution(world)
old_value = world[self.node]
proposed_value = proposal_dist.sample()
new_world = world.replace({self.node: proposed_value})
backward_dist = self.get_proposal_distribution(new_world)
# calculate MH acceptance probability
# log P(x, y)
old_log_prob = world.log_prob()
# log P(x', y)
new_log_prob = new_world.log_prob()
# log g(x'|x)
forward_log_prob = forward_dist.log_prob(proposed_value).sum()
# log g(x|x')
# because proposed_value is sampled from forward_dist, it is guaranteed to be
# within the valid range. However, there's no guarantee that the old value
# is in the support of backward_dist
backward_log_prob = safe_log_prob_sum(backward_dist, old_value)
# log [(P(x', y) * g(x|x')) / (P(x, y) * g(x'|x))]
accept_log_prob = (
new_log_prob + backward_log_prob - old_log_prob - forward_log_prob
)
# model size adjustment log (n/n')
accept_log_prob += math.log(len(world)) - math.log(len(new_world))
if torch.isnan(accept_log_prob):
accept_log_prob = torch.tensor(
float("-inf"),
device=accept_log_prob.device,
dtype=accept_log_prob.dtype,
)
return new_world, accept_log_prob
@abstractmethod
def get_proposal_distribution(self, world: World) -> dist.Distribution:
"""Return a probability distribution of moving self.node to a new value
conditioned on its current value in world.
"""
raise NotImplementedError
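# A hedged sketch of how a concrete proposer can be built on top of this class
# (illustration only; `GaussianDriftProposer` and `scale` are made-up names, not part
# of the library): only get_proposal_distribution needs to be overridden, while
# propose() above supplies the Metropolis-Hastings accept/reject logic.
class GaussianDriftProposer(BaseSingleSiteMHProposer):
    def __init__(self, target_rv: RVIdentifier, scale: float = 0.1):
        super().__init__(target_rv)
        self.scale = scale
    def get_proposal_distribution(self, world: World) -> dist.Distribution:
        # symmetric random-walk proposal centered at the node's current value
        return dist.Normal(world[self.node], self.scale)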
| beanmachine-main | src/beanmachine/ppl/inference/proposer/base_single_site_mh_proposer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.distributions as dist
class NormalEig(dist.Distribution):
"""
A multivariate normal distribution where the covariance is specified
through its eigen decomposition
"""
def __init__(self, mean, eig_vals, eig_vecs):
"""
mean - The mean of the multivariate normal.
eig_vals - 1d vector of the eigen values (all positive) of the covar
eig_vecs - 2d vector whose columns are the eigen vectors of the covar
The covariance matrix of the multivariate normal is given by:
eig_vecs @ (torch.eye(len(eig_vals)) * eig_vals) @ eig_vecs.T
"""
assert mean.dim() == 1
self.n = mean.shape[0]
assert eig_vals.shape == (self.n,)
assert eig_vecs.shape == (self.n, self.n)
self._mean = mean
self.eig_vecs = eig_vecs
self.sqrt_eig_vals = eig_vals.sqrt().unsqueeze(0)
# square root of the covariance matrix
self.sqrt_covar = self.sqrt_eig_vals * eig_vecs
# log of sqrt of determinant
self.log_sqrt_det = eig_vals.log().sum() / 2.0
# a base distribution of independent normals is used to draw
# samples that will be stretched along the eigen directions
self.base_dist = torch.distributions.normal.Normal(
torch.zeros(1, self.n).to(dtype=eig_vals.dtype),
torch.ones(1, self.n).to(dtype=eig_vals.dtype),
)
self.singular_eig_decompositions = eig_vals, eig_vecs
event_shape = self._mean.shape[-1:]
batch_shape = torch.broadcast_shapes(
self.sqrt_covar.shape[:-2], self._mean.shape[:-1]
)
super().__init__(batch_shape, event_shape)
def sample(self, sample_shape=torch.Size()): # noqa
with torch.no_grad():
z = torch.normal(mean=0.0, std=1.0, size=(self.n, 1))
z = z.to(dtype=self._mean.dtype)
return self._mean + (self.sqrt_covar @ z).squeeze(1)
def log_prob(self, value):
assert value.shape == (self.n,)
z = ((value - self._mean).unsqueeze(0) @ self.eig_vecs) / self.sqrt_eig_vals
return self.base_dist.log_prob(z).sum() - self.log_sqrt_det
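# Minimal usage sketch (illustration only, not part of the library): build a NormalEig
# from the eigendecomposition of a covariance matrix and compare its log density with
# torch.distributions.MultivariateNormal.
if __name__ == "__main__":
    mean = torch.zeros(3)
    covar = torch.tensor([[2.0, 0.5, 0.0], [0.5, 1.0, 0.0], [0.0, 0.0, 3.0]])
    eig_vals, eig_vecs = torch.linalg.eigh(covar)
    d = NormalEig(mean, eig_vals, eig_vecs)
    x = d.sample()
    reference = dist.MultivariateNormal(mean, covariance_matrix=covar)
    # the two log densities should agree up to numerical error
    print(d.log_prob(x).item(), reference.log_prob(x).item())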
| beanmachine-main | src/beanmachine/ppl/inference/proposer/normal_eig.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.base_single_site_mh_proposer import (
BaseSingleSiteMHProposer,
)
from beanmachine.ppl.world import World
class SingleSiteUniformProposer(BaseSingleSiteMHProposer):
def get_proposal_distribution(self, world: World) -> dist.Distribution:
"""Propose a new value for self.node using the prior distribution."""
node_dist = world.get_variable(self.node).distribution
if isinstance(node_dist, dist.Bernoulli):
return dist.Bernoulli(torch.ones(node_dist.param_shape) / 2.0)
elif isinstance(node_dist, dist.Categorical):
return dist.Categorical(torch.ones(node_dist.param_shape))
else:
# default to ancestral sampling
# TODO: we should sample from a transformed dist
# that transforms uniform to the support of the node
return node_dist
| beanmachine-main | src/beanmachine/ppl/inference/proposer/single_site_uniform_proposer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import warnings
from typing import cast, Dict, Set, Union
import torch
import torch.distributions as dist
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import RVDict, World
from beanmachine.ppl.world.utils import get_default_transforms
class WindowScheme:
"""
    Splits the adaptation iterations into a series of monotonically increasing windows,
which can be used to learn the mass matrices in HMC.
Reference:
[1] "HMC algorithm parameters" from Stan Reference Manual
https://mc-stan.org/docs/2_26/reference-manual/hmc-algorithm-parameters.html#automatic-parameter-tuning
"""
def __init__(self, num_adaptive_samples: int):
# from Stan
if num_adaptive_samples < 20:
# do not create any window for adapting mass matrix
self._start_iter = self._end_iter = num_adaptive_samples
self._window_size = 0
elif num_adaptive_samples < 150:
self._start_iter = int(0.15 * num_adaptive_samples)
self._end_iter = int(0.9 * num_adaptive_samples)
self._window_size = self._end_iter - self._start_iter
else:
self._start_iter = 75
self._end_iter = num_adaptive_samples - 50
self._window_size = 25
self._iteration = 0
@property
def is_in_window(self):
return self._iteration >= self._start_iter and self._iteration < self._end_iter
@property
def is_end_window(self):
return self._iteration - self._start_iter == self._window_size - 1
def step(self):
if self.is_end_window:
# prepare for next window
self._start_iter = self._iteration + 1
if self._end_iter - self._start_iter < self._window_size * 4:
# window sizes should increase monotonically
self._window_size = self._end_iter - self._start_iter
else:
self._window_size *= 2
self._iteration += 1
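def _window_scheme_demo(num_adaptive_samples: int = 200):
    """A hedged illustration, not used by the library: return the iterations at which
    WindowScheme closes a window. With 200 warmup iterations the Stan-style schedule
    above closes windows at iterations 99 and 149 (window sizes 25 and 50)."""
    scheme = WindowScheme(num_adaptive_samples)
    window_ends = []
    for i in range(num_adaptive_samples):
        if scheme.is_end_window:
            window_ends.append(i)
        scheme.step()
    return window_ends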
class DualAverageAdapter:
"""
    The dual averaging mechanism that was introduced in [1] and applied to HMC and
    NUTS for adapting step size in [2]. The implementation and notation follow [2].
Reference:
[1] Yurii Nesterov. "Primal-dual subgradient methods for convex problems" (2009).
https://doi.org/10.1007/s10107-007-0149-x
[2] Matthew Hoffman and Andrew Gelman. "The No-U-Turn Sampler: Adaptively
Setting Path Lengths in Hamiltonian Monte Carlo" (2014).
https://arxiv.org/abs/1111.4246
"""
def __init__(self, initial_epsilon: torch.Tensor, delta: float = 0.8):
self._log_avg_epsilon = torch.zeros_like(initial_epsilon)
self._H = torch.zeros_like(initial_epsilon)
self._mu = torch.log(10 * initial_epsilon)
self._t0 = 10
self._delta = delta # target mean accept prob
self._gamma = 0.05
self._kappa = 0.75
self._m = 1.0 # iteration count
def step(self, alpha: torch.Tensor) -> torch.Tensor:
H_frac = 1.0 / (self._m + self._t0)
self._H = ((1 - H_frac) * self._H) + H_frac * (
self._delta - alpha.to(self._log_avg_epsilon)
)
log_epsilon = self._mu - (math.sqrt(self._m) / self._gamma) * self._H
step_frac = self._m ** (-self._kappa)
self._log_avg_epsilon = (
step_frac * log_epsilon + (1 - step_frac) * self._log_avg_epsilon
)
self._m += 1
return torch.exp(cast(torch.Tensor, log_epsilon))
def finalize(self) -> torch.Tensor:
return torch.exp(self._log_avg_epsilon)
class MassMatrixAdapter:
"""
Adapts the mass matrix. The (inverse) mass matrix is initialized to identity
and will be updated during adaptation windows.
Args:
        initial_positions: The flattened position tensor, used to determine the
            size (and dtype) of the mass matrix.
        full_mass_matrix: If True, adapt a dense mass matrix; otherwise adapt a
            diagonal one. Defaults to False.
Reference:
[1] "HMC algorithm parameters" from Stan Reference Manual
https://mc-stan.org/docs/2_26/reference-manual/hmc-algorithm-parameters.html#euclidean-metric
"""
def __init__(self, initial_positions: torch.Tensor, full_mass_matrix: bool = False):
# inverse mass matrices, aka the inverse "metric"
self.mass_inv = torch.ones_like(initial_positions)
# distribution objects for generating momentums
self.momentum_dist: dist.Distribution = dist.Normal(0.0, self.mass_inv)
if full_mass_matrix:
self.mass_inv = torch.diag(self.mass_inv)
self.diagonal = not full_mass_matrix
self._adapter = WelfordCovariance(diagonal=self.diagonal)
def initialize_momentums(self, positions: torch.Tensor) -> torch.Tensor:
"""
Randomly draw momentum from MultivariateNormal(0, M). This momentum variable
is denoted as p in [1] and r in [2].
Args:
positions: the positions of the energy function.
"""
return self.momentum_dist.sample().to(positions.dtype)
def step(self, positions: torch.Tensor):
self._adapter.step(positions)
def finalize(self) -> None:
try:
mass_inv = self._adapter.finalize()
if self.diagonal:
self.momentum_dist = dist.Normal(
torch.zeros_like(mass_inv), torch.sqrt(mass_inv).reciprocal()
)
else:
self.momentum_dist = dist.MultivariateNormal(
torch.zeros_like(mass_inv.diag()), precision_matrix=mass_inv
)
self.mass_inv = mass_inv
except RuntimeError as e:
warnings.warn(str(e))
# reset adapters to get ready for the next window
self._adapter = WelfordCovariance(diagonal=self.diagonal)
class WelfordCovariance:
"""
An implementation of Welford's online algorithm for estimating the (co)variance of
samples.
Reference:
[1] "Algorithms for calculating variance" on Wikipedia
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
"""
def __init__(self, diagonal: bool = True):
self._mean: Union[float, torch.Tensor] = 0.0
self._count = 0
self._M2: Union[float, torch.Tensor] = 0.0
self._diagonal = diagonal
def step(self, sample: torch.Tensor) -> None:
self._count += 1
delta = sample - self._mean
self._mean += delta / self._count
delta2 = sample - self._mean
if self._diagonal:
self._M2 += delta * delta2
else:
self._M2 += torch.outer(delta, delta2)
def finalize(self, regularize: bool = True) -> torch.Tensor:
if self._count < 2:
raise RuntimeError(
"Number of samples is too small to estimate the (co)variance"
)
covariance = cast(torch.Tensor, self._M2) / (self._count - 1)
if not regularize:
return covariance
# from Stan: regularize mass matrix for numerical stability
covariance *= self._count / (self._count + 5.0)
padding = 1e-3 * 5.0 / (self._count + 5.0)
# bring covariance closer to a unit diagonal mass matrix
if self._diagonal:
covariance += padding
else:
covariance += padding * torch.eye(covariance.shape[0])
return covariance
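def _welford_demo():
    """A hedged sketch, not used by the library: the unregularized WelfordCovariance
    estimate matches the plain unbiased sample variance of the data."""
    samples = torch.randn(1000, 3) * torch.tensor([1.0, 2.0, 3.0])
    estimator = WelfordCovariance(diagonal=True)
    for sample in samples:
        estimator.step(sample)
    # the two results should agree up to floating point error
    return estimator.finalize(regularize=False), samples.var(dim=0)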
class DictTransform:
"""
A general class for applying a dictionary of Transforms to a dictionary of
Tensors
Args:
transforms: Dict of torch.distributions.Transform keyed by the RVIdentifier
"""
def __init__(self, transforms: Dict[RVIdentifier, dist.Transform]):
self.transforms = transforms
def __call__(self, node_vals: RVDict) -> RVDict:
"""Apply each Transform to the corresponding Tensor in node_vals"""
return {node: self.transforms[node](val) for node, val in node_vals.items()}
def inv(self, node_vals: RVDict) -> RVDict:
"""Apply the inverse of each Transform to the corresponding Tensor in node_vals"""
return {node: self.transforms[node].inv(val) for node, val in node_vals.items()}
def log_abs_det_jacobian(
self, untransformed_vals: RVDict, transformed_vals: RVDict
) -> torch.Tensor:
"""Computes the sum of log det jacobian `log |dy/dx|` on the pairs of Tensors"""
jacobian = torch.tensor(0.0)
for node in untransformed_vals:
jacobian = jacobian + (
self.transforms[node]
.log_abs_det_jacobian(untransformed_vals[node], transformed_vals[node])
.sum()
)
return jacobian
class RealSpaceTransform(DictTransform):
"""
Transform a dictionary of Tensor values from a constrained space to the unconstrained
(real) space.
Args:
world: World which contains the random variables of interest.
target_rvs: Set of RVIdentifiers corresponding to the random variables of interest.
"""
def __init__(self, world: World, target_rvs: Set[RVIdentifier]):
transforms = {}
for node in target_rvs:
node_distribution = world.get_variable(node).distribution
if node_distribution.support.is_discrete:
raise TypeError(
f"HMC can perform inference only on continuous latent random variables, but node {node} is discrete."
)
transforms[node] = get_default_transforms(node_distribution)
super().__init__(transforms)
| beanmachine-main | src/beanmachine/ppl/inference/proposer/hmc_utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (
SingleSiteAncestralProposer,
)
from beanmachine.ppl.world import World
from beanmachine.ppl.world.utils import is_constraint_eq
class SingleSiteRandomWalkProposer(SingleSiteAncestralProposer):
def __init__(
self,
node,
step_size: float,
):
self.step_size = step_size
self.target_acc_rate = {False: torch.tensor(0.44), True: torch.tensor(0.234)}
self._iter = 0
super().__init__(node)
def do_adaptation(self, world, accept_log_prob, *args, **kwargs) -> None:
if torch.isnan(accept_log_prob):
return
accept_prob = accept_log_prob.exp()
val_shape = world[self.node].shape
if len(val_shape) == 0 or val_shape[0] == 1:
target_acc_rate = self.target_acc_rate[False]
c = torch.reciprocal(target_acc_rate)
else:
target_acc_rate = self.target_acc_rate[True]
# pyre-fixme[6]: For 1st param expected `Tensor` but got `float`.
c = torch.reciprocal(1.0 - target_acc_rate)
new_step_size = self.step_size * torch.exp(
(accept_prob - target_acc_rate) * c / (self._iter + 1.0)
)
self._iter += 1
self.step_size = new_step_size.item()
def get_proposal_distribution(self, world: World) -> dist.Distribution:
"""Propose a new value for self.node using the prior distribution."""
node = world.get_variable(self.node)
node_support = node.distribution.support
if is_constraint_eq(node_support, dist.constraints.real):
return dist.Normal(node.value, self.step_size)
elif any(
is_constraint_eq(
node_support,
(dist.constraints.greater_than, dist.constraints.greater_than_eq),
)
):
lower_bound = node_support.lower_bound
proposal_distribution = self.gamma_dist_from_moments(
node.value - lower_bound, self.step_size**2
)
transform = dist.AffineTransform(loc=lower_bound, scale=1.0)
transformed_proposal = dist.TransformedDistribution(
proposal_distribution, transform
)
return transformed_proposal
elif is_constraint_eq(node_support, dist.constraints.interval):
lower_bound = node_support.lower_bound
width = node_support.upper_bound - lower_bound
mu = (node.value - lower_bound) / width
sigma = (
torch.ones(node.value.shape, device=node.value.device)
* self.step_size
/ width
)
proposal_distribution = self.beta_dist_from_moments(mu, sigma)
transform = dist.AffineTransform(loc=lower_bound, scale=width)
transformed_proposal = dist.TransformedDistribution(
proposal_distribution, transform
)
return transformed_proposal
elif is_constraint_eq(node_support, dist.constraints.simplex):
proposal_distribution = self.dirichlet_dist_from_moments(
node.value, self.step_size
)
return proposal_distribution
else:
# default to ancestral
return super().get_proposal_distribution(world)
def gamma_dist_from_moments(self, expectation, sigma):
"""
Returns a Gamma distribution.
:param expectation: expectation value
:param sigma: sigma value
        :returns: the Gamma distribution given expectation and sigma.
"""
beta = expectation / (sigma**2)
beta = torch.clamp(beta, min=1e-3)
alpha = expectation * beta
alpha = torch.clamp(alpha, min=1e-3)
distribution = dist.Gamma(concentration=alpha, rate=beta)
return distribution
def beta_dist_from_moments(self, mu, sigma):
"""
Returns a Beta distribution.
:param mu: mu value
:param sigma: sigma value
:returns: returns the Beta distribution given mu and sigma.
"""
mu = torch.clamp(mu, 1e-3, 1 - 1e-3)
sigma = torch.clamp(sigma, 1e-3, (mu * (1 - mu)).min().item())
"""
https://stats.stackexchange.com/questions/12232/calculating-the-
parameters-of-a-beta-distribution-using-the-mean-and-variance
"""
alpha = ((1.0 - mu) / (sigma**2) - (1.0 / mu)) * (mu**2)
beta = alpha * (1.0 / mu - 1.0)
distribution = dist.Beta(concentration1=alpha, concentration0=beta)
return distribution
def dirichlet_dist_from_moments(self, mu, sigma):
"""
Returns a Dirichlet distribution. The variances of a Dirichlet
distribution are inversely proportional to the norm of the concentration
vector. However, variance is only set as a scalar, not as a vector.
So the individual variances of the Dirichlet are not tuned, only the
magnitude of the entire vector.
:param mu: mu value
:param sigma: sigma value
:returns: returns the Dirichlet distribution given mu and sigma.
"""
alpha = mu / (torch.norm(mu) * sigma**2)
return dist.Dirichlet(concentration=alpha)
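# A hedged moment-matching check (illustration only, not part of the library): the
# Gamma returned by gamma_dist_from_moments has mean `expectation` and variance
# `sigma ** 2`, which is what the proposal above relies on.
if __name__ == "__main__":
    # the node argument is irrelevant for this standalone check
    proposer = SingleSiteRandomWalkProposer(node=None, step_size=0.5)
    g = proposer.gamma_dist_from_moments(torch.tensor(2.0), torch.tensor(0.3))
    print(g.mean, g.variance)  # expect approximately 2.0 and 0.09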
| beanmachine-main | src/beanmachine/ppl/inference/proposer/single_site_random_walk_proposer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Generic, TypeVar
import torch
KeyType = TypeVar("KeyType")
class DictToVecConverter(Generic[KeyType]):
"""
A utility class to convert a dictionary of Tensors into a single flattened
Tensor or the other way around.
Args:
example_dict: A dict that will be used to determine the order of the
keys and the size of the flattened Tensor.
"""
def __init__(self, example_dict: Dict[KeyType, torch.Tensor]) -> None:
# determine the order of the keys
self._keys = list(example_dict.keys())
# store the size of the values, which will be used when we want to
# reshape them back
self._val_shapes = [example_dict[key].shape for key in self._keys]
        # compute the indices that each of the entries corresponds to, e.g. for
# keys[0], its value will correspond to flatten_vec[idxs[0] : idxs[1]]
val_sizes = [example_dict[key].numel() for key in self._keys]
self._idxs = list(torch.cumsum(torch.tensor([0] + val_sizes), dim=0))
def to_vec(self, dict_in: Dict[KeyType, torch.Tensor]) -> torch.Tensor:
"""Concatenate the entries of a dictionary to a flattened Tensor"""
return torch.cat([dict_in[key].flatten() for key in self._keys])
def to_dict(self, vec_in: torch.Tensor) -> Dict[KeyType, torch.Tensor]:
"""Reconstruct a dictionary out of a flattened Tensor"""
retval = {}
for key, shape, idx_begin, idx_end in zip(
self._keys, self._val_shapes, self._idxs, self._idxs[1:]
):
retval[key] = vec_in[idx_begin:idx_end].reshape(shape)
return retval
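# Minimal usage sketch (illustration only): round-tripping a dictionary of tensors.
# String keys are used here purely for demonstration; in the inference code the keys
# are RVIdentifiers.
if __name__ == "__main__":
    example = {"mu": torch.zeros(2, 3), "sigma": torch.ones(4)}
    converter = DictToVecConverter(example)
    flat = converter.to_vec(example)  # a single tensor of shape (10,)
    restored = converter.to_dict(flat)  # same keys and shapes as `example`
    print(flat.shape, restored["mu"].shape, restored["sigma"].shape)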
| beanmachine-main | src/beanmachine/ppl/inference/proposer/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABCMeta, abstractmethod
from typing import Tuple
import torch
from beanmachine.ppl.world import World
class BaseProposer(metaclass=ABCMeta):
@abstractmethod
def propose(self, world: World) -> Tuple[World, torch.Tensor]:
raise NotImplementedError
def do_adaptation(self, world, accept_log_prob, *args, **kwargs) -> None:
...
def finish_adaptation(self) -> None:
...
| beanmachine-main | src/beanmachine/ppl/inference/proposer/base_proposer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import warnings
from typing import Callable, Optional, Set, Tuple
import torch
from beanmachine.ppl.experimental.torch_jit_backend import jit_compile, TorchJITBackend
from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer
from beanmachine.ppl.inference.proposer.hmc_utils import (
DualAverageAdapter,
MassMatrixAdapter,
RealSpaceTransform,
WindowScheme,
)
from beanmachine.ppl.inference.proposer.utils import DictToVecConverter
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import World
class HMCProposer(BaseProposer):
"""
The basic Hamiltonian Monte Carlo (HMC) algorithm as described in [1] plus a
dual-averaging mechanism for dynamically adjusting the step size [2].
Reference:
[1] Radford Neal. "MCMC Using Hamiltonian Dynamics" (2011).
https://arxiv.org/abs/1206.1901
[2] Matthew Hoffman and Andrew Gelman. "The No-U-Turn Sampler: Adaptively
Setting Path Lengths in Hamiltonian Monte Carlo" (2014).
https://arxiv.org/abs/1111.4246
    The mass matrix M is initialized to the identity and, when adapt_mass_matrix is
    True, is adapted during warmup using the Welford scheme.
Args:
initial_world: Initial world to propose from.
target_rvs: Set of RVIdentifiers to indicate which variables to propose.
num_adaptive_samples: Number of adaptive samples to run.
trajectory_length: Length of single trajectory.
initial_step_size: Initial step size.
adapt_step_size: Flag whether to adapt step size, defaults to True.
        adapt_mass_matrix: Flag whether to adapt mass matrix, defaults to True.
target_accept_prob: Target accept prob, defaults to 0.8.
        jit_backend: The TorchJITBackend used to (optionally) accelerate
            inference; defaults to TorchJITBackend.NNC.
"""
def __init__(
self,
initial_world: World,
target_rvs: Set[RVIdentifier],
num_adaptive_samples: int,
trajectory_length: float,
initial_step_size: float = 1.0,
adapt_step_size: bool = True,
adapt_mass_matrix: bool = True,
full_mass_matrix: bool = False,
target_accept_prob: float = 0.8,
jit_backend: TorchJITBackend = TorchJITBackend.NNC,
):
self.world = initial_world
self._target_rvs = target_rvs
self._to_unconstrained = RealSpaceTransform(initial_world, target_rvs)
# concatenate and flatten the positions into a single tensor
positions_dict = self._to_unconstrained(
{node: initial_world[node] for node in self._target_rvs}
)
self._dict2vec = DictToVecConverter(positions_dict)
self._positions = self._dict2vec.to_vec(positions_dict)
# cache pe and pe_grad to prevent re-computation
self._pe, self._pe_grad = self._potential_grads(self._positions)
# initialize parameters
self.trajectory_length = trajectory_length
# initialize adapters
self.adapt_step_size = adapt_step_size
self.adapt_mass_matrix = adapt_mass_matrix
# we need mass matrix adapter to sample momentums
self._mass_matrix_adapter = MassMatrixAdapter(self._positions, full_mass_matrix)
if self.adapt_step_size:
self.step_size = self._find_reasonable_step_size(
torch.as_tensor(initial_step_size),
self._positions,
self._pe,
self._pe_grad,
)
self._step_size_adapter = DualAverageAdapter(
self.step_size, target_accept_prob
)
else:
self.step_size = torch.as_tensor(initial_step_size)
if self.adapt_mass_matrix:
self._window_scheme = WindowScheme(num_adaptive_samples)
else:
self._window_scheme = None
# alpha will store the accept prob and will be used to adapt step size
self._alpha = None
# pyre-ignore[8]
self._leapfrog_step = jit_compile(self._leapfrog_step, jit_backend)
@property
def _initialize_momentums(self) -> Callable:
return self._mass_matrix_adapter.initialize_momentums
@property
def _mass_inv(self) -> torch.Tensor:
return self._mass_matrix_adapter.mass_inv
def _scale_r(self, momentums: torch.Tensor, mass_inv: torch.Tensor) -> torch.Tensor:
"""Return the momentums (r) scaled by M^{-1} @ r"""
if self._mass_matrix_adapter.diagonal:
return mass_inv * momentums
else:
return mass_inv @ momentums
def _kinetic_energy(
self, momentums: torch.Tensor, mass_inv: torch.Tensor
) -> torch.Tensor:
"""Returns the kinetic energy KE = 1/2 * p^T @ M^{-1} @ p (equation 2.6 in [1])"""
r_scale = self._scale_r(momentums, mass_inv)
return torch.dot(momentums, r_scale) / 2
def _kinetic_grads(
self, momentums: torch.Tensor, mass_inv: torch.Tensor
) -> torch.Tensor:
"""Returns a dictionary of gradients of kinetic energy function with respect to
the momentum at each site, computed as M^{-1} @ p"""
return self._scale_r(momentums, mass_inv)
def _potential_energy(self, positions: torch.Tensor) -> torch.Tensor:
"""Returns the potential energy PE = - L(world) (the joint log likelihood of the
current values)"""
positions_dict = self._dict2vec.to_dict(positions)
constrained_vals = self._to_unconstrained.inv(positions_dict)
log_joint = self.world.replace(constrained_vals).log_prob()
log_joint = log_joint - self._to_unconstrained.log_abs_det_jacobian(
constrained_vals, positions_dict
)
return -log_joint
def _potential_grads(
self, positions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Returns potential energy as well as a dictionary of its gradient with
respect to the value at each site."""
positions.requires_grad = True
try:
pe = self._potential_energy(positions)
grads = torch.autograd.grad(pe, positions)[0]
# We return NaN on Cholesky factorization errors which can be gracefully
# handled by NUTS/HMC.
except RuntimeError as e:
err_msg = str(e)
if "singular U" in err_msg or "input is not positive-definite" in err_msg:
warnings.warn(
"Numerical error in potential energy computation."
" If automatic recovery does not happen, plese file an issue"
" at https://github.com/facebookresearch/beanmachine/issues/."
)
grads = torch.full_like(positions, float("nan"))
pe = torch.tensor(
float("nan"), device=grads[0].device, dtype=grads[0].dtype
)
else:
raise e
positions.requires_grad = False
return pe.detach(), grads
def _hamiltonian(
self,
positions: torch.Tensor,
momentums: torch.Tensor,
mass_inv: torch.Tensor,
pe: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""Returns the value of Hamiltonian equation (equatino 2.5 in [1]). This function
will be more efficient if pe is provided as it only needs to compute the
kinetic energy"""
ke = self._kinetic_energy(momentums, mass_inv)
if pe is None:
pe = self._potential_energy(positions)
return pe + ke
def _leapfrog_step(
self,
positions: torch.Tensor,
momentums: torch.Tensor,
step_size: torch.Tensor,
mass_inv: torch.Tensor,
pe_grad: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""Performs a single leapfrog integration (alson known as the velocity Verlet
method) as described in equation 2.28-2.30 in [1]. If the values of potential
grads of the current world is provided, then we only needs to compute the
gradient once per step."""
if pe_grad is None:
_, pe_grad = self._potential_grads(positions)
new_momentums = momentums - step_size * pe_grad / 2
ke_grad = self._kinetic_grads(new_momentums, mass_inv)
new_positions = positions + step_size * ke_grad
pe, pe_grad = self._potential_grads(new_positions)
new_momentums = new_momentums - step_size * pe_grad / 2
return new_positions, new_momentums, pe, pe_grad
def _leapfrog_updates(
self,
positions: torch.Tensor,
momentums: torch.Tensor,
trajectory_length: float,
step_size: torch.Tensor,
mass_inv: torch.Tensor,
pe_grad: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""Run multiple iterations of leapfrog integration until the length of the
trajectory is greater than the specified trajectory_length."""
# we should run at least 1 step
num_steps = max(math.ceil(trajectory_length / step_size.item()), 1)
for _ in range(num_steps):
positions, momentums, pe, pe_grad = self._leapfrog_step(
positions, momentums, step_size, mass_inv, pe_grad
)
# pyre-ignore[61]: `pe` may not be initialized here.
return positions, momentums, pe, pe_grad
def _find_reasonable_step_size(
self,
initial_step_size: torch.Tensor,
positions: torch.Tensor,
pe: torch.Tensor,
pe_grad: torch.Tensor,
) -> torch.Tensor:
"""A heuristic of finding a reasonable initial step size (epsilon) as introduced
in Algorithm 4 of [2]."""
step_size = initial_step_size
# the target is log(0.5) in the paper but is log(0.8) on Stan:
# https://github.com/stan-dev/stan/pull/356
target = math.log(0.8)
momentums = self._initialize_momentums(positions)
energy = self._hamiltonian(
positions, momentums, self._mass_inv, pe
) # -log p(positions, momentums)
new_positions, new_momentums, new_pe, _ = self._leapfrog_step(
positions, momentums, step_size, self._mass_inv, pe_grad
)
new_energy = self._hamiltonian(
new_positions, new_momentums, self._mass_inv, new_pe
)
# NaN will evaluate to False and set direction to -1
new_direction = direction = 1 if energy - new_energy > target else -1
step_size_scale = 2**direction
while new_direction == direction:
step_size *= step_size_scale
if step_size == 0:
raise ValueError(
f"Current step size is {step_size}. No acceptably small step size could be found."
"Perhaps the posterior is not continuous?"
)
if step_size > 1e7:
raise ValueError(
f"Current step size is {step_size}. Posterior is improper. Please check your model"
)
# not covered in the paper, but both Stan and Pyro re-sample the momentum
# after each update
momentums = self._initialize_momentums(positions)
energy = self._hamiltonian(positions, momentums, self._mass_inv, pe)
new_positions, new_momentums, new_pe, _ = self._leapfrog_step(
positions, momentums, step_size, self._mass_inv, pe_grad
)
new_energy = self._hamiltonian(
new_positions, new_momentums, self._mass_inv, new_pe
)
new_direction = 1 if energy - new_energy > target else -1
return step_size
def propose(self, world: World) -> Tuple[World, torch.Tensor]:
if world is not self.world:
# re-compute cached values since world was modified by other sources
self.world = world
self._positions = self._dict2vec.to_vec(
self._to_unconstrained({node: world[node] for node in self._target_rvs})
)
self._pe, self._pe_grad = self._potential_grads(self._positions)
momentums = self._initialize_momentums(self._positions)
current_energy = self._hamiltonian(
self._positions, momentums, self._mass_inv, self._pe
)
positions, momentums, pe, pe_grad = self._leapfrog_updates(
self._positions,
momentums,
self.trajectory_length,
self.step_size,
self._mass_inv,
self._pe_grad,
)
new_energy = torch.nan_to_num(
self._hamiltonian(positions, momentums, self._mass_inv, pe),
float("inf"),
)
delta_energy = new_energy - current_energy
self._alpha = torch.clamp(torch.exp(-delta_energy), max=1.0)
# accept/reject new world
if torch.bernoulli(self._alpha):
positions_dict = self._dict2vec.to_dict(positions)
self.world = self.world.replace(self._to_unconstrained.inv(positions_dict))
# update cache
self._positions, self._pe, self._pe_grad = positions, pe, pe_grad
return self.world, torch.zeros_like(self._alpha)
def do_adaptation(self, *args, **kwargs) -> None:
if self._alpha is None:
return
if self.adapt_step_size:
self.step_size = self._step_size_adapter.step(self._alpha)
if self.adapt_mass_matrix:
window_scheme = self._window_scheme
assert window_scheme is not None
if window_scheme.is_in_window:
self._mass_matrix_adapter.step(self._positions)
if window_scheme.is_end_window:
# update mass matrix at the end of a window
self._mass_matrix_adapter.finalize()
if self.adapt_step_size:
self.step_size = self._step_size_adapter.finalize()
self.step_size = self._find_reasonable_step_size(
self.step_size,
self._positions,
self._pe,
self._pe_grad,
)
self._step_size_adapter = DualAverageAdapter(self.step_size)
window_scheme.step()
self._alpha = None
def finish_adaptation(self) -> None:
if self.adapt_step_size:
self.step_size = self._step_size_adapter.finalize()
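# A standalone numeric illustration (assumption, not library code) of the leapfrog
# update performed by HMCProposer._leapfrog_step, on a 1-D standard normal target
# with PE(q) = q**2 / 2 and unit mass: the total energy is approximately conserved.
if __name__ == "__main__":
    q, p = torch.tensor(1.0), torch.tensor(0.5)
    step = 0.1
    energy_start = 0.5 * q**2 + 0.5 * p**2
    for _ in range(100):
        p = p - step * q / 2  # half-step on momentum (dPE/dq = q)
        q = q + step * p      # full step on position (dKE/dp = p)
        p = p - step * q / 2  # half-step on momentum
    energy_end = 0.5 * q**2 + 0.5 * p**2
    print(energy_start.item(), energy_end.item())  # nearly equal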
| beanmachine-main | src/beanmachine/ppl/inference/proposer/hmc_proposer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import cast, List, Tuple
import torch
from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer
from beanmachine.ppl.world import World
class SequentialProposer(BaseProposer):
def __init__(self, proposers: List[BaseProposer]):
self.proposers = proposers
def propose(self, world: World) -> Tuple[World, torch.Tensor]:
"""
Computes the joint log prob of all the proposers
for the world.
Args:
world: World to compute joint log prob of
"""
accept_log_prob = 0.0
for proposer in self.proposers:
world, log_prob = proposer.propose(world)
accept_log_prob += log_prob
return world, cast(torch.Tensor, accept_log_prob)
def do_adaptation(self, *args, **kwargs) -> None:
"""
Run `do_adaptation` for all of the proposers
"""
for proposer in self.proposers:
proposer.do_adaptation(*args, **kwargs)
def finish_adaptation(self) -> None:
for proposer in self.proposers:
proposer.finish_adaptation()
| beanmachine-main | src/beanmachine/ppl/inference/proposer/sequential_proposer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Callable, Optional, Tuple, TypeVar
from typing_extensions import ParamSpec
logger = logging.getLogger(__name__)
P = ParamSpec("P")
R = TypeVar("R")
def nnc_jit(
f: Callable[P, R], static_argnums: Optional[Tuple[int]] = None
) -> Callable[P, R]:
"""
A helper function that lazily imports the NNC utils, which initialize the compiler
and displaying a experimental warning, then invoke the underlying nnc_jit on
the function f.
"""
try:
# The setup code in `nnc.utils` will only be executed once in a Python session
from beanmachine.ppl.inference.proposer.nnc.utils import nnc_jit as raw_nnc_jit
except Exception as e:
logger.warning(
f"Fails to initialize NNC due to the following error: {str(e)}\n"
"Falling back to default inference engine."
)
# return original function without change
return f
return raw_nnc_jit(f, static_argnums)
__all__ = ["nnc_jit"]
| beanmachine-main | src/beanmachine/ppl/inference/proposer/nnc/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import torch
import torch.jit
from functorch.compile import nnc_jit
# the warning will only be shown to user once when this module is imported
warnings.warn(
"The support of NNC compiler is experimental and the API is subject to"
"change in the future releases of Bean Machine. For questions regarding NNC, please"
"checkout the functorch project (https://github.com/pytorch/functorch)."
)
# allows reductions to be compiled by NNC
# pyre-fixme[16]: Module `_C` has no attribute `_jit_set_texpr_reductions_enabled`.
torch._C._jit_set_texpr_reductions_enabled(True)
__all__ = ["nnc_jit"]
| beanmachine-main | src/beanmachine/ppl/inference/proposer/nnc/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Tuple
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.newtonian_monte_carlo_utils import (
hessian_of_log_prob,
is_valid,
)
from beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (
SingleSiteAncestralProposer,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.utils import tensorops
from beanmachine.ppl.world import World
LOGGER = logging.getLogger("beanmachine")
class SingleSiteSimplexSpaceNMCProposer(SingleSiteAncestralProposer):
"""
Single-Site Simplex Newtonian Monte Carlo Proposer
See sec. 3.2 of [1]
[1] Arora, Nim, et al. `Newtonian Monte Carlo: single-site MCMC meets second-order gradient methods`
"""
def __init__(
self, node: RVIdentifier, transform: dist.Transform = dist.identity_transform
):
super().__init__(node)
self._transform = transform
self._proposal_distribution = None
def compute_alpha(
self, world: World, min_alpha_value: float = 1e-3
) -> Tuple[bool, torch.Tensor]:
"""
Computes alpha of the Dirichlet proposal given the node.
alpha = 1 - (x^2) (hessian[i, i] - max(hessian[i]))
        where max(hessian[i]) is the maximum of the ith row of the hessian
        excluding the diagonal entry.
        :param world: the world in which we're proposing a new value for the node
:returns: alpha of the Dirichlet distribution as proposal distribution
"""
node_val = self._transform(world[self.node])
first_gradient, hessian_diag_minus_max = hessian_of_log_prob(
world, self.node, node_val, tensorops.simplex_gradients, self._transform
)
if not is_valid(first_gradient) or not is_valid(hessian_diag_minus_max):
LOGGER.warning(
"Gradient or Hessian is invalid at node {n}.\n".format(n=str(self.node))
)
return False, torch.tensor(0.0)
node_val_reshaped = node_val.reshape(-1)
predicted_alpha = (
1 - ((node_val_reshaped * node_val_reshaped) * (hessian_diag_minus_max))
).reshape(node_val.shape)
mean = self._transform(world.get_variable(self.node).distribution.mean)
predicted_alpha = torch.where(
predicted_alpha < -1 * min_alpha_value, mean, predicted_alpha
)
predicted_alpha = torch.where(
predicted_alpha > 0, predicted_alpha, torch.tensor(min_alpha_value)
)
return True, predicted_alpha
def get_proposal_distribution(self, world: World) -> dist.Distribution:
"""
Returns the proposal distribution of the node.
Args:
world: the world in which we're proposing a new value for node.
Returns:
The proposal distribution.
"""
# if the number of variables in the world is 1 and proposal distribution
# has already been computed, we can use the old proposal distribution
# and skip re-computing the gradient, since there are no other variable
# in the world that may change the gradient and the old one is still
# correct.
if self._proposal_distribution is not None and len(world.latent_nodes) == 1:
return self._proposal_distribution
is_valid, alpha = self.compute_alpha(world)
if not is_valid:
LOGGER.warning(
"Node {n} has invalid proposal solution. ".format(n=self.node)
+ "Proposer falls back to SingleSiteAncestralProposer.\n"
)
return super().get_proposal_distribution(world)
self._proposal_distribution = dist.TransformedDistribution(
dist.Dirichlet(alpha), self._transform.inv
)
return self._proposal_distribution
| beanmachine-main | src/beanmachine/ppl/inference/proposer/nmc/single_site_simplex_space_nmc_proposer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Tuple
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.newtonian_monte_carlo_utils import (
hessian_of_log_prob,
is_valid,
)
from beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (
SingleSiteAncestralProposer,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.utils import tensorops
from beanmachine.ppl.world import World
LOGGER = logging.getLogger("beanmachine")
class SingleSiteHalfSpaceNMCProposer(SingleSiteAncestralProposer):
"""
Single-Site Half Space Newtonian Monte Carlo Proposers.
See sec. 3.2 of [1]
[1] Arora, Nim, et al. `Newtonian Monte Carlo: single-site MCMC meets second-order gradient methods`
"""
def __init__(self, node: RVIdentifier):
super().__init__(node)
self._proposal_distribution = None
def compute_alpha_beta(
self, world: World
) -> Tuple[bool, torch.Tensor, torch.Tensor]:
"""
Computes alpha and beta of the Gamma proposal given the node.
alpha = 1 - hessian_diag * x^2
beta = -1 * x * hessian_diag - first_grad
"""
node_val = world[self.node]
first_gradient, hessian_diag = hessian_of_log_prob(
world, self.node, node_val, tensorops.halfspace_gradients
)
if not is_valid(first_gradient) or not is_valid(hessian_diag):
LOGGER.warning(
"Gradient or Hessian is invalid at node {n}.\n".format(n=str(self.node))
)
return False, torch.tensor(0.0), torch.tensor(0.0)
node_val_reshaped = node_val.reshape(-1)
predicted_alpha = (
1 - hessian_diag * (node_val_reshaped * node_val_reshaped)
).t()
predicted_beta = -1 * node_val_reshaped * hessian_diag - first_gradient
condition = (predicted_alpha > 0) & (predicted_beta > 0)
predicted_alpha = torch.where(
condition, predicted_alpha, torch.tensor(1.0).to(dtype=predicted_beta.dtype)
)
node_var = world.get_variable(self.node)
try:
mean = (
node_var.distribution.mean.reshape(-1)
if is_valid(node_var.distribution.mean)
else torch.ones_like(predicted_beta)
)
except NotImplementedError:
# sometimes distribution.mean throws NotImplementedError
mean = torch.ones_like(predicted_beta)
predicted_beta = torch.where(condition, predicted_beta, mean)
predicted_alpha = predicted_alpha.reshape(node_val.shape)
predicted_beta = predicted_beta.reshape(node_val.shape)
return True, predicted_alpha, predicted_beta
def get_proposal_distribution(self, world: World) -> dist.Distribution:
"""
Returns the proposal distribution of the node.
Args:
world: the world in which we're proposing a new value for node.
Returns:
The proposal distribution.
"""
# if the number of variables in the world is 1 and proposal distribution
# has already been computed, we can use the old proposal distribution
# and skip re-computing the gradient, since there are no other variable
# in the world that may change the gradient and the old one is still
# correct.
if self._proposal_distribution is not None and len(world.latent_nodes) == 1:
return self._proposal_distribution
is_valid, alpha, beta = self.compute_alpha_beta(world)
if not is_valid:
LOGGER.warning(
"Node {n} has invalid proposal solution. ".format(n=self.node)
+ "Proposer falls back to SingleSiteAncestralProposer.\n"
)
return super().get_proposal_distribution(world)
self._proposal_distribution = dist.Gamma(alpha, beta)
return self._proposal_distribution
| beanmachine-main | src/beanmachine/ppl/inference/proposer/nmc/single_site_half_space_nmc_proposer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from beanmachine.ppl.inference.proposer.nmc.single_site_half_space_nmc_proposer import (
SingleSiteHalfSpaceNMCProposer,
)
from beanmachine.ppl.inference.proposer.nmc.single_site_real_space_nmc_proposer import (
SingleSiteRealSpaceNMCProposer,
)
from beanmachine.ppl.inference.proposer.nmc.single_site_simplex_space_nmc_proposer import (
SingleSiteSimplexSpaceNMCProposer,
)
__all__ = [
"SingleSiteHalfSpaceNMCProposer",
"SingleSiteRealSpaceNMCProposer",
"SingleSiteSimplexSpaceNMCProposer",
]
| beanmachine-main | src/beanmachine/ppl/inference/proposer/nmc/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import NamedTuple, Optional, Tuple, Union
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.newtonian_monte_carlo_utils import (
hessian_of_log_prob,
is_scalar,
is_valid,
soft_abs_inverse,
)
from beanmachine.ppl.inference.proposer.normal_eig import NormalEig
from beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (
SingleSiteAncestralProposer,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.utils import tensorops
from beanmachine.ppl.world import World
LOGGER = logging.getLogger("beanmachine")
class _ProposalArgs(NamedTuple):
distance: torch.Tensor
node_val_reshaped: torch.Tensor
scale_tril: Optional[torch.Tensor] = None
eig_vals: Optional[torch.Tensor] = None
eig_vecs: Optional[torch.Tensor] = None
class SingleSiteRealSpaceNMCProposer(SingleSiteAncestralProposer):
"""
Single-Site Real Space Newtonian Monte Carlo Proposer
See sec. 3.1 of [1]
[1] Arora, Nim, et al. `Newtonian Monte Carlo: single-site MCMC meets second-order gradient methods`
"""
def __init__(self, node: RVIdentifier, alpha: float = 10.0, beta: float = 1.0):
super().__init__(node)
self.alpha_: Union[float, torch.Tensor] = alpha
self.beta_: Union[float, torch.Tensor] = beta
self.learning_rate_ = None
self.running_mean_, self.running_var_ = torch.tensor(0.0), torch.tensor(0.0)
self.accepted_samples_ = 0
# cached proposal args
self._proposal_args: Optional[_ProposalArgs] = None
def _sample_frac_dist(self, world: World) -> torch.Tensor:
node_val_flatten = world[self.node].flatten()
# If either alpha or beta is a scalar, we have to reshape them to the
# random variable's shape to allow for a per-index learning rate.
if is_scalar(self.alpha_) or is_scalar(self.beta_):
self.alpha_ = self.alpha_ * torch.ones_like(node_val_flatten)
self.beta_ = self.beta_ * torch.ones_like(node_val_flatten)
beta_ = dist.Beta(self.alpha_, self.beta_)
return beta_.sample()
def _get_proposal_distribution_from_args(
self, world: World, frac_dist: torch.Tensor, args: _ProposalArgs
) -> dist.Distribution:
node_val = world[self.node]
mean = (args.node_val_reshaped + args.distance * frac_dist).squeeze(0)
if args.scale_tril is not None:
proposal_dist = dist.MultivariateNormal(mean, scale_tril=args.scale_tril)
else:
assert args.eig_vals is not None and args.eig_vecs is not None
proposal_dist = NormalEig(
mean, eig_vals=args.eig_vals, eig_vecs=args.eig_vecs
)
# reshape to match the original sample shape
reshape_transform = dist.ReshapeTransform(
node_val.flatten().size(), node_val.size()
)
return dist.TransformedDistribution(proposal_dist, reshape_transform)
def get_proposal_distribution(self, world: World) -> dist.Distribution:
"""
Returns the proposal distribution of the node.
Args:
world: the world in which we're proposing a new value for the node;
this is required to find a proposal distribution, which in this case is
determined by the fraction of the distance between the current value and
the NMC mean that we pick as our proposer mean.
Returns:
The proposal distribution.
"""
if self.learning_rate_ is None:
self.learning_rate_ = self._sample_frac_dist(world)
frac_dist = self.learning_rate_
if self._proposal_args is not None and len(world.latent_nodes) == 1:
return self._get_proposal_distribution_from_args(
world, frac_dist, self._proposal_args
)
node_var = world.get_variable(self.node)
node_val = node_var.value
node_device = node_val.device
first_gradient, hessian = hessian_of_log_prob(
world, self.node, node_val, tensorops.gradients
)
if not is_valid(first_gradient) or not is_valid(hessian):
LOGGER.warning(
"Gradient or Hessian is invalid at node {nv}.\n".format(
nv=str(node_var)
)
+ "Node {n} has invalid proposal solution. ".format(n=self.node)
+ "Proposer falls back to SingleSiteAncestralProposer.\n"
)
return super().get_proposal_distribution(world)
# The node value may be of arbitrary shape, so here we use reshape to flatten
# it into a (1 x N) matrix.
node_val_reshaped = node_val.reshape(1, -1)
neg_hessian = -1 * hessian
# we will first attempt a covariance-inverse-based proposer
# Using cholesky_ex because error propagation is slow in PyTorch (see N1136967)
L, info = torch.linalg.cholesky_ex(neg_hessian.flip([0, 1]), check_errors=False)
if info == 0: # info > 0 means the matrix isn't positive-definite
# See: https://math.stackexchange.com/questions/1434899/is-there-a-decomposition-u-ut
# Let, flip(H) = L @ L' (`flip` flips the x, y axes of X: torch.flip(X, (0, 1)))
# equiv. to applying W @ X @ W'; where W is the permutation matrix
# [[0 ... 1], [0 ... 1 0], ..., [1 ... 0]]
# Note: flip(A @ B) = flip(A) @ flip(B) and flip(A^-1) = (flip(A))^-1
# (flip(H))^-1 = (L @ L')^-1 = L'^-1 @ L^-1
# flip(H^-1) = (L^-1)' @ (L^-1)
# Note that L^-1 is lower triangular and isn't the cholesky factor for (flip(H))^-1.
# flip(flip(H^-1)) = flip((L^-1)') @ flip(L^-1)
# H^-1 = flip(L^-1)' @ flip(L^-1)
# flip(L^-1)' is the lower triangular cholesky factor for H^-1.
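# Consequently L_chol = flip(L^-1)' computed below is lower triangular and
# satisfies L_chol @ L_chol' = H^-1 (i.e. the inverse of neg_hessian), so it
# can be passed directly as the scale_tril of the multivariate normal proposal.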
L_inv = torch.linalg.solve_triangular(
L,
torch.eye(L.size(-1)).to(dtype=neg_hessian.dtype, device=node_device),
upper=False,
)
L_chol = L_inv.flip([0, 1]).T
distance = torch.cholesky_solve(first_gradient.unsqueeze(1), L).t()
proposal_args = _ProposalArgs(
distance=distance,
node_val_reshaped=node_val_reshaped,
scale_tril=L_chol,
)
else:
LOGGER.warning(
"Error: Cholesky decomposition failed. "
+ "Falls back to Eigen decomposition."
)
eig_vecs, eig_vals = soft_abs_inverse(neg_hessian)
distance = (
eig_vecs
@ (torch.eye(len(eig_vals)) * eig_vals)
@ (eig_vecs.t() @ first_gradient.unsqueeze(1))
).t()
proposal_args = _ProposalArgs(
distance=distance,
node_val_reshaped=node_val_reshaped,
eig_vals=eig_vals,
eig_vecs=eig_vecs,
)
self._proposal_args = proposal_args
return self._get_proposal_distribution_from_args(
world, frac_dist, proposal_args
)
def compute_beta_priors_from_accepted_lr(
self, max_lr_num: int = 5
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Compute Alpha and Beta using Method of Moments.
"""
# Running mean and variance are computed following the link below:
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
old_mu = self.running_mean_
old_var = self.running_var_
n = self.accepted_samples_
xn = self.learning_rate_
new_mu = old_mu + (xn - old_mu) / n
new_var = old_var + ((xn - old_mu) * (xn - new_mu) - old_var) / n
self.running_var_ = new_var
self.running_mean_ = new_mu
if n < max_lr_num:
return (
torch.tensor(1.0, dtype=self.learning_rate_.dtype),
torch.tensor(1.0, dtype=self.learning_rate_.dtype),
)
# alpha and beta are calculated following the link below.
# https://stats.stackexchange.com/questions/12232/calculating-the-
# parameters-of-a-beta-distribution-using-the-mean-and-variance
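# For Beta(alpha, beta): mean = alpha / (alpha + beta) and
# variance = mean * (1 - mean) / (alpha + beta + 1); solving for alpha and beta
# in terms of the running mean and variance gives the expressions below.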
alpha = ((1.0 - new_mu) / new_var - (1.0 / new_mu)) * (new_mu**2)
beta = alpha * (1.0 - new_mu) / new_mu
# pyre-fixme[6]: For 1st param expected `Tensor` but got `bool`.
# pyre-fixme[6]: For 1st param expected `Tensor` but got `float`.
alpha = torch.where(alpha <= 0, torch.ones_like(alpha), alpha)
# pyre-fixme[6]: For 1st param expected `Tensor` but got `bool`.
# pyre-fixme[6]: For 1st param expected `Tensor` but got `float`.
beta = torch.where(beta <= 0, torch.ones_like(beta), beta)
return alpha, beta
def do_adaptation(
self,
world: World,
accept_log_prob: torch.Tensor,
is_accepted: bool = False,
*args,
**kwargs,
) -> None:
"""
Do adaptation based on the learning rates.
Args:
world: the world in which we're operating in.
accept_log_prob: Current accepted log prob (Not used in this particular proposer).
is_accepted: bool representing whether the new value was accepted.
"""
if not is_accepted:
if self.accepted_samples_ == 0:
self.alpha_ = 1.0
self.beta_ = 1.0
else:
self.accepted_samples_ += 1
self.alpha_, self.beta_ = self.compute_beta_priors_from_accepted_lr()
| beanmachine-main | src/beanmachine/ppl/inference/proposer/nmc/single_site_real_space_nmc_proposer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Tensorizing and detensorizing
#
# See the comment at the top of devectorizer_transformer.py for a high-level description of
# what this class is for and how it works with the devectorizer.
import typing
from enum import Enum
from typing import Callable, List
import beanmachine.ppl.compiler.bmg_nodes as bn
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.copy_and_replace import (
Cloner,
NodeTransformer,
TransformAssessment,
)
from beanmachine.ppl.compiler.error_report import ErrorReport
from beanmachine.ppl.compiler.fix_unsupported import UnsupportedNodeFixer
from beanmachine.ppl.compiler.size_assessment import SizeAssessment
from beanmachine.ppl.compiler.sizer import is_scalar, Sizer, Unsized
# The tensorizing transformation does not need to know the *semantic* type of a node;
# that is, whether it is a bool, natural, probability, positive real, and so on. But
# we do need information about what the *tensor shape* was in the original PyTorch
# model.
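# For example, an addition whose operands both have size [3] is rewritten into
# a matrix-addition node, while an addition of two scalars is simply cloned
# unchanged.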
class ElementType(Enum):
# The node represents a multidimensional tensor that cannot be expressed in BMG.
TENSOR = 1
# The node represents a single value.
SCALAR = 2
# The node represents multiple values that can be expressed in a BMG 2-d matrix.
MATRIX = 3
# We were unable to deduce the size in the original Python model.
UNKNOWN = 4
def _always(node):
return True
class Tensorizer(NodeTransformer):
# A node transformer exposes two operations to its caller:
# * assess_node takes a node and returns an assessment of whether it can be
# transformed.
# * transform_node takes a node and either returns a copy, or a new node to
# replace the given node.
#
# This transformer determines whether a node in the graph accumulated from the
# original Python model should be transformed into a matrix-aware BMG node.
def __init__(self, cloner: Cloner, sizer: Sizer):
self.cloner = cloner
self.sizer = sizer
self.size_assessor = SizeAssessment(self.sizer)
self.transform_cache = {}
self.can_be_transformed_map = {
bn.AdditionNode: _always,
bn.MultiplicationNode: self.mult_can_be_tensorized,
bn.DivisionNode: self.div_can_be_tensorized,
bn.ComplementNode: _always,
bn.ExpNode: _always,
bn.LogNode: _always,
bn.Log1mexpNode: _always,
bn.NegateNode: self.negate_can_be_tensorized,
bn.PhiNode: _always,
bn.SumNode: _always,
}
self.transform_map = {
bn.AdditionNode: lambda node, inputs: self._tensorize_addition(
node, inputs, self.cloner.bmg.add_matrix_addition
),
bn.MultiplicationNode: self._tensorize_multiply,
bn.DivisionNode: self._tensorize_div,
bn.ComplementNode: lambda node, inputs: self._tensorize_unary_elementwise(
node, inputs, self.cloner.bmg.add_matrix_complement
),
bn.ExpNode: lambda node, inputs: self._tensorize_unary_elementwise(
node, inputs, self.cloner.bmg.add_matrix_exp
),
bn.LogNode: lambda node, inputs: self._tensorize_unary_elementwise(
node, inputs, self.cloner.bmg.add_matrix_log
),
bn.Log1mexpNode: lambda node, inputs: self._tensorize_unary_elementwise(
node, inputs, self.cloner.bmg.add_matrix_log1mexp
),
bn.NegateNode: lambda node, inputs: self._tensorize_unary_elementwise(
node, inputs, self.cloner.bmg.add_matrix_negate
),
bn.PhiNode: lambda node, inputs: self._tensorize_unary_elementwise(
node, inputs, self.cloner.bmg.add_matrix_phi
),
bn.SumNode: self._tensorize_sum,
}
def _tensorize_div(
self, node: bn.DivisionNode, new_inputs: List[bn.BMGNode]
) -> bn.BMGNode:
# If we have DIV(matrix, scalar) then we transform that into
# MATRIX_SCALE(matrix, DIV(1, scalar)).
assert len(node.inputs.inputs) == 2
tensor_input = new_inputs[0]
scalar_input = new_inputs[1]
if self._element_type(tensor_input) is not ElementType.MATRIX:
raise ValueError("Expected a matrix as first operand")
if self._element_type(scalar_input) is not ElementType.SCALAR:
raise ValueError("Expected a scalar as second operand")
one = self.cloner.bmg.add_pos_real(1.0)
new_scalar = self.cloner.bmg.add_division(one, scalar_input)
return self.cloner.bmg.add_matrix_scale(new_scalar, tensor_input)
def _tensorize_sum(
self, node: bn.SumNode, new_inputs: List[bn.BMGNode]
) -> bn.BMGNode:
# TODO: Ensure that we correctly insert any necessary broadcasting nodes
# in the requirements-fixing pass.
assert len(new_inputs) >= 1
if any(
self._element_type(operand) == ElementType.MATRIX
for operand in node.inputs.inputs
):
current = new_inputs[0]
for i in range(1, len(new_inputs)):
current = self.cloner.bmg.add_matrix_addition(current, new_inputs[i])
return self.cloner.bmg.add_matrix_sum(current)
return self.cloner.bmg.add_sum(*new_inputs)
def _tensorize_multiply(
self, node: bn.MultiplicationNode, new_inputs: List[bn.BMGNode]
) -> bn.BMGNode:
# Note that this function handles *elementwise* multiplication of tensors, not
# matrix multiplication. There are three cases to consider.
if len(new_inputs) != 2:
raise ValueError(
"Cannot transform a mult into a tensor mult because there are not two operands"
)
lhs_sz = self.sizer[new_inputs[0]]
rhs_sz = self.sizer[new_inputs[1]]
if lhs_sz == Unsized or rhs_sz == Unsized:
raise ValueError(
f"cannot multiply an unsized quantity. Operands: {new_inputs[0]} and {new_inputs[1]}"
)
# Case one: MULT(matrix, matrix) --> ELEMENTWISEMULT(matrix, matrix)
# TODO: Ensure that the requirements fixing pass correctly inserts broadcast operators.
lhs_is_scalar = is_scalar(lhs_sz)
rhs_is_scalar = is_scalar(rhs_sz)
if not lhs_is_scalar and not rhs_is_scalar:
return self.cloner.bmg.add_elementwise_multiplication(
new_inputs[0], new_inputs[1]
)
# Case two: MULT(scalar, scalar) stays an ordinary multiplication; that case is
# handled by the else branch below. Otherwise, figure out which operand is the
# scalar and which is the matrix.
if lhs_is_scalar and not rhs_is_scalar:
scalar_parent_image = new_inputs[0]
tensor_parent_image = new_inputs[1]
assert not is_scalar(rhs_sz)
elif rhs_is_scalar and not lhs_is_scalar:
tensor_parent_image = new_inputs[0]
scalar_parent_image = new_inputs[1]
assert not is_scalar(lhs_sz)
else:
return self.cloner.bmg.add_multiplication(new_inputs[0], new_inputs[1])
# Case three: MULT(matrix, scalar) or MULT(scalar, matrix) --> MATRIX_SCALE(matrix, scalar)
return self.cloner.bmg.add_matrix_scale(
scalar_parent_image, tensor_parent_image
)
def _tensorize_unary_elementwise(
self,
node: bn.UnaryOperatorNode,
new_inputs: List[bn.BMGNode],
creator: Callable,
) -> bn.BMGNode:
# Unary operators such as exp, log, and so on, are straightforward. If the operand is
# a matrix, generate the matrix-aware node. Otherwise leave it alone.
assert len(new_inputs) == 1
if self._element_type(new_inputs[0]) == ElementType.MATRIX:
return creator(new_inputs[0])
else:
return self.cloner.clone(node, new_inputs)
def _tensorize_addition(
self,
node: bn.AdditionNode,
new_inputs: List[bn.BMGNode],
creator: Callable,
) -> bn.BMGNode:
# If we have matrix + matrix, scalar + matrix or matrix + scalar, generate
# a matrix add. In the latter cases, the requirements fixing pass will insert a
# matrix fill node to convert the scalar to a matrix of the appropriate size.
assert len(new_inputs) == 2
if (
self._element_type(new_inputs[0]) == ElementType.MATRIX
or self._element_type(new_inputs[1]) == ElementType.MATRIX
):
return creator(new_inputs[0], new_inputs[1])
else:
return self.cloner.clone(node, new_inputs)
def _element_type(self, node: bn.BMGNode) -> ElementType:
size = self.sizer[node]
if size == Unsized:
return ElementType.UNKNOWN
length = len(size)
if length == 0 or is_scalar(size):
return ElementType.SCALAR
if length == 1 and size[0] > 1:
return ElementType.MATRIX
elif length == 2:
return ElementType.MATRIX
else:
return ElementType.TENSOR
def div_can_be_tensorized(self, node: bn.DivisionNode) -> bool:
if len(node.inputs.inputs) == 2:
return (
self._element_type(node.inputs.inputs[0]) == ElementType.MATRIX
and self._element_type(node.inputs.inputs[1]) == ElementType.SCALAR
)
return False
def negate_can_be_tensorized(self, node: bn.NegateNode) -> bool:
# We want to fix unsupported nodes first before we tensorize them.
# For example, when we have log1p(-Sample(Beta(...))) this gets changed to log(1-Sample(Beta(...))),
# which is log(complement(Sample(Beta(...)))). But if we run the tensorizer, it does negate first
# and then the log1p fixing. In this case, we get log(1+MatrixNegate(Sample(Beta(...)))).
# There is no way to indicate that the computation is always positive real.
# Therefore, this leads to the compiler thinking the requirements are violated.
# We can avoid this by converting unsupported nodes to supported nodes and then tensorizing them.
# TODO: This will also allow us to carry out fixpoint between tensorizer and other fixers.
# TODO: We may want to do the same for other operators that can be tensorized.
if any(
isinstance(i, node_type)
for node_type in UnsupportedNodeFixer._unsupported_nodes
for i in node.outputs.items
):
return False
return True
def mult_can_be_tensorized(self, node: bn.MultiplicationNode) -> bool:
return len(node.inputs.inputs) == 2
# a node can be tensorized if all its parents satisfy the type requirements
def can_be_tensorized(self, original_node: bn.BMGNode) -> bool:
if type(original_node) in self.can_be_transformed_map:
return self.can_be_transformed_map[type(original_node)](original_node)
else:
return False
def assess_node(
self, node: bn.BMGNode, original: BMGraphBuilder
) -> TransformAssessment:
report = ErrorReport()
error = self.size_assessor.size_error(node, self.cloner.bmg_original)
if error is not None:
report.add_error(error)
return TransformAssessment(self.can_be_tensorized(node), report)
# a node is either replaced 1-1, 1-many, or deleted
def transform_node(
self, node: bn.BMGNode, new_inputs: List[bn.BMGNode]
) -> typing.Optional[typing.Union[bn.BMGNode, List[bn.BMGNode]]]:
if type(node) in self.transform_map:
return self.transform_map[type(node)](node, new_inputs)
else:
return self.cloner.clone(node, new_inputs)
| beanmachine-main | src/beanmachine/ppl/compiler/tensorizer_transformer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Error reporting for internal compiler errors"""
import os
from ast import AST
from tempfile import NamedTemporaryFile
from typing import Optional
import beanmachine.ppl.compiler.ast_tools as ast_tools
_BEANSTALK_LOG_ERRORS_TO_DISK = "BEANSTALK_LOG_ERRORS_TO_DISK"
_BEANSTALK_VERBOSE_EXCEPTIONS = "BEANSTALK_VERBOSE_EXCEPTIONS"
_help_log = f"""Set environment variable {_BEANSTALK_LOG_ERRORS_TO_DISK}
to 1 to dump extended error information to a temporary file.
"""
_help_verbose = f"""Set environment variable {_BEANSTALK_VERBOSE_EXCEPTIONS}
to 1 for extended error information.
"""
def _log_to_disk(message: str) -> str:
temp = NamedTemporaryFile(prefix="beanstalk_ice_", delete=False, mode="wt")
try:
temp.write(message)
finally:
temp.close()
return temp.name
def _check_environment(variable: str) -> bool:
return os.environ.get(variable) == "1"
# You can change this to True for debugging purposes.
_always_log_errors_to_disk = False
def _log_errors_to_disk() -> bool:
return _always_log_errors_to_disk or _check_environment(
_BEANSTALK_LOG_ERRORS_TO_DISK
)
# You can change this to True for debugging purposes.
_always_verbose_exceptions = False
def _verbose_exceptions() -> bool:
return _always_verbose_exceptions or _check_environment(
_BEANSTALK_VERBOSE_EXCEPTIONS
)
class InternalError(Exception):
"""An exception class for internal compiler errors"""
original_exception: Optional[Exception]
def __init__(self, message: str, original_exception: Optional[Exception] = None):
self.original_exception = original_exception
Exception.__init__(self, message)
class LiftedCompilationError(InternalError):
"""An exception class for internal compiler errors when
compiling the lifted code."""
source: str
ast: AST
def __init__(self, source: str, ast: AST, original_exception: Exception):
self.source = source
self.ast = ast
# TODO: Consider adding a compiler version number, hash
# or other identifier to help debug.
brief = f"""Compilation of the lifted AST failed.
This typically indicates an internal error in the rewrite phase of the compiler.
### Exception thrown ###
{original_exception}
"""
verbose = f"""### Internal compiler error ###
{brief}
### Model source ###
{source}
### Abstract syntax tree ###
from ast import *
failed = {ast_tools.print_python(ast)}
### End internal compiler error ###
"""
log = _log_errors_to_disk()
use_verbose = _verbose_exceptions()
help_text = "" if log else _help_log
help_text += "" if use_verbose else _help_verbose
message = verbose if use_verbose else brief
message += help_text
if log:
logname = _log_to_disk(verbose)
message += f"\nExtended error information logged to {logname}\n"
InternalError.__init__(self, message, original_exception)
| beanmachine-main | src/beanmachine/ppl/compiler/internal_error.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# See notes in typer_base.py for how the type computation logic works.
#
# This typer computes all possible tensor values that a graph node can
# possibly have. For example, if we have a random variable:
#
# @rv def flips(n):
# return Bernoulli(0.5)
#
# @functional def sumflips():
# return flips(0) + flips(1)
#
# Then the sample nodes each have a two-value support {0, 1} --
# and the addition node has a support {0, 1, 2}.
#
# Some nodes -- a sample from a normal, for instance -- have infinite
# support; we mark those with a special value. Similarly, some nodes
# have finite but large support, where "large" is a parameter we can
# choose; to keep graphs relatively small we will refuse to compile
# a model where there are thousands of samples associated with a
# particular call. For example, suppose we have K categories:
#
# @rv def cat():
# return Categorical(tensor([1, 1, 1, 1, 1, ...]))
#
# @rv def norm(n):
# return Normal(0, 1)
#
# @functional def toobig():
# return whatever(cat())
#
# That model generates K normal samples; we want to restrict that
# to "small" K.
import functools
import itertools
import operator
from math import isnan
from typing import Callable, Dict
import beanmachine.ppl.compiler.bmg_nodes as bn
import torch
from beanmachine.ppl.compiler.sizer import Sizer
from beanmachine.ppl.compiler.typer_base import TyperBase
from beanmachine.ppl.utils.set_of_tensors import SetOfTensors
from torch import tensor
Infinite = SetOfTensors([])
TooBig = SetOfTensors([])
Unknown = SetOfTensors([])
positive_infinity = float("inf")
def _prod(x) -> int:
"""Compute the product of a sequence of values of arbitrary length"""
return functools.reduce(operator.mul, x, 1)
_limit = 1000
_always_infinite = {
bn.BetaNode,
bn.Chi2Node,
bn.DirichletNode,
bn.FlatNode,
bn.GammaNode,
bn.HalfCauchyNode,
bn.HalfNormalNode,
bn.NormalNode,
bn.PoissonNode,
bn.StudentTNode,
bn.UniformNode,
}
# TODO: We could do better for the comparison operators because we know
# that the support is not a function of the inputs; the support is just
# {True, False} for each element. Handling this correctly would enable
# us to do stochastic control flows of the form some_rv(normal(1) > normal(2))
# even though the normal rvs have infinite supports.
#
# However, since BMG does not yet implement comparison operators, this is a
# moot point; if it ever does so, then revisit this decision.
_product_of_inputs = {
bn.AdditionNode: torch.Tensor.__add__,
bn.BitAndNode: torch.Tensor.__and__,
bn.BitOrNode: torch.Tensor.__or__,
bn.BitXorNode: torch.Tensor.__xor__,
bn.CholeskyNode: torch.Tensor.cholesky,
bn.DivisionNode: torch.Tensor.div,
bn.EqualNode: torch.Tensor.eq,
bn.ExpM1Node: torch.Tensor.expm1,
bn.ExpNode: torch.Tensor.exp,
bn.Exp2Node: torch.exp2,
bn.FloorDivNode: torch.Tensor.__floordiv__,
bn.GreaterThanEqualNode: torch.Tensor.ge,
bn.GreaterThanNode: torch.Tensor.gt,
bn.InvertNode: torch.Tensor.__invert__,
bn.ItemNode: lambda x: x, # item() is an identity
bn.LessThanEqualNode: torch.Tensor.le,
bn.LessThanNode: torch.Tensor.lt,
bn.LogisticNode: torch.Tensor.sigmoid,
bn.LogNode: torch.Tensor.log,
bn.Log10Node: torch.log10,
bn.Log1pNode: torch.log1p,
bn.Log2Node: torch.log2,
bn.LShiftNode: torch.Tensor.__lshift__,
bn.MatrixMultiplicationNode: torch.Tensor.mm,
bn.ModNode: torch.Tensor.__mod__,
bn.MultiplicationNode: torch.Tensor.mul,
bn.NegateNode: torch.Tensor.neg,
bn.NotEqualNode: torch.Tensor.ne,
bn.PhiNode: torch.distributions.Normal(0.0, 1.0).cdf,
bn.PowerNode: torch.Tensor.pow,
bn.RShiftNode: torch.Tensor.__rshift__,
bn.SquareRootNode: torch.sqrt,
}
# TODO:
#
# NotNode -- note that "not t" on a tensor is equivalent to "not Tensor.__bool__(t)"
# and produces either True or False. It is *not* the same as "Tensor.logical_not(t)"
# which executes "not" on each element and returns a tensor of the same size as t.
#
# LogSumExpTorchNode
# Log1mexpNode
# IndexNode
# Log1mexpNode
#
# We will need to implement computation of the support
# of an arbitrary binomial distribution because samples are
# discrete values between 0 and count, which is typically small.
# Though implementing support computation if count is non-stochastic
# is straightforward, we do not yet have the gear to implement
# this for stochastic counts. Consider this contrived case:
#
# @bm.random_variable def a(): return Binomial(2, 0.5)
# @bm.random_variable def b(): return Binomial(a() + 1, 0.4)
# @bm.random_variable def c(i): return Normal(0.0, 2.0)
# @bm.random_variable def d(): return Normal(c(b()), 3.0)
#
# The support of a() is 0, 1, 2 -- easy.
#
# We need to know the support of b() in order to build the
# graph for d(). But how do we know the support of b()?
#
# What we must do is compute that the maximum possible value
# for a() + 1 is 3, and so the support of b() is 0, 1, 2, 3,
# and therefore there are four samples of c(i) generated.
#
# There are two basic ways to do this that immediately come to
# mind.
#
# The first is to simply ask the graph for the support of
# a() + 1, which we can generate, and then take the maximum
# value thus generated.
#
# If that turns out to be too expensive for some reason then
# we can write a bit of code that answers the question
# "what is the maximum value of your support?" and have each
# node implement that. However, that then introduces new
# problems; to compute the maximum value of a negation, for
# instance, we then would also need to answer the question
# "what is the minimum value you support?" and so on.
_nan = float("nan")
def _set_approximate_size(s: SetOfTensors) -> float:
if s is Infinite:
return positive_infinity
if s is Unknown:
return _nan
if s is TooBig:
return _limit
return len(s)
def _set_product_approximate_size(x: float, y: SetOfTensors) -> float:
# If either size is unknown (NaN), we return NaN.
# Otherwise, if either size is infinite, we return infinite.
# Otherwise, return the product.
return x * _set_approximate_size(y)
class ComputeSupport(TyperBase[SetOfTensors]):
_dispatch: Dict[type, Callable]
_sizer: Sizer
def __init__(self) -> None:
TyperBase.__init__(self)
self._sizer = Sizer()
self._dispatch = {
bn.BernoulliLogitNode: self._support_bernoulli,
bn.BernoulliNode: self._support_bernoulli,
bn.CategoricalLogitNode: self._support_categorical,
bn.CategoricalNode: self._support_categorical,
bn.SampleNode: self._support_sample,
bn.SwitchNode: self._support_switch,
bn.TensorNode: self._support_tensor,
}
def _product(self, f: Callable, *nodes: bn.BMGNode) -> SetOfTensors:
# * We have a sequence of nodes n[0], n[1], ... n[k-1].
#
# * Each of those nodes has possible values t[x][0], t[x][1] ...
# for x from 0 to k-1.
#
# * We have a function f which takes k tensors and returns a tensor.
#
# * We wish to compute the set:
#
# {
# f(t[0][0], t[1][0], ... t[k-1][0]),
# f(t[0][1], t[1][0], ... t[k-1][0]),
# ...
# }
#
# That is, the Cartesian product of all possible combinations of
# values of each node, with each element of the product run through
# function f.
#
# However, we have some problems:
#
# * The support of a node might be infinite.
# * The support of a node might be unknown.
# * The support of a node might be finite but large.
# * The size of the product might be finite but large.
#
# In those cases we want to return special values Infinite, Unknown
# or TooBig rather than wasting time and memory to compute the set.
#
# ####
#
# First thing to do is determine the *approximate* size of the
# Cartesian product of possible values.
#
# TODO: This approximation is an over-estimate; for instance, when
# multiplying {0 or 1} by { n elements} we assume that the resulting
# set has up to 2n elements, when in fact it only has n or n+1 elements.
# Would it be simpler and more accurate to instead make a loop, accumulate
# the result into a mutable deduplicating set, and if the set ever gets
# too big, bail out then?
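# As a small illustration: if f is elementwise addition and the two input
# supports are {0., 1.} and {10., 20.}, the computed support is
# {10., 11., 20., 21.}.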
size = functools.reduce(
lambda acc, node: _set_product_approximate_size(acc, self[node]), nodes, 1.0
)
if size == positive_infinity:
return Infinite
if isnan(size):
return Unknown
if size >= _limit:
return TooBig
# If we've made it here then every node had a small support
# and the product of the approximate sizes was small too.
# We can just take the Cartesian product and call f.
return SetOfTensors(f(c) for c in itertools.product(*(self[n] for n in nodes)))
def _support_bernoulli(self, node: bn.BernoulliBase) -> SetOfTensors:
# The support of a Bernoulli only depends on the shape of its input,
# not on the content of that input. Suppose we have a Bernoulli
# of shape [1, 2, 3]; there are 1x2x3 = 6 values in the output
# each of which is either 0 or 1, so that's 2**6 = 64 possibilities. If
# we have too big a shape then just bail out rather than handling
# thousands or millions of possibilities.
s = self._sizer[node]
p = _prod(s)
if 2.0**p >= _limit:
return TooBig
return SetOfTensors(
tensor(i).reshape(s) for i in itertools.product(*([[0.0, 1.0]] * p))
)
def _support_categorical(self, node: bn.CategoricalNodeBase) -> SetOfTensors:
# Suppose we have something like Categorical(tensor([0.25, 0.25, 0.5])),
# with size [3]. The support is 0, 1, 2.
# If we have a higher dimensional categorical, say size [2, 3] such as
# [[0.25, 0.25, 0.5], [0.5, 0.25, 0.25]], then the support is
# [0, 0], [0, 1], ... [2, 2], each is of size [2].
#
# That is: the range of values is determined by the last element
# of the categorical input size, and the size of each member of
# the support is the truncation of the last member off the categorical
# input size.
input_size = self._sizer[node.probability]
if len(input_size) == 0:
return Unknown
max_element = input_size[-1] # 3, in our example above.
r = list(range(max_element)) # [0, 1, 2]
result_size = input_size[:-1] # [2] in our example above
# In our example above we would have 3 ** 2 members of the
# support. Compute how many members we're going to get and
# bail out if it is too large.
# TODO: Move this prod helper function out of bmg_nodes.py
num_result_elements = _prod(result_size)
if max_element**num_result_elements >= _limit:
return TooBig
return SetOfTensors(
tensor(i).reshape(result_size)
for i in itertools.product(*([r] * num_result_elements))
)
def _support_sample(self, node: bn.SampleNode) -> SetOfTensors:
return self[node.operand]
def _support_switch(self, node: bn.SwitchNode) -> SetOfTensors:
for i in range((len(node.inputs) - 1) // 2):
if self[node.inputs[2 + i * 2]] == Infinite:
return Infinite
for i in range((len(node.inputs) - 1) // 2):
if self[node.inputs[2 + i * 2]] == TooBig:
return TooBig
for i in range((len(node.inputs) - 1) // 2):
if self[node.inputs[2 + i * 2]] == Unknown:
return Unknown
s = 0
for i in range((len(node.inputs) - 1) // 2):
s += len(self[node.inputs[2 + i * 2]])
if s >= _limit:
return TooBig
return SetOfTensors(
itertools.chain(
*(
self[node.inputs[2 + i * 2]]
for i in range((len(node.inputs) - 1) // 2)
)
)
)
def _support_tensor(self, node: bn.TensorNode) -> SetOfTensors:
return self._product(tensor, *node.inputs)
# This implements the abstract base type method.
def _compute_type_inputs_known(self, node: bn.BMGNode) -> SetOfTensors:
if isinstance(node, bn.ConstantNode):
return SetOfTensors([node.value])
t = type(node)
if t in _always_infinite:
result = Infinite
elif t in _product_of_inputs:
result = self._product(lambda x: _product_of_inputs[t](*x), *node.inputs)
elif t in self._dispatch:
result = self._dispatch[t](node)
else:
result = Unknown
return result
| beanmachine-main | src/beanmachine/ppl/compiler/support.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List
import beanmachine.ppl.compiler.bmg_nodes as bn
import torch
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.bmg_node_types import (
dist_type,
factor_type,
operator_type,
)
from beanmachine.ppl.compiler.bmg_types import _size_to_rc, SimplexMatrix
from beanmachine.ppl.compiler.fix_problems import fix_problems
from beanmachine.ppl.compiler.lattice_typer import LatticeTyper
def _value_to_cpp_eigen(value: torch.Tensor, variable: str, array_type: str) -> str:
# Torch tensors are row major but Eigen matrices are column major;
# a torch Dirichlet distribution expects a row of parameters;
# BMG expects a column. That's why we swap rows with columns here.
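# For example, a torch row vector with three entries is emitted as a 3 x 1
# Eigen column matrix with the entries listed in the same order.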
r, c = _size_to_rc(value.size())
v = value.reshape(r, c).transpose(0, 1).contiguous()
values = ", ".join(str(element) for element in v.reshape(-1).tolist())
return f"{array_type} {variable}({c}, {r});\n{variable} << {values};"
def _value_to_cpp_real_eigen(value: torch.Tensor, variable: str) -> str:
return _value_to_cpp_eigen(value, variable, "Eigen::MatrixXd")
def _value_to_cpp_natural_eigen(value: torch.Tensor, variable: str) -> str:
return _value_to_cpp_eigen(value, variable, "Eigen::MatrixXn")
def _value_to_cpp_bool_eigen(value: torch.Tensor, variable: str) -> str:
return _value_to_cpp_eigen(value, variable, "Eigen::MatrixXb")
def _value_to_cpp(value: Any) -> str:
return str(value).lower()
class GeneratedGraphCPP:
code: str
_code: List[str]
bmg: BMGraphBuilder
node_to_graph_id: Dict[bn.BMGNode, int]
query_to_query_id: Dict[bn.Query, int]
_observation_count: int
def __init__(self, bmg: BMGraphBuilder) -> None:
self.code = ""
self._code = ["graph::Graph g;"]
self.bmg = bmg
self.node_to_graph_id = {}
self.query_to_query_id = {}
self._observation_count = 0
def _add_observation(self, node: bn.Observation) -> None:
graph_id = self.node_to_graph_id[node.observed]
v = node.value
if isinstance(v, torch.Tensor):
o = f"o{self._observation_count}"
self._observation_count += 1
# TODO: What if it's not a real-valued observation?
# We do not handle this case correctly in fix_observations.
# When we do, fix this here too.
s = _value_to_cpp_real_eigen(v, o)
self._code.append(s)
self._code.append(f"g.observe(n{graph_id}, {o});")
else:
self._code.append(f"g.observe(n{graph_id}, {_value_to_cpp(v)});")
def _add_query(self, node: bn.Query) -> None:
query_id = len(self.query_to_query_id)
self.query_to_query_id[node] = query_id
graph_id = self.node_to_graph_id[node.operator]
self._code.append(f"uint q{query_id} = g.query(n{graph_id});")
def _inputs(self, node: bn.BMGNode) -> str:
if isinstance(node, bn.LKJCholeskyNode):
# The LKJ dimension parameter has already been folded into the sample type
input_seq = [self.node_to_graph_id[node.inputs[1]]]
else:
input_seq = (self.node_to_graph_id[x] for x in node.inputs)
inputs = ", ".join("n" + str(x) for x in input_seq)
return "std::vector<uint>({" + inputs + "})"
def _add_factor(self, node: bn.FactorNode) -> None:
graph_id = len(self.node_to_graph_id)
self.node_to_graph_id[node] = graph_id
i = self._inputs(node)
ft = str(factor_type(node)).replace(".", "::")
self._code.append(f"uint n{graph_id} = g.add_factor(")
self._code.append(f" graph::{ft},")
self._code.append(f" {i});")
def _add_distribution(self, node: bn.DistributionNode) -> None:
graph_id = len(self.node_to_graph_id)
self.node_to_graph_id[node] = graph_id
i = self._inputs(node)
if isinstance(node, bn.DirichletNode):
t = LatticeTyper()[node]
assert isinstance(t, SimplexMatrix)
self._code.append(f"uint n{graph_id} = g.add_distribution(")
self._code.append(" graph::DistributionType::DIRICHLET,")
self._code.append(" graph::ValueType(")
self._code.append(" graph::VariableType::COL_SIMPLEX_MATRIX,")
self._code.append(" graph::AtomicType::PROBABILITY,")
self._code.append(f" {t.rows},")
self._code.append(f" {t.columns}")
self._code.append(" ),")
self._code.append(f" {i});")
else:
distr_type, elt_type = dist_type(node)
distr_type = str(distr_type).replace(".", "::")
elt_type = str(elt_type).replace(".", "::")
self.node_to_graph_id[node] = graph_id
self._code.append(f"uint n{graph_id} = g.add_distribution(")
self._code.append(f" graph::{distr_type},")
self._code.append(f" graph::{elt_type},")
self._code.append(f" {i});")
def _add_operator(self, node: bn.OperatorNode) -> None:
graph_id = len(self.node_to_graph_id)
self.node_to_graph_id[node] = graph_id
i = self._inputs(node)
ot = str(operator_type(node)).replace(".", "::")
self._code.append(f"uint n{graph_id} = g.add_operator(")
if len(node.inputs) <= 2:
self._code.append(f" graph::{ot}, {i});")
else:
self._code.append(f" graph::{ot},")
self._code.append(f" {i});")
def _add_constant(self, node: bn.ConstantNode) -> None: # noqa
graph_id = len(self.node_to_graph_id)
self.node_to_graph_id[node] = graph_id
t = type(node)
v = node.value
m = ""
if t is bn.PositiveRealNode:
f = f"add_constant_pos_real({_value_to_cpp(float(v))})"
elif t is bn.NegativeRealNode:
f = f"add_constant_neg_real({_value_to_cpp(float(v))})"
elif t is bn.ProbabilityNode:
f = f"add_constant_probability({_value_to_cpp(float(v))})"
elif t is bn.BooleanNode:
f = f"add_constant_bool({_value_to_cpp(bool(v))})"
elif t is bn.NaturalNode:
f = f"add_constant_natural({_value_to_cpp(int(v))})"
elif t is bn.RealNode:
f = f"add_constant_real({_value_to_cpp(float(v))})"
elif t is bn.ConstantTensorNode:
# This only happens in the rare case where we have a functional
# which returns a constant tensor and there's a query on it.
# We never rewrite that to another kind of node.
# TODO: Consider turning that into a constant of some more
# specific BMG node type.
m = _value_to_cpp_real_eigen(v, f"m{graph_id}")
f = f"add_constant_real_matrix(m{graph_id})"
elif t is bn.ConstantPositiveRealMatrixNode:
m = _value_to_cpp_real_eigen(v, f"m{graph_id}")
f = f"add_constant_pos_matrix(m{graph_id})"
elif t is bn.ConstantRealMatrixNode:
m = _value_to_cpp_real_eigen(v, f"m{graph_id}")
f = f"add_constant_real_matrix(m{graph_id})"
elif t is bn.ConstantNegativeRealMatrixNode:
m = _value_to_cpp_real_eigen(v, f"m{graph_id}")
f = f"add_constant_neg_matrix(m{graph_id})"
elif t is bn.ConstantProbabilityMatrixNode:
m = _value_to_cpp_real_eigen(v, f"m{graph_id}")
f = f"add_constant_probability_matrix(m{graph_id})"
elif t is bn.ConstantSimplexMatrixNode:
m = _value_to_cpp_real_eigen(v, f"m{graph_id}")
f = f"add_constant_col_simplex_matrix(m{graph_id})"
elif t is bn.ConstantNaturalMatrixNode:
m = _value_to_cpp_natural_eigen(v, f"m{graph_id}")
f = f"add_constant_natural_matrix(m{graph_id})"
elif t is bn.ConstantBooleanMatrixNode:
m = _value_to_cpp_bool_eigen(v, f"m{graph_id}")
f = f"add_constant_bool_matrix(m{graph_id})"
else:
f = "UNKNOWN"
if m != "":
self._code.append(m)
self._code.append(f"uint n{graph_id} = g.{f};")
def _generate_node(self, node: bn.BMGNode) -> None:
if isinstance(node, bn.Observation):
self._add_observation(node)
elif isinstance(node, bn.Query):
self._add_query(node)
elif isinstance(node, bn.FactorNode):
self._add_factor(node)
elif isinstance(node, bn.DistributionNode):
self._add_distribution(node)
elif isinstance(node, bn.OperatorNode):
self._add_operator(node)
elif isinstance(node, bn.ConstantNode):
self._add_constant(node)
def _generate_cpp(self) -> None:
bmg, error_report = fix_problems(self.bmg)
error_report.raise_errors()
self.bmg = bmg
for node in self.bmg.all_ancestor_nodes():
self._generate_node(node)
self.code = "\n".join(self._code)
def to_bmg_cpp(bmg: BMGraphBuilder) -> GeneratedGraphCPP:
gg = GeneratedGraphCPP(bmg)
gg._generate_cpp()
return gg
| beanmachine-main | src/beanmachine/ppl/compiler/gen_bmg_cpp.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A pattern matching engine"""
from abc import ABC, ABCMeta, abstractmethod
from typing import Any, Callable, Dict, List, Optional, Union
# Logically, a pattern is just a predicate; it's a function from
# any value to bool: True if the value matches the predicate,
# false otherwise.
#
# However it will be convenient for us to be able to represent
# patterns like "is equal to zero" as just the integer 0,
# "is an instance of type T" as just the type object.
#
# Similarly, it will be convenient to build complex patterns as
# compositions of simpler ones.
#
# Also, for debugging purposes we will not simply return True
# or False; we'll return a MatchResult object that indicates
# what value was matched, whether it was successfully matched
# or not, and, if the pattern had subpatterns, what the results
# of those were.
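# For example: match(0, x) succeeds only when x == 0; match(int, x) succeeds
# when x is an instance of int; and match([1, int], [1, 2]) succeeds because
# the first element equals 1 and the second element is an int.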
# TODO: Tensor comprehension patterns
_empty = {}
class MatchResult(ABC):
"""The result of a pattern match; either success or failure."""
test: Any
submatches: Dict[str, "MatchResult"]
def __init__(
self, test: Any, submatches: Optional[Dict[str, "MatchResult"]] = None
) -> None:
self.test = test
self.submatches = submatches if submatches is not None else _empty
@abstractmethod
def is_success(self) -> bool:
pass
@abstractmethod
def is_fail(self) -> bool:
pass
def __str__(self) -> str:
return f"{type(self).__name__}:{self.test}"
def __bool__(self) -> bool:
return self.is_success()
# TODO: Display as a tree and graph also
class Fail(MatchResult):
"""A pattern that always fails."""
# TODO: If we save the patterns that failed as well, then we can build a
# TODO: diagnostic engine that describes why a value failed to match against
# TODO: a complex pattern.
def __init__(
self, test: Any = None, submatches: Optional[Dict[str, MatchResult]] = None
) -> None:
MatchResult.__init__(self, test, submatches)
def is_success(self) -> bool:
return False
def is_fail(self) -> bool:
return True
class Success(MatchResult):
"""A pattern that always succeeds."""
def __init__(
self, test: Any = None, submatches: Optional[Dict[str, MatchResult]] = None
) -> None:
MatchResult.__init__(self, test, submatches)
def is_success(self) -> bool:
return True
def is_fail(self) -> bool:
return False
class PatternBase(ABC):
@abstractmethod
def match(self, test: Any) -> MatchResult:
pass
@abstractmethod
def _to_str(self, test: str) -> str:
pass
def __str__(self) -> str:
return self._to_str("test")
def __call__(self, test: Any) -> MatchResult:
return self.match(test)
Pattern = Union[PatternBase, int, str, float, type, list, None]
class PredicatePattern(PatternBase):
"""A pattern is logically a predicate; this pattern just encapsulates any
predicate that returns a match result."""
predicate: Callable[[Any], bool]
name: str
def __init__(self, predicate: Callable[[Any], bool], name: str = "if") -> None:
self.predicate = predicate
self.name = name
def match(self, test: Any) -> MatchResult:
return Success(test) if self.predicate(test) else Fail(test)
def _to_str(self, test: str) -> str:
return f"{self.name}({test})"
AtomicType = Union[bool, int, float, str, None]
class AtomicPattern(PatternBase, metaclass=ABCMeta):
"""An atomic pattern matches against a single specific value, such as a
specific integer, Boolean, string, and so on."""
value: AtomicType
def __init__(self, value: AtomicType) -> None:
self.value = value
def match(self, test: Any) -> MatchResult:
return Success(test) if test == self.value else Fail(test)
def _to_str(self, test: str) -> str:
return f"{test}=={str(self.value)}"
class BoolPattern(AtomicPattern):
"""The pattern that matches a specific Boolean value."""
def __init__(self, value: bool) -> None:
AtomicPattern.__init__(self, value)
truePattern = BoolPattern(True)
falsePattern = BoolPattern(False)
class IntPattern(AtomicPattern):
"""The pattern that matches a specific integer value."""
def __init__(self, value: int) -> None:
AtomicPattern.__init__(self, value)
class FloatPattern(AtomicPattern):
"""The pattern that matches a specific float value."""
def __init__(self, value: float) -> None:
AtomicPattern.__init__(self, value)
class StringPattern(AtomicPattern):
"""The pattern that matches a specific string value."""
def __init__(self, value: str) -> None:
AtomicPattern.__init__(self, value)
# Note that we do not want to use "None" to mean "the pattern that matches nothing"
# because it will be useful to be able to match "None" values in ASTs. Use the
# FailPattern if you want a pattern that never matches.
class NonePattern(AtomicPattern):
"""The pattern that matches None."""
def __init__(self) -> None:
AtomicPattern.__init__(self, None)
def _to_str(self, test: str) -> str:
return f"{test} is None"
nonePattern = NonePattern()
class AnyPattern(PatternBase):
"""The pattern that matches anything; it always succeeds."""
def __init__(self) -> None:
pass
def match(self, test: Any) -> MatchResult:
return Success(test)
def _to_str(self, test: str) -> str:
return f"{test} is Any"
anyPattern = AnyPattern()
def is_any(pattern: Pattern) -> bool:
return isinstance(pattern, AnyPattern)
class FailPattern(PatternBase):
"""The pattern that matches nothing; it always fails."""
def __init__(self) -> None:
pass
def match(self, test: Any) -> MatchResult:
return Fail(test)
def _to_str(self, test: str) -> str:
return "FAIL"
failPattern = FailPattern()
class TypePattern(PatternBase):
"""The pattern that matches if the value is an instance of the given type."""
typ: type
def __init__(self, typ: type) -> None:
self.typ = typ
def match(self, test: Any) -> MatchResult:
return Success(test) if isinstance(test, self.typ) else Fail(test)
def _to_str(self, test: str) -> str:
return f"isinstance({test}, {self.typ.__name__})"
def _match_list_pattern(patterns: List[Pattern], test: Any) -> MatchResult:
if not isinstance(test, list) or len(test) != len(patterns):
return Fail(test)
submatches = {str(i): match(pattern, test[i]) for i, pattern in enumerate(patterns)}
if any(result.is_fail() for result in submatches.values()):
return Fail(test, submatches)
return Success(test, submatches)
def match(pattern: Pattern, test: Any) -> MatchResult:
if pattern is None:
return Success(test) if test is None else Fail(test)
if (
isinstance(pattern, int)
or isinstance(pattern, str)
or isinstance(pattern, bool)
or isinstance(pattern, float)
):
return Success(test) if test == pattern else Fail(test)
if isinstance(pattern, list):
return _match_list_pattern(pattern, test)
if isinstance(pattern, type):
return Success(test) if isinstance(test, pattern) else Fail(test)
if isinstance(pattern, PatternBase):
return pattern.match(test)
raise TypeError(f"Expected pattern, got {type(pattern).__name__}")
def to_pattern(pattern: Pattern) -> PatternBase:
"""Takes any value that can be used as a pattern, and returns an object that
derives from PatternBase that has the same semantics."""
if isinstance(pattern, PatternBase):
return pattern
if pattern is None:
return nonePattern
if isinstance(pattern, bool):
return BoolPattern(pattern)
if isinstance(pattern, int):
return IntPattern(pattern)
if isinstance(pattern, float):
return FloatPattern(pattern)
if isinstance(pattern, str):
return StringPattern(pattern)
if isinstance(pattern, list):
if len(pattern) == 0:
return EmptyListPattern()
return ListPattern(pattern)
if isinstance(pattern, type):
return TypePattern(pattern)
raise TypeError(f"Expected pattern, got {type(pattern).__name__}")
class Negate(PatternBase):
"""Negates a pattern; if the underlying pattern succeeds, this fails, and
vice versa."""
pattern: Pattern
def __init__(self, pattern: Pattern) -> None:
self.pattern = pattern
def match(self, test: Any) -> MatchResult:
result = match(self.pattern, test)
if result.is_success():
return Fail(result.test, result.submatches)
return Success(result.test, result.submatches)
def _to_str(self, test: str) -> str:
return f"not({to_pattern(self.pattern)._to_str(test)})"
# This is a *pattern combinator*. It takes a pattern and returns a modification
# of the pattern; in this case, it's negation.
def negate(pattern: Pattern) -> Pattern:
"""Produces the negation of a given pattern."""
if isinstance(pattern, Negate):
return pattern.pattern
return Negate(pattern)
class MatchEvery(PatternBase):
"""The pattern that succeeds if every pattern in the list succeeds.
It will stop trying to match patterns after the first failure. If there
are no patterns in the list then it succeeds."""
patterns: List[Pattern]
def __init__(self, *patterns: Pattern) -> None:
self.patterns = list(patterns)
def match(self, test: Any) -> MatchResult:
submatches = {}
for p in self.patterns:
result = match(p, test)
submatches.update(result.submatches)
if result.is_fail():
# We return the submatches here just for diagnostic purposes; since
# this pattern intends to match *every* subpattern, it is helpful
# when diagnosing a failure to see what all the failed submatches were.
return Fail(test, submatches)
return Success(test, submatches)
def _to_str(self, test: str) -> str:
children = " and ".join(
to_pattern(pattern)._to_str(test) for pattern in self.patterns
)
return f"({children})"
# This is also a pattern combinator.
def match_every(*patterns: Pattern) -> Pattern:
ps: List[Pattern] = list(patterns)
while True:
# If there is an "any" in the list we can discard it.
ps = [p for p in ps if not is_any(p)]
if len(ps) == 0:
return anyPattern
if len(ps) == 1:
return ps[0]
if any(isinstance(p, FailPattern) for p in ps):
return failPattern
index = next(
(i for (i, pattern) in enumerate(ps) if isinstance(pattern, MatchEvery)),
None,
)
if index is None:
return MatchEvery(*ps)
child = ps[index]
assert isinstance(child, MatchEvery)
ps = ps[:index] + child.patterns + ps[(index + 1) :]
class MatchAny(PatternBase):
"""The pattern that succeeds if any pattern in the list succeeds.
It will stop trying to match patterns after the first success. If there
are no patterns in the list then it fails."""
patterns: List[Pattern]
def __init__(self, *patterns: Pattern) -> None:
self.patterns = list(patterns)
def match(self, test: Any) -> MatchResult:
# Bail out on the first success.
submatches = {}
for p in self.patterns:
result = match(p, test)
submatches.update(result.submatches)
if result.is_success():
return result
return Fail(test, submatches)
def _to_str(self, test: str) -> str:
children = " or ".join(
to_pattern(pattern)._to_str(test) for pattern in self.patterns
)
return f"({children})"
# Another combinator.
def match_any(*patterns: Pattern) -> Pattern:
ps: List[Pattern] = list(patterns)
while True:
# If there is a "Fail" in the list we can discard it.
ps = [p for p in ps if not isinstance(p, FailPattern)]
if len(ps) == 0:
return failPattern
if len(ps) == 1:
return ps[0]
if any(is_any(p) for p in ps):
return anyPattern
index = next(
(i for (i, pattern) in enumerate(ps) if isinstance(pattern, MatchAny)), None
)
if index is None:
return MatchAny(*ps)
child = ps[index]
assert isinstance(child, MatchAny)
ps = ps[:index] + child.patterns + ps[index + 1 :]
class Subpattern(PatternBase):
"""Sometimes we want to check to see if a given value matches the pattern
whereby some projected value matches a pattern. This class represents
such patterns. It takes a subpattern and a projection; when match is called,
it projects the value and runs the subpattern on the projected value."""
name: str
subpattern: Pattern
get_subtest: Callable[[Any], Any]
def __init__(
self, name: str, subpattern: Pattern, get_subtest: Callable[[Any], Any]
) -> None:
self.name = name
self.subpattern = subpattern
self.get_subtest = get_subtest
def match(self, test: Any) -> MatchResult:
submatch = match(self.subpattern, self.get_subtest(test))
submatches = {self.name: submatch}
if submatch.is_success():
return Success(test, submatches)
return Fail(test, submatches)
def _to_str(self, test: str) -> str:
return to_pattern(self.subpattern)._to_str(f"{test}.{self.name}")
class AttributeSubpattern(PatternBase):
"""Sometimes we want to check to see if an attribute of a value matches
a pattern. This class represents such patterns. It takes a subpattern and
an attribute name. When match is called, it runs the subpattern on the
attribute of the given value."""
name: str
subpattern: Pattern
def __init__(self, name: str, subpattern: Pattern) -> None:
self.name = name
self.subpattern = subpattern
def match(self, test: Any) -> MatchResult:
submatch = match(self.subpattern, getattr(test, self.name, None))
submatches = {self.name: submatch}
if submatch.is_success():
return Success(test, submatches)
return Fail(test, submatches)
def _to_str(self, test: str) -> str:
return to_pattern(self.subpattern)._to_str(f"{test}.{self.name}")
# Another combinator
def attribute(name: str, subpattern: Pattern) -> Pattern:
if is_any(subpattern):
return subpattern
return AttributeSubpattern(name, subpattern)
class EmptyListPattern(PatternBase):
"""This pattern matches an empty list."""
name: str
def __init__(self, name: str = "empty_list") -> None:
self.name = name
def match(self, test: Any) -> MatchResult:
if isinstance(test, list) and len(test) == 0:
return Success(test)
return Fail(test)
def _to_str(self, test: str) -> str:
return f"{test}==[]"
emptyList = EmptyListPattern()
nonEmptyList = match_every(list, negate(emptyList))
twoPlusList = match_every(
list, negate(match_any([], [anyPattern], [anyPattern, anyPattern]))
)
class HeadTail(PatternBase):
"""This combinator takes a pattern to match the head of a list and
a pattern to match the tail. If the list is empty, it automatically
fails; otherwise both patterns must match. The tail pattern is not
attempted if the head pattern fails."""
name: str
head: Pattern
tail: Pattern
def __init__(
self,
head: Pattern = anyPattern,
tail: Pattern = anyPattern,
name: str = "head_tail",
) -> None:
self.name = name
self.head = head
self.tail = tail
def match(self, test: Any) -> MatchResult:
if not isinstance(test, list) or len(test) == 0:
return Fail(test)
# Python allows this interesting list destructuring:
h, *t = test
head_result = match(self.head, h)
if head_result.is_fail():
return Fail(test, {"head": h})
tail_result = match(self.tail, t)
submatches = {"head": head_result, "tail": tail_result}
if tail_result.is_fail():
return Fail(test, submatches)
return Success(test, submatches)
def _to_str(self, test: str) -> str:
h = to_pattern(self.head)._to_str(test + "[0]")
t = to_pattern(self.tail)._to_str(test + "[1:]")
return f"{h} and {t}"
class ListPattern(PatternBase):
"""This pattern matches a list of patterns to a list."""
name: str
patterns: List[Pattern]
def __init__(self, patterns: List[Pattern], name: str = "list_pattern") -> None:
self.patterns = patterns
self.name = name
def match(self, test: Any) -> MatchResult:
return _match_list_pattern(self.patterns, test)
def _to_str(self, test: str) -> str:
ps = ", ".join(
to_pattern(p)._to_str(f"{test}[{i}]") for i, p in enumerate(self.patterns)
)
return f"[{ps}]"
class ListAny(PatternBase):
"""Matches a list where one or more elements of the list match the pattern."""
name: str
pattern: Pattern
def __init__(self, pattern: Pattern, name: str = "list_any") -> None:
self.pattern = pattern
self.name = name
def match(self, test: Any) -> MatchResult:
if not isinstance(test, list):
return Fail(test)
# TODO: We could bail out after the first success.
submatches = {str(i): match(self.pattern, t) for i, t in enumerate(test)}
if any(result.is_success() for result in submatches.values()):
return Success(test, submatches)
return Fail(test, submatches)
def _to_str(self, test: str) -> str:
return f"{test}.any(x:{to_pattern(self.pattern)._to_str('x')})"
class ListAll(PatternBase):
"""Matches a list where all elements of the list match the pattern."""
name: str
pattern: Pattern
def __init__(self, pattern: Pattern, name: str = "list_all") -> None:
self.pattern = pattern
self.name = name
def match(self, test: Any) -> MatchResult:
if not isinstance(test, list):
return Fail(test)
# TODO: We could bail out after the first failure.
submatches = {str(i): match(self.pattern, t) for i, t in enumerate(test)}
if any(result.is_fail() for result in submatches.values()):
return Fail(test, submatches)
return Success(test, submatches)
def _to_str(self, test: str) -> str:
return f"{test}.all(x:{to_pattern(self.pattern)._to_str('x')})"
# This complex combinator takes a type and a list of patterns to match against
# attributes of the type, and then returns the resulting pattern that matches
# values of that type with attributes that match all the given patterns.
def type_and_attributes(typ: type, patterns: Dict[str, Pattern]) -> Pattern:
t: List[Pattern] = [typ]
tuples: List[Pattern] = [
attribute(name, subpattern) for (name, subpattern) in patterns.items()
]
return match_every(*(t + tuples))
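# For example (with a hypothetical class Foo), type_and_attributes(Foo, {"size": 2})
# produces a pattern that matches any instance of Foo whose `size` attribute
# equals 2.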
| beanmachine-main | src/beanmachine/ppl/compiler/patterns.py |