python_code | repo_name | file_path
---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Cauchy, Normal
@bm.random_variable
def flip():
return Bernoulli(0.5)
@bm.random_variable
def norm(n):
return Normal(0, 1)
@bm.functional
def do_it():
return norm(flip())
@bm.functional
def bad_functional():
return 123
@bm.random_variable
def no_distribution_rv():
return 123
@bm.random_variable
def unsupported_distribution_rv():
return Cauchy(1.0, 2.0)
@bm.functional
def missing_tensor_instance_function():
# What happens if we call a function on a tensor instance
# that does not exist at all?
return norm(1).not_a_real_function()
@bm.functional
def unsupported_tensor_instance_function_1():
# Tensor instance function exists but we do not handle it.
return norm(1).arccos()
@bm.functional
def unsupported_tensor_instance_function_2():
# Same as above but called via Tensor:
return torch.Tensor.arccos(norm(1))
@bm.functional
def unsupported_tensor_instance_function_3():
# Regular receiver, stochastic argument:
return torch.tensor(7.0).dot(norm(1))
@bm.functional
def unsupported_torch_function():
# Same as above but called via torch:
return torch.arccos(norm(1))
@bm.functional
def unsupported_torch_submodule_function():
# What if we call an unsupported function in submodule of torch?
return torch.special.erf(norm(1))
@bm.functional
def missing_distribution_function():
# What happens if we try to get a nonsensical attr from a
# stochastic distribution?
return Normal(norm(1), 1.0).no_such_function()
@bm.functional
def unsupported_distribution_function():
return Normal(norm(1), 1.0).entropy()
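# For contrast with the failure cases exercised below, a well-formed call to the
# inference API has this shape (a sketch only, not part of the test; the sample
# count is arbitrary):
#
#   samples = BMGInference().infer([flip()], {}, 100)
#
# That is: a list of random-variable queries, a dictionary of observations keyed
# by random variables with tensor values, and the number of samples to draw.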
class BMGBadModelsTest(unittest.TestCase):
def test_bmg_inference_error_reporting(self):
with self.assertRaises(TypeError) as ex:
BMGInference().infer(123, {}, 10)
self.assertEqual(
str(ex.exception),
"Parameter 'queries' is required to be a list but is of type int.",
)
with self.assertRaises(TypeError) as ex:
BMGInference().infer([], 123, 10)
self.assertEqual(
str(ex.exception),
"Parameter 'observations' is required to be a dictionary but is of type int.",
)
# Should be flip():
with self.assertRaises(TypeError) as ex:
BMGInference().infer([flip], {}, 10)
self.assertEqual(
str(ex.exception),
"A query is required to be a random variable but is of type function.",
)
# Should be flip():
with self.assertRaises(TypeError) as ex:
BMGInference().infer([flip()], {flip: tensor(True)}, 10)
self.assertEqual(
str(ex.exception),
"An observation is required to be a random variable but is of type function.",
)
# Should be a tensor
with self.assertRaises(TypeError) as ex:
BMGInference().infer([flip()], {flip(): 123.0}, 10)
self.assertEqual(
str(ex.exception),
"An observed value is required to be a tensor but is of type float.",
)
# You can't make inferences on rv-of-rv
with self.assertRaises(TypeError) as ex:
BMGInference().infer([norm(flip())], {}, 10)
self.assertEqual(
str(ex.exception),
"The arguments to a query must not be random variables.",
)
# You can't make inferences on rv-of-rv
with self.assertRaises(TypeError) as ex:
BMGInference().infer([flip()], {norm(flip()): tensor(123)}, 10)
self.assertEqual(
str(ex.exception),
"The arguments to an observation must not be random variables.",
)
# Observations must be of random variables, not
# functionals
with self.assertRaises(TypeError) as ex:
BMGInference().infer([flip()], {do_it(): tensor(123)}, 10)
self.assertEqual(
str(ex.exception),
"An observation must observe a random_variable, not a functional.",
)
# A functional must always return a value that can be represented
# in the graph.
with self.assertRaises(TypeError) as ex:
BMGInference().infer([bad_functional()], {}, 10)
self.assertEqual(
str(ex.exception),
"A functional must return a tensor.",
)
# TODO: Verify we handle correctly the case where a queried value is
# a constant, because that is not directly supported by BMG but
# it would be nice to have.
# An rv must return a distribution.
with self.assertRaises(TypeError) as ex:
BMGInference().infer([no_distribution_rv()], {}, 10)
self.assertEqual(
str(ex.exception),
"A random_variable is required to return a distribution.",
)
# An rv must return a supported distribution.
with self.assertRaises(TypeError) as ex:
BMGInference().infer([unsupported_distribution_rv()], {}, 10)
self.assertEqual(
str(ex.exception),
"Distribution 'Cauchy' is not supported by Bean Machine Graph.",
)
def test_bad_tensor_operations(self) -> None:
with self.assertRaises(ValueError) as ex:
BMGInference().infer([unsupported_tensor_instance_function_1()], {}, 1)
expected = """
Function arccos is not supported by Bean Machine Graph.
"""
self.assertEqual(expected.strip(), str(ex.exception).strip())
with self.assertRaises(ValueError) as ex:
BMGInference().infer([unsupported_tensor_instance_function_2()], {}, 1)
self.assertEqual(expected.strip(), str(ex.exception).strip())
with self.assertRaises(ValueError) as ex:
BMGInference().infer([unsupported_torch_function()], {}, 1)
self.assertEqual(expected.strip(), str(ex.exception).strip())
expected = """
Function dot is not supported by Bean Machine Graph.
"""
with self.assertRaises(ValueError) as ex:
BMGInference().infer([unsupported_tensor_instance_function_3()], {}, 1)
self.assertEqual(expected.strip(), str(ex.exception).strip())
# I have no idea why torch gives the name of torch.special.erf as
# "special_erf" rather than "erf", but it does.
expected = """
Function special_erf is not supported by Bean Machine Graph.
"""
with self.assertRaises(ValueError) as ex:
BMGInference().infer([unsupported_torch_submodule_function()], {}, 1)
self.assertEqual(expected.strip(), str(ex.exception).strip())
with self.assertRaises(ValueError) as ex:
BMGInference().infer([missing_tensor_instance_function()], {}, 1)
expected = """
Function not_a_real_function is not supported by Bean Machine Graph.
"""
self.assertEqual(expected.strip(), str(ex.exception).strip())
with self.assertRaises(ValueError) as ex:
BMGInference().infer([missing_distribution_function()], {}, 1)
expected = """
Function no_such_function is not supported by Bean Machine Graph.
"""
self.assertEqual(expected.strip(), str(ex.exception).strip())
with self.assertRaises(ValueError) as ex:
BMGInference().infer([unsupported_distribution_function()], {}, 1)
expected = """
Function entropy is not supported by Bean Machine Graph.
"""
self.assertEqual(expected.strip(), str(ex.exception).strip())
| beanmachine-main | tests/ppl/compiler/bmg_bad_models_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.error_report import BadMatrixMultiplication
from beanmachine.ppl.compiler.size_assessment import SizeAssessment
from beanmachine.ppl.compiler.sizer import Size, Sizer
class SizeAssessmentTests(unittest.TestCase):
def test_matrix_mult(self):
bmg = BMGraphBuilder()
assessor = SizeAssessment(Sizer())
probs = bmg.add_real_matrix(
torch.tensor([[0.5, 0.125, 0.125], [0.0625, 0.0625, 0.875]])
)
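# Build a 2 x 3 matrix of Bernoulli samples, one per entry of probs, to serve as
# an operand in the matrix multiplications checked below.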
tensor_elements = []
for row in range(0, 2):
row_node = bmg.add_natural(row)
row_prob = bmg.add_column_index(probs, row_node)
for column in range(0, 3):
col_index = bmg.add_natural(column)
prob = bmg.add_vector_index(row_prob, col_index)
bernoulli = bmg.add_bernoulli(prob)
sample = bmg.add_sample(bernoulli)
tensor_elements.append(sample)
matrix2by3_rhs = bmg.add_tensor(Size([2, 3]), *tensor_elements)
# invalid
matrix2by3 = bmg.add_real_matrix(
torch.tensor([[0.21, 0.27, 0.3], [0.5, 0.6, 0.1]])
)
matrix1by3 = bmg.add_real_matrix(torch.tensor([[0.1, 0.2, 0.3]]))
matrix3 = bmg.add_real_matrix(torch.tensor([0.1, 0.2, 0.9]))
scalar = bmg.add_real(4.5)
mm_invalid = bmg.add_matrix_multiplication(matrix2by3_rhs, matrix2by3)
error_size_mismatch = assessor.size_error(mm_invalid, bmg)
self.assertIsInstance(error_size_mismatch, BadMatrixMultiplication)
expectation = """
The model uses a matrix multiplication (@) operation unsupported by Bean Machine Graph.
The dimensions of the operands are 2x3 and 2x3.
"""
self.assertEqual(expectation.strip(), error_size_mismatch.__str__().strip())
broadcast_not_supported_yet = bmg.add_matrix_multiplication(
matrix2by3_rhs, matrix1by3
)
error_broadcast_not_supported_yet = assessor.size_error(
broadcast_not_supported_yet, bmg
)
expectation = """
The model uses a matrix multiplication (@) operation unsupported by Bean Machine Graph.
The dimensions of the operands are 2x3 and 1x3.
"""
self.assertEqual(
expectation.strip(), error_broadcast_not_supported_yet.__str__().strip()
)
errors = [
assessor.size_error(bmg.add_matrix_multiplication(matrix2by3_rhs, mm), bmg)
for mm in [matrix3, scalar]
]
for error in errors:
self.assertIsNone(error)
| beanmachine-main | tests/ppl/compiler/size_assessment_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.compiler.runtime import BMGRuntime
from torch import tensor
from torch.distributions import Bernoulli, Beta
# We need to be able to tell what size the tensor is
# when a model operates on multi-valued tensors.
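# Here coin() draws a [1,2]-sized sample (Beta's first parameter is a 1 x 2 tensor),
# and the sizer must propagate that size through flip() and the query, as the
# expected graph below shows.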
@bm.random_variable
def coin():
return Beta(tensor([[1.0, 2.0]]), 3.0)
@bm.random_variable
def flip():
return Bernoulli(coin())
class SizerTest(unittest.TestCase):
def test_sizer_1(self) -> None:
self.maxDiff = None
queries = [flip()]
observations = {}
bmg = BMGRuntime().accumulate_graph(queries, observations)
observed = to_dot(bmg, node_sizes=True)
expected = """
digraph "graph" {
N0[label="[[1.0,2.0]]:[1,2]"];
N1[label="[[3.0,3.0]]:[1,2]"];
N2[label="Beta:[1,2]"];
N3[label="Sample:[1,2]"];
N4[label="Bernoulli:[1,2]"];
N5[label="Sample:[1,2]"];
N6[label="Query:[1,2]"];
N0 -> N2[label=alpha];
N1 -> N2[label=beta];
N2 -> N3[label=operand];
N3 -> N4[label=probability];
N4 -> N5[label=operand];
N5 -> N6[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/sizer_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.inference import BMGInference
trials = torch.tensor([29854.0, 2016.0])
pos = torch.tensor([4.0, 0.0])
buck_rep = torch.tensor([0.0006, 0.01])
n_buckets = len(trials)
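# log(1 - exp(x)) for x <= 0; used below to turn a joint log-probability into log-odds.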
def log1mexp(x):
return (1 - x.exp()).log()
@bm.random_variable
def eta(): # k reals
return dist.Normal(0.0, 1.0).expand((n_buckets,))
@bm.random_variable
def alpha(): # atomic R+
return dist.half_normal.HalfNormal(5.0)
@bm.random_variable
def sigma(): # atomic R+
return dist.half_normal.HalfNormal(1.0)
@bm.random_variable
def length_scale(): # R+
return dist.half_normal.HalfNormal(0.1)
@bm.functional
def cholesky(): # k by k reals
delta = 1e-3
alpha_sq = alpha() * alpha()
rho_sq = length_scale() * length_scale()
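# Squared-exponential (RBF) covariance over the bucket representations, with a small
# diagonal jitter (delta) added so the Cholesky factorization below stays stable.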
cov = (buck_rep - buck_rep.unsqueeze(-1)) ** 2
cov = alpha_sq * torch.exp(-cov / (2 * rho_sq))
cov += torch.eye(buck_rep.size(0)) * delta
return torch.linalg.cholesky(cov)
@bm.random_variable
def prev(): # k reals
return dist.Normal(torch.matmul(cholesky(), eta()), sigma())
@bm.random_variable
def bucket_prob(): # atomic bool
phi_prev = dist.Normal(0, 1).cdf(prev()) # k probs
log_prob = pos * torch.log(phi_prev)
log_prob += (trials - pos) * torch.log1p(-phi_prev)
joint_log_prob = log_prob.sum()
# Convert the joint log prob to a log-odds.
logit_prob = joint_log_prob - log1mexp(joint_log_prob)
return dist.Bernoulli(logits=logit_prob)
class GEPTest(unittest.TestCase):
def test_gep_model_compilation(self) -> None:
self.maxDiff = None
queries = [prev()]
observations = {bucket_prob(): torch.tensor([1.0])}
# Demonstrate that compiling to an actual BMG graph
# generates a graph which type checks.
g, _ = BMGInference().to_graph(queries, observations)
observed = g.to_dot()
# TODO: We're not generating matrix complement or matrix log here after the matrix phi.
# It seems like we could be; what's going wrong in the tensorizer?
expected = """
digraph "graph" {
N0[label="5"];
N1[label="HalfNormal"];
N2[label="~"];
N3[label="0.1"];
N4[label="HalfNormal"];
N5[label="~"];
N6[label="0"];
N7[label="1"];
N8[label="Normal"];
N9[label="~"];
N10[label="Normal"];
N11[label="~"];
N12[label="HalfNormal"];
N13[label="~"];
N14[label="*"];
N15[label="2"];
N16[label="*"];
N17[label="-1"];
N18[label="^"];
N19[label="ToReal"];
N20[label="matrix"];
N21[label="MatrixScale"];
N22[label="MatrixExp"];
N23[label="MatrixScale"];
N24[label="matrix"];
N25[label="MatrixAdd"];
N26[label="Cholesky"];
N27[label="2"];
N28[label="1"];
N29[label="ToMatrix"];
N30[label="MatrixMultiply"];
N31[label="0"];
N32[label="Index"];
N33[label="Normal"];
N34[label="~"];
N35[label="Index"];
N36[label="Normal"];
N37[label="~"];
N38[label="matrix"];
N39[label="ToMatrix"];
N40[label="MatrixPhi"];
N41[label="MatrixLog"];
N42[label="ToReal"];
N43[label="ElementwiseMultiply"];
N44[label="matrix"];
N45[label="Index"];
N46[label="Complement"];
N47[label="Log"];
N48[label="Index"];
N49[label="Complement"];
N50[label="Log"];
N51[label="ToMatrix"];
N52[label="ToReal"];
N53[label="ElementwiseMultiply"];
N54[label="MatrixAdd"];
N55[label="MatrixSum"];
N56[label="ToNegReal"];
N57[label="Log1mExp"];
N58[label="Negate"];
N59[label="ToReal"];
N60[label="+"];
N61[label="BernoulliLogit"];
N62[label="~"];
N0 -> N1;
N1 -> N2;
N2 -> N14;
N2 -> N14;
N3 -> N4;
N4 -> N5;
N5 -> N16;
N5 -> N16;
N6 -> N8;
N6 -> N10;
N7 -> N8;
N7 -> N10;
N7 -> N12;
N8 -> N9;
N9 -> N29;
N10 -> N11;
N11 -> N29;
N12 -> N13;
N13 -> N33;
N13 -> N36;
N14 -> N23;
N15 -> N16;
N16 -> N18;
N17 -> N18;
N18 -> N19;
N19 -> N21;
N20 -> N21;
N21 -> N22;
N22 -> N23;
N23 -> N25;
N24 -> N25;
N25 -> N26;
N26 -> N30;
N27 -> N29;
N27 -> N39;
N27 -> N51;
N28 -> N29;
N28 -> N35;
N28 -> N39;
N28 -> N48;
N28 -> N51;
N29 -> N30;
N30 -> N32;
N30 -> N35;
N31 -> N32;
N31 -> N45;
N32 -> N33;
N33 -> N34;
N34 -> N39;
N35 -> N36;
N36 -> N37;
N37 -> N39;
N38 -> N43;
N39 -> N40;
N40 -> N41;
N40 -> N45;
N40 -> N48;
N41 -> N42;
N42 -> N43;
N43 -> N54;
N44 -> N53;
N45 -> N46;
N46 -> N47;
N47 -> N51;
N48 -> N49;
N49 -> N50;
N50 -> N51;
N51 -> N52;
N52 -> N53;
N53 -> N54;
N54 -> N55;
N55 -> N56;
N55 -> N60;
N56 -> N57;
N57 -> N58;
N58 -> N59;
N59 -> N60;
N60 -> N61;
N61 -> N62;
O0[label="Observation"];
N62 -> O0;
Q0[label="Query"];
N39 -> Q0;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/gep_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Test performance of multiary multiplication optimization """
import platform
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch.distributions import Normal
@bm.random_variable
def norm(x):
return Normal(0.0, 1.0)
@bm.functional
def prod_1(counter):
prod = 1.0
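# Accumulate with repeated binary multiplications; the multiary_multiplication_fixer
# (skipped in the second half of the test below) collapses this chain into a single
# n-ary multiply, which is what the node and edge counts measure.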
for i in range(counter):
prod = prod * norm(i)
return prod
@bm.functional
def prod_2():
return prod_1(100) * prod_1(50)
def get_report(skip_optimizations):
observations = {}
queries = [prod_2()]
number_samples = 1000
_, perf_report = BMGInference()._infer(
queries, observations, number_samples, skip_optimizations=skip_optimizations
)
return perf_report
class BinaryVsMultiaryMultiplicationPerformanceTest(unittest.TestCase):
def test_perf_num_nodes_edges(self) -> None:
"""
Test to check if Multiary multiplication optimization reduces the
number of nodes and number of edges using the performance
report returned by BMGInference.
"""
if platform.system() == "Windows":
self.skipTest("Disabling *_perf_test.py until flakiness is resolved")
self.maxDiff = None
skip_optimizations = {
"beta_bernoulli_conjugate_fixer",
"beta_binomial_conjugate_fixer",
"normal_normal_conjugate_fixer",
}
report_w_optimization = get_report(skip_optimizations)
self.assertEqual(report_w_optimization.node_count, 105)
self.assertEqual(report_w_optimization.edge_count, 204)
skip_optimizations = {
"multiary_multiplication_fixer",
"beta_bernoulli_conjugate_fixer",
"beta_binomial_conjugate_fixer",
"normal_normal_conjugate_fixer",
}
report_wo_optimization = get_report(skip_optimizations)
self.assertEqual(report_wo_optimization.node_count, 203)
self.assertEqual(report_wo_optimization.edge_count, 302)
| beanmachine-main | tests/ppl/compiler/binary_vs_multiary_multiplication_perf_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for bmg_types.py"""
import unittest
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.bmg_types import (
_lookup,
Boolean,
BooleanMatrix,
bottom,
Natural,
NaturalMatrix,
NegativeReal,
NegativeRealMatrix,
One,
OneHotMatrix,
PositiveReal,
PositiveRealMatrix,
Probability,
ProbabilityMatrix,
Real,
RealMatrix,
SimplexMatrix,
supremum,
Tensor,
type_of_value,
Zero,
ZeroMatrix,
)
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from torch import tensor
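# The graph builder's add_query wants an RVIdentifier; this makes a throwaway one
# for the dot-generation test below.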
def _rv_id() -> RVIdentifier:
return RVIdentifier(lambda a, b: a, (1, 1))
class BMGTypesTest(unittest.TestCase):
def test_lookup_table(self) -> None:
"""Tests _lookup_table for some basic properties"""
# This is a simple example of "property based testing"
# Since the search space is finite, we can do it exhaustively
lookup_table = _lookup()
keys = lookup_table.keys()
for key in keys:
a, b = key
class_1 = lookup_table[(a, b)]
class_2 = lookup_table[(b, a)]
# symmetry
self.assertEqual(
class_1(1, 1),
class_2(1, 1),
msg="Table breaks symmertry when inputs are ("
+ str(a)
+ ","
+ str(b)
+ ")",
)
return
def test_supremum(self) -> None:
"""test_supremum"""
# Degenerate case -- supremum of no types is bottom because it
# is the smallest type that is larger than every type in the
# empty list.
self.assertEqual(bottom, supremum())
# Supremum of one type is that type
self.assertEqual(Probability, supremum(Probability))
# A few cases for single-valued types.
self.assertEqual(PositiveReal, supremum(Probability, Natural))
self.assertEqual(Real, supremum(Natural, Probability, Real))
self.assertEqual(Tensor, supremum(Real, Tensor, Natural, Boolean))
self.assertEqual(Real, supremum(NegativeReal, PositiveReal))
self.assertEqual(Boolean, supremum(One, Zero))
# Supremum of any two types with different matrix dimensions is Tensor
self.assertEqual(Tensor, supremum(RealMatrix(1, 2), RealMatrix(2, 1)))
# A few cases for matrices
self.assertEqual(
ProbabilityMatrix(1, 2), supremum(BooleanMatrix(1, 2), SimplexMatrix(1, 2))
)
self.assertEqual(
PositiveRealMatrix(1, 2), supremum(NaturalMatrix(1, 2), SimplexMatrix(1, 2))
)
def test_type_of_value(self) -> None:
"""test_type_of_value"""
self.assertEqual(One, type_of_value(True))
self.assertEqual(Zero, type_of_value(False))
self.assertEqual(Zero, type_of_value(0))
self.assertEqual(One, type_of_value(1))
self.assertEqual(Zero, type_of_value(0.0))
self.assertEqual(One, type_of_value(1.0))
self.assertEqual(Zero, type_of_value(tensor(False)))
self.assertEqual(Zero, type_of_value(tensor(0)))
self.assertEqual(One, type_of_value(tensor(1)))
self.assertEqual(Zero, type_of_value(tensor(0.0)))
self.assertEqual(One, type_of_value(tensor(1.0)))
self.assertEqual(One, type_of_value(tensor([[True]])))
self.assertEqual(Zero, type_of_value(tensor([[False]])))
self.assertEqual(Zero, type_of_value(tensor([[0]])))
self.assertEqual(One, type_of_value(tensor([[1]])))
self.assertEqual(Zero, type_of_value(tensor([[0.0]])))
self.assertEqual(One, type_of_value(tensor([[1.0]])))
self.assertEqual(Natural, type_of_value(2))
self.assertEqual(Natural, type_of_value(2.0))
self.assertEqual(Natural, type_of_value(tensor(2)))
self.assertEqual(Natural, type_of_value(tensor(2.0)))
self.assertEqual(Natural, type_of_value(tensor([[2]])))
self.assertEqual(Natural, type_of_value(tensor([[2.0]])))
self.assertEqual(Probability, type_of_value(0.5))
self.assertEqual(Probability, type_of_value(tensor(0.5)))
self.assertEqual(Probability, type_of_value(tensor([[0.5]])))
self.assertEqual(PositiveReal, type_of_value(1.5))
self.assertEqual(PositiveReal, type_of_value(tensor(1.5)))
self.assertEqual(PositiveReal, type_of_value(tensor([[1.5]])))
self.assertEqual(NegativeReal, type_of_value(-1.5))
self.assertEqual(NegativeReal, type_of_value(tensor(-1.5)))
self.assertEqual(NegativeReal, type_of_value(tensor([[-1.5]])))
# 1-d tensor is matrix
# Tensors are row-major in torch but column-major in BMG. We give
# the BMG type of the tensor as though it were column-major.
# This is treated as if it were [[0],[0]], a 2-column 1-row tensor
# because that's what we're going to emit into BMG.
self.assertEqual(ZeroMatrix(2, 1), type_of_value(tensor([0, 0])))
self.assertEqual(BooleanMatrix(3, 1), type_of_value(tensor([0, 1, 1])))
self.assertEqual(BooleanMatrix(2, 1), type_of_value(tensor([1, 1])))
# 2-d tensor is matrix
self.assertEqual(OneHotMatrix(2, 2), type_of_value(tensor([[1, 0], [1, 0]])))
self.assertEqual(BooleanMatrix(2, 2), type_of_value(tensor([[1, 1], [1, 0]])))
self.assertEqual(NaturalMatrix(2, 2), type_of_value(tensor([[1, 3], [1, 0]])))
self.assertEqual(
SimplexMatrix(2, 2), type_of_value(tensor([[0.5, 0.5], [0.5, 0.5]]))
)
self.assertEqual(
ProbabilityMatrix(2, 2), type_of_value(tensor([[0.75, 0.5], [0.5, 0.5]]))
)
self.assertEqual(
PositiveRealMatrix(2, 2), type_of_value(tensor([[1.75, 0.5], [0.5, 0.5]]))
)
self.assertEqual(
RealMatrix(2, 2), type_of_value(tensor([[1.75, 0.5], [0.5, -0.5]]))
)
self.assertEqual(
NegativeRealMatrix(2, 2),
type_of_value(tensor([[-1.75, -0.5], [-0.5, -0.5]])),
)
# 3-d tensor is Tensor
self.assertEqual(
Tensor, type_of_value(tensor([[[0, 0], [0, 0]], [[0, 0], [0, 0]]]))
)
# Empty tensor is Tensor
self.assertEqual(Tensor, type_of_value(tensor([])))
def test_types_in_dot(self) -> None:
"""test_types_in_dot"""
self.maxDiff = None
bmg = BMGraphBuilder()
one = bmg.add_constant(tensor(1.0))
two = bmg.add_constant(tensor(2.0))
half = bmg.add_constant(tensor(0.5))
beta = bmg.add_beta(two, two)
betas = bmg.add_sample(beta)
mult = bmg.add_multiplication(half, betas)
norm = bmg.add_normal(mult, one)
bern = bmg.add_bernoulli(mult)
bmg.add_sample(norm)
bmg.add_sample(bern)
bmg.add_query(mult, _rv_id())
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N00[label="0.5:P"];
N01[label="2.0:N"];
N02[label="Beta:P"];
N03[label="Sample:P"];
N04[label="*:P"];
N05[label="1.0:OH"];
N06[label="Normal:R"];
N07[label="Sample:R"];
N08[label="Bernoulli:B"];
N09[label="Sample:B"];
N10[label="Query:P"];
N00 -> N04[label="left:P"];
N01 -> N02[label="alpha:R+"];
N01 -> N02[label="beta:R+"];
N02 -> N03[label="operand:P"];
N03 -> N04[label="right:P"];
N04 -> N06[label="mu:R"];
N04 -> N08[label="probability:P"];
N04 -> N10[label="operator:any"];
N05 -> N06[label="sigma:R+"];
N06 -> N07[label="operand:R"];
N08 -> N09[label="operand:B"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_matrix_types(self) -> None:
"""test_matrix_types"""
b22 = BooleanMatrix(2, 2)
b33 = BooleanMatrix(3, 3)
# Reference equality
self.assertEqual(b22, BooleanMatrix(2, 2))
self.assertNotEqual(b22, b33)
self.assertEqual(b22.short_name, "MB[2,2]")
self.assertEqual(b22.long_name, "2 x 2 bool matrix")
| beanmachine-main | tests/ppl/compiler/bmg_types_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for rules.py"""
import ast
import re
import unittest
from typing import Any
import astor
from beanmachine.ppl.compiler.ast_patterns import (
add,
ast_domain,
ast_false,
ast_true,
attribute,
binop,
expr,
function_def,
name,
num,
)
from beanmachine.ppl.compiler.patterns import (
anyPattern as _default,
ListAny,
match_every,
PredicatePattern,
)
from beanmachine.ppl.compiler.rules import (
AllOf,
at_least_once,
either_or_both,
fail,
if_then,
ignore_div_zero,
ignore_runtime_error,
list_member_children,
ListEdit,
make_logger,
pattern_rules,
PatternRule,
projection_rule,
remove_from_list,
SomeOf,
TryMany as many,
TryOnce as once,
)
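# Collapse newlines and runs of spaces so multi-line rule descriptions can be
# compared as single strings.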
def tidy(s: str) -> str:
return re.sub(" +", " ", s.replace("\n", " ")).strip()
def first_expr(s: str) -> ast.AST:
return ast.parse(s).body[0].value
_all = ast_domain.all_children
some = ast_domain.some_children
one = ast_domain.one_child
top_down = ast_domain.top_down
bottom_up = ast_domain.bottom_up
descend_until = ast_domain.descend_until
specific_child = ast_domain.specific_child
class RulesTest(unittest.TestCase):
def test_rules_1(self) -> None:
"""Tests for rules.py"""
remove_plus_zero = pattern_rules(
[
(binop(op=add, left=num(n=0)), lambda b: b.right),
(binop(op=add, right=num(n=0)), lambda b: b.left),
],
"remove_plus_zero",
)
self.maxDiff = None
m = ast.parse("0; 1; 1+1; 0+1; 1+0; 0+1+0; 0+(1+0); (0+1)+(1+0)")
# z = m.body[0].value
o = m.body[1].value
oo = m.body[2].value
zo = m.body[3].value
oz = m.body[4].value
zo_z = m.body[5].value
z_oz = m.body[6].value
zo_oz = m.body[7].value
rpz_once = once(remove_plus_zero)
rpz_many = many(remove_plus_zero)
observed = str(rpz_once)
expected = """
try_once(
first_match(
remove_plus_zero(
(isinstance(test, BinOp) and
isinstance(test.op, Add) and
(isinstance(test.left, Num) and test.left.n==0)),
remove_plus_zero(
(isinstance(test, BinOp) and
isinstance(test.op, Add) and
(isinstance(test.right, Num) and test.right.n==0)) ) )
"""
self.assertEqual(tidy(observed), tidy(expected))
# Note that _all on this list does not recurse down to the
# children of the list elements. It runs the rule once on
# each list element, and that's it.
result = _all(rpz_once)([oo, zo_z, z_oz, zo_oz]).expect_success()
self.assertEqual(ast.dump(result[0]), ast.dump(oo))
self.assertEqual(ast.dump(result[1]), ast.dump(zo))
self.assertEqual(ast.dump(result[2]), ast.dump(oz))
self.assertEqual(ast.dump(result[3]), ast.dump(zo_oz))
# Again, this does not recurse to the children. Rather, it keeps
# running the rule until the pattern fails; that is different than
# recursing down into the children!
result = _all(rpz_many)([oo, zo_z, z_oz, zo_oz]).expect_success()
self.assertEqual(ast.dump(result[0]), ast.dump(oo))
self.assertEqual(ast.dump(result[1]), ast.dump(o))
self.assertEqual(ast.dump(result[2]), ast.dump(o))
self.assertEqual(ast.dump(result[3]), ast.dump(zo_oz))
# Now instead of running the rule on all elements of a list, let's
# run the rule once on all *children* of a node. Again, this applies the
# rule just to the children; it does not recurse down into their
# children, and it does not re-run the rule on the result.
result = _all(rpz_once)(z_oz).expect_success()
self.assertEqual(ast.dump(result), ast.dump(zo))
result = _all(rpz_once)(zo_z).expect_success()
self.assertEqual(ast.dump(result), ast.dump(oz))
result = _all(rpz_once)(zo_oz).expect_success()
self.assertEqual(ast.dump(result), ast.dump(oo))
# Above we had a test for _all(many(rpz))([oo, zo_z, z_oz, zo_oz]);
# we can get the same results with:
result = many(some(remove_plus_zero))([oo, zo_z, z_oz, zo_oz]).expect_success()
self.assertEqual(ast.dump(result[0]), ast.dump(oo))
self.assertEqual(ast.dump(result[1]), ast.dump(o))
self.assertEqual(ast.dump(result[2]), ast.dump(o))
self.assertEqual(ast.dump(result[3]), ast.dump(zo_oz))
# Both attain a fixpoint.
# OneChild applies a rule to members of a list or the children of a node,
# until the first success, and then it stops.
result = one(remove_plus_zero)([oo, zo_z, z_oz, zo_oz]).expect_success()
self.assertEqual(ast.dump(result[0]), ast.dump(oo)) # Rule fails
self.assertEqual(ast.dump(result[1]), ast.dump(zo)) # Rule succeeds
self.assertEqual(ast.dump(result[2]), ast.dump(z_oz)) # Rule does not run
self.assertEqual(ast.dump(result[3]), ast.dump(zo_oz)) # Rule does not run
# Testing list editing:
# Let's start with a simple test: remove all the zeros from a list
# of integers:
remove_zeros = PatternRule(0, lambda b: remove_from_list, "remove_zeros")
result = _all(once(remove_zeros))(
[0, 1, 0, 2, 0, 0, 3, 4, 0, 0]
).expect_success()
self.assertEqual(result, [1, 2, 3, 4])
# Let's try some deeper combinations. Here we apply a rule to all
# children of a module -- that is, the body. That rule then applies
# remove_num_statements once to all members of the body list.
remove_num_statements = PatternRule(
expr(num()), lambda b: remove_from_list, "remove_num_statements"
)
t = ast.parse("0; 1; 2 + 3; 4 + 5 + 6; 7 + 8 * 9;")
result = _all(_all(once(remove_num_statements)))(t).expect_success()
self.assertEqual(
ast.dump(result), ast.dump(ast.parse("2 + 3; 4 + 5 + 6; 7 + 8 * 9;"))
)
# Split every statement that is a binop into two statements,
# and keep going until you can split no more:
split_binops = PatternRule(
expr(binop()),
lambda b: ListEdit([ast.Expr(b.value.left), ast.Expr(b.value.right)]),
"split_binops",
)
# This correctly implements those semantics.
# The "some" fails when no more work can be done, so the "many"
# repeats until a fixpoint is reached for the statement list.
result = _all(many(some(split_binops)))(t).expect_success()
self.assertEqual(
ast.dump(result), ast.dump(ast.parse("0; 1; 2; 3; 4; 5; 6; 7; 8; 9;"))
)
# TODO: Unfortunately, this does not attain a fixpoint.
# TODO: This seems like it should have the same behaviour as the
# TODO: previous, but what happens is: split_binops returns a ListEdit.
# TODO: TryMany then checks whether split_binops applies again;
# TODO: it does not because a ListEdit is not an Expr(BinOp); it is a
# TODO: ListEdit possibly containing an Expr(BinOp). It then returns the
# TODO: ListEdit to AllChildren, which splices in the result and goes on
# TODO: to the next item in the list.
# TODO:
# TODO: We have a problem here: should rules which return ListEdits to other
# TODO: rules have those other rules automatically distribute their behaviour
# TODO: across the ListEdit? Should we disallow a rule from returning a
# TODO: ListEdit to anything other than All/Some/One, which are the only
# TODO: combinators that know how to splice in the edit? Give this some
# TODO: thought.
result = _all(_all(many(split_binops)))(t).expect_success()
self.assertEqual(
ast.dump(result), ast.dump(ast.parse("0; 1; 2; 3; 4 + 5; 6; 7; 8 * 9;"))
)
# Test top-down and bottom-up combinators:
# The top-down and bottom-up combinators recursively apply a rule to every
# node in a tree; top-down rewrites the root and then rewrites all the new
# children; bottom-up rewrites the leaves and then the new parents.
# What is the difference between bottom-up and top-down traversals?
# Consider this example.
test = ast.parse("m(0, 1, 2+3, [0+4, 5+0, 0+6+0], {0+(7+0): (0+8)+(9+0)})")
expected = ast.parse("m(0, 1, 2+3, [4, 5, 6], {7: 8+9})")
result = bottom_up(rpz_once)(test).expect_success()
self.assertEqual(ast.dump(result), ast.dump(expected))
# As we'd expect, the bottom-up traversal eliminates all the +0 operations
# from the tree. But top-down does not!
result = top_down(rpz_once)(test).expect_success()
expected = ast.parse("m(0, 1, 2+3, [4, 5, 0+6], {7+0: 8+9})")
self.assertEqual(ast.dump(result), ast.dump(expected))
# Why are 0+6+0 and 0+(7+0) not simplified to 6 and 7 by top_down?
# Well, think about what top-down does when it encounters 0+6+0.
# First it notes that 0+6+0 has the form x+0 and simplifies it to x,
# so we have 0+6. Then we recurse on the children, but the children
# are not of the form 0+x or x+0, so we're done. I said rpz_once,
# not rpz_many, which would keep trying to simplify until proceeding
# to the children:
result = top_down(rpz_many)(test).expect_success()
expected = ast.parse("m(0, 1, 2+3, [4, 5, 6], {7: 8+9})")
self.assertEqual(ast.dump(result), ast.dump(expected))
# The at_least_once combinator requires that a rule succeed at least
# once, and then runs it until it fails.
alorpz = at_least_once(remove_plus_zero)
self.assertFalse(alorpz(o))
self.assertTrue(alorpz(z_oz))
def test_infinite_loop_detection(self) -> None:
# While working on a previous test case I accidentally created a pattern
# that has an infinite loop; one of the benefits of a combinator-based
# approach to rewriting is we can often detect statically when a particular
# combination of rules must produce an infinite loop, and raise an error.
# In particular, we know several of the rules always succeed (TryMany,
# TryOnce, identity) and if any of these rules are ever passed to TryMany,
# we've got an infinite loop right there.
# Here's an example. fail always fails, but once always succeeds. Since
# once always succeeds, _all(once(anything)) always succeeds, which means
# that we've given something that always succeeds to many, and we'll loop
# forever.
with self.assertRaises(ValueError):
_all = ast_domain.all_children
_all(many(_all(once(fail))))
def disabled_test_rules_2(self) -> None:
"""Tests for rules.py"""
# PYTHON VERSIONING ISSUE
# TODO: This test does not pass in newer versions of Python; for
# unknown reasons the two parse trees differ in small details.
# Once we understand why, re-enable this test.
self.maxDiff = None
_all = ast_domain.all_children
num_stmt = expr(num())
even = PatternRule(
match_every(num_stmt, PredicatePattern(lambda e: e.value.n % 2 == 0))
)
add_one = projection_rule(lambda e: ast.Expr(ast.Num(e.value.n + 1)))
t = ast.parse("0; 1; 2; 3; 4; 5 + 6")
result = _all(_all(if_then(even, add_one)))(t).expect_success()
self.assertEqual(ast.dump(result), ast.dump(ast.parse("1; 1; 3; 3; 5; 5 + 6")))
def disabled_test_find_random_variables(self) -> None:
"""Find all the functions that have a decorator, delete everything else."""
# PYTHON VERSIONING ISSUE
# TODO: This test does not pass in newer versions of Python; for
# unknown reasons the two parse trees differ in small details.
# Once we understand why, re-enable this test.
self.maxDiff = None
_all = ast_domain.all_children
rule = pattern_rules(
[
(
function_def(
decorator_list=ListAny(attribute(attr="random_variable"))
),
lambda f: ast.FunctionDef(
name=f.name,
args=f.args,
body=[ast.Pass()],
returns=None,
decorator_list=[],
),
),
(_default, lambda x: remove_from_list),
]
)
source = """
# foo.py
@bm.random_variable
def bias() -> Beta:
return Beta(1, 1)
@bm.random_variable
def toss(i) -> Bernoulli:
return Bernoulli(bias())
def foo():
return 123
"""
expected = """
def bias():
pass
def toss(i):
pass
"""
m = ast.parse(source)
result = _all(_all(rule))(m).expect_success()
self.assertEqual(ast.dump(result), ast.dump(ast.parse(expected)))
def test_rules_3(self) -> None:
"""Tests for rules.py"""
self.maxDiff = None
# Some nodes, like BoolOp, have the interesting property that they
# have both regular children and children in a list, which makes it
# inconvenient to apply a rule to all the "logical" children. This
# combinator helps with that.
t = ast.NameConstant(True)
f = ast.NameConstant(False)
swap_bools = pattern_rules(
[(ast_true, lambda n: f), (ast_false, lambda n: t)], "swap_bools"
)
# First we'll try it without the combinator:
_all = ast_domain.all_children
# "True < False < 1" has this structure:
c = ast.Compare(ops=[ast.Lt(), ast.Lt()], left=t, comparators=[f, ast.Num(1)])
result = _all(once(swap_bools))(c).expect_success()
# all applies the rule to op, left and comparators; since op and comparators
# do not match the pattern, they're unchanged. But we do not recurse
# into values, so we only change the first one:
expected = "(False < False < 1)"
observed = astor.to_source(result)
self.assertEqual(observed.strip(), expected.strip())
# This version treats all the ops and values as children, and as
# we intend, the rule operates on all the children:
result = _all(list_member_children(once(swap_bools)))(c).expect_success()
expected = "(False < True < 1)"
observed = astor.to_source(result)
self.assertEqual(observed.strip(), expected.strip())
def test_rules_4(self) -> None:
"""Tests for rules.py"""
self.maxDiff = None
# either_or_both logically takes two rules A and B, and tries to apply
# Compose(A, B), A, or B, in that order. The first that succeeds is
# the result.
zero_to_one = PatternRule(0, lambda n: 1)
one_to_two = PatternRule(1, lambda n: 2)
eob = either_or_both(zero_to_one, one_to_two)
self.assertEqual(eob(0).expect_success(), 2)
self.assertEqual(eob(1).expect_success(), 2)
self.assertTrue(eob(2).is_fail())
# The some_top_down combinator applies a rule to every node in the tree,
# from root to leaves, but ignores nodes for which the rule fails.
# It succeeds iff the rule succeeded on any node in the tree. This is
# useful because it guarantees that if it succeeds, then it did the most
# work it could do applying a rule to a tree.
sometd = ast_domain.some_top_down
result = sometd(eob)(ast.parse("0 + 1 * 2 + 3")).expect_success()
expected = "2 + 2 * 2 + 3"
observed = astor.to_source(result)
self.assertEqual(observed.strip(), expected.strip())
# If the rule applies to no node, then we fail.
self.assertTrue(sometd(eob)(result).is_fail())
# The some_bottom_up combinator is the same as some_top_down but it
# works from leaves to root instead of root to leaves.
somebu = ast_domain.some_bottom_up
result = somebu(eob)(ast.parse("0 + 1 * 2 + 3")).expect_success()
expected = "2 + 2 * 2 + 3"
observed = astor.to_source(result)
self.assertEqual(observed.strip(), expected.strip())
# If the rule applies to no node, then we fail.
self.assertTrue(somebu(eob)(result).is_fail())
# SomeOf extends either_or_both to arbitrarily many rules.
zero_to_one = PatternRule(0, lambda n: 1)
one_to_two = PatternRule(1, lambda n: 2)
three_to_four = PatternRule(3, lambda n: 4)
so = SomeOf([zero_to_one, one_to_two, three_to_four])
self.assertEqual(so(0).expect_success(), 2)
self.assertEqual(so(1).expect_success(), 2)
self.assertEqual(so(3).expect_success(), 4)
self.assertTrue(so(2).is_fail())
# AllOf extends composition to arbitrarily many rules.
two_to_three = PatternRule(2, lambda n: 3)
ao1 = AllOf([zero_to_one, one_to_two, two_to_three])
self.assertEqual(ao1(0).expect_success(), 3)
self.assertTrue(ao1(1).is_fail())
ao2 = AllOf([zero_to_one, one_to_two, three_to_four])
self.assertTrue(ao2(0).is_fail())
def test_rules_6(self) -> None:
"""Tests for rules.py"""
# Sometimes a rule's projection will fail with an exception through
# no fault of our own; it can be expensive or impossible to detect
# a coming exception in some cases. In those cases we can use a combinator
# which causes rules that throw exceptions to fail rather than throw.
def always_throws(x: Any):
raise NotImplementedError()
self.maxDiff = None
d = ignore_div_zero(PatternRule([int, int], lambda l: l[0] / l[1]))
self.assertEqual(d([10, 5]).expect_success(), 2)
self.assertTrue(d([10, 0]).is_fail())
n = ignore_runtime_error(PatternRule(int, always_throws))
self.assertTrue(n(123).is_fail())
def test_rules_7(self) -> None:
"""Tests for rules.py"""
# descend_until is a handy combinator that descends through the tree,
# top down, until a test rule succeeds. It then applies a rule to
# the nodes that succeeded but does not further recurse down. It does
# this for all matching nodes in the tree starting from the root.
self.maxDiff = None
# replace all 1 with 2, but only in functions decorated with @frob:
t = PatternRule(function_def(decorator_list=ListAny(name(id="frob"))))
r = top_down(once(PatternRule(num(1), lambda n: ast.Num(2))))
s = """
0
1
@frob
def f():
0
1
@frob
def g():
1
1
def h():
0
1
"""
expected = """
0
1
@frob
def f():
0
2
@frob
def g():
2
2
def h():
0
1
"""
result = descend_until(t, r)(ast.parse(s)).expect_success()
observed = astor.to_source(result)
self.assertEqual(observed.strip(), expected.strip())
def test_rules_8(self) -> None:
"""Tests for rules.py"""
# specific_child applies a rule to a specified child of the rule
# input; the input is required to have such a child. If the rule
# succeeds then the output is the input with the rewritten child.
self.maxDiff = None
# replace 1 with 2, but only in the left operand of each binary operation:
log = []
trace = make_logger(log)
r = trace(
top_down(
once(
if_then(
PatternRule(binop()),
trace(
specific_child(
"left", PatternRule(num(1), lambda n: ast.Num(2))
)
),
)
)
)
)
s = "1 + 1 * 1 + 1"
expected = "2 + 2 * 1 + 1"
result = r(ast.parse(s)).expect_success()
observed = astor.to_source(result)
self.assertEqual(observed.strip(), expected.strip())
observed = "\n".join(log)
expected = """
Started top_down
Started specific_child
Finished specific_child
Started specific_child
Finished specific_child
Started specific_child
Finished specific_child
Finished top_down
"""
self.assertEqual(observed.strip(), expected.strip())
def test_rules_9(self) -> None:
"""Tests for rules.py"""
# This demonstrates that a rule that produces a list edit will
# recursively rewrite that list edit.
self.maxDiff = None
# Recursively replace any list of the form [True, [another list]] with
# the inner list.
r = top_down(once(PatternRule([True, list], lambda l: ListEdit(l[1]))))
s = [[True, [1]], [True, [[True, [2]], 3]]]
expected = "[1, 2, 3]"
observed = str(r(s).expect_success())
self.assertEqual(observed.strip(), expected.strip())
| beanmachine-main | tests/ppl/compiler/rules_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Normal
@bm.random_variable
def norm(x):
return Normal(0.0, 1.0)
@bm.functional
def norm_array():
return tensor([[norm(0), norm(1)], [norm(2), norm(3)]])
@bm.functional
def transpose_1():
return torch.transpose(norm_array(), 0, 1)
@bm.functional
def transpose_2():
return torch.transpose(norm_array(), 1, 0)
@bm.functional
def transpose_3():
return norm_array().transpose(0, 1)
# Fails due to invalid dimensions
@bm.functional
def unsupported_transpose_1():
return torch.transpose(norm_array(), 3, 2)
# Fails due to invalid dimension
@bm.functional
def unsupported_transpose_2():
return norm_array().transpose(3, 1)
# Fails due to invalid (non-int) dimension
@bm.functional
def unsupported_transpose_3():
return norm_array().transpose(3.2, 1)
@bm.functional
def scalar_transpose():
return torch.transpose(norm(0), 0, 1)
@bm.functional
def scalar_transpose_2():
return torch.transpose(tensor([norm(0)]), 0, 1)
class TransposeTest(unittest.TestCase):
dot_from_normal = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=Sample];
N06[label=Sample];
N07[label=2];
N08[label=ToMatrix];
N09[label=Transpose];
N10[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N02 -> N05;
N02 -> N06;
N03 -> N08;
N04 -> N08;
N05 -> N08;
N06 -> N08;
N07 -> N08;
N07 -> N08;
N08 -> N09;
N09 -> N10;
}
""".strip()
def test_transpose_1(self) -> None:
queries = [transpose_1()]
dot = BMGInference().to_dot(queries, {})
self.assertEqual(dot.strip(), self.dot_from_normal)
def test_transpose_2(self) -> None:
queries = [transpose_2()]
dot = BMGInference().to_dot(queries, {})
self.assertEqual(dot.strip(), self.dot_from_normal)
def test_transpose_3(self) -> None:
queries = [transpose_3()]
dot = BMGInference().to_dot(queries, {})
self.assertEqual(dot.strip(), self.dot_from_normal)
def test_unsupported_transpose_1(self) -> None:
with self.assertRaises(ValueError) as ex:
BMGInference().infer([unsupported_transpose_1()], {}, 1)
expected = """
Unsupported dimension arguments for transpose: 3 and 2
"""
self.assertEqual(expected.strip(), str(ex.exception).strip())
def test_unsupported_transpose_2(self) -> None:
with self.assertRaises(ValueError) as ex:
BMGInference().infer([unsupported_transpose_2()], {}, 1)
expected = """
Unsupported dimension arguments for transpose: 3 and 1
"""
self.assertEqual(expected.strip(), str(ex.exception).strip())
def test_unsupported_transpose_3(self) -> None:
with self.assertRaises(ValueError) as ex:
BMGInference().infer([unsupported_transpose_3()], {}, 1)
expected = """
Unsupported dimension arguments for transpose: 3.2 and 1
"""
self.assertEqual(expected.strip(), str(ex.exception).strip())
def test_scalar_transpose(self) -> None:
queries = [scalar_transpose()]
dot = BMGInference().to_dot(queries, {})
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
}
"""
self.assertEqual(dot.strip(), expected.strip())
def test_1x1_transpose(self) -> None:
queries = [scalar_transpose_2()]
dot = BMGInference().to_dot(queries, {})
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
}
"""
self.assertEqual(dot.strip(), expected.strip())
| beanmachine-main | tests/ppl/compiler/transpose_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Test performance of multiary addition optimization """
import platform
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch.distributions import Normal
@bm.random_variable
def norm(x):
return Normal(0.0, 1.0)
@bm.functional
def sum_1(counter):
sum = 0.0
for i in range(counter):
sum = sum + norm(i)
return sum
@bm.functional
def sum_2():
return sum_1(100) + sum_1(50)
def get_report(skip_optimizations):
observations = {}
queries = [sum_2()]
number_samples = 1000
_, perf_report = BMGInference()._infer(
queries, observations, number_samples, skip_optimizations=skip_optimizations
)
return perf_report
class BinaryVsMultiaryAdditionPerformanceTest(unittest.TestCase):
def test_perf_num_nodes_edges(self) -> None:
"""
Test to check if Multiary addition optimization reduces the
number of nodes and number of edges using the performance
report returned by BMGInference.
"""
if platform.system() == "Windows":
self.skipTest("Disabling *_perf_test.py until flakiness is resolved")
self.maxDiff = None
skip_optimizations = {
"beta_bernoulli_conjugate_fixer",
"beta_binomial_conjugate_fixer",
"normal_normal_conjugate_fixer",
}
report_w_optimization = get_report(skip_optimizations)
self.assertEqual(report_w_optimization.node_count, 105)
self.assertEqual(report_w_optimization.edge_count, 204)
skip_optimizations = {
"multiary_addition_fixer",
"beta_bernoulli_conjugate_fixer",
"beta_binomial_conjugate_fixer",
"normal_normal_conjugate_fixer",
}
report_wo_optimization = get_report(skip_optimizations)
self.assertEqual(report_wo_optimization.node_count, 203)
self.assertEqual(report_wo_optimization.edge_count, 302)
| beanmachine-main | tests/ppl/compiler/binary_vs_multiary_addition_perf_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl.compiler.bmg_nodes as bn
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.bmg_node_types import is_supported_by_bmg
from beanmachine.ppl.compiler.typer_base import TyperBase
# To test out the typer base class, here is a very simple typer: it assigns
# the "type" True to a node if that node and *all* of its ancestors are supported
# node types for BMG, and False otherwise.
#
# The intention here is to demonstrate that the typer behaves as expected as we
# modify the graph and update the typer.
class SupportedTyper(TyperBase[bool]):
def __init__(self) -> None:
TyperBase.__init__(self)
def _compute_type_inputs_known(self, node: bn.BMGNode) -> bool:
return (isinstance(node, bn.ConstantNode) or is_supported_by_bmg(node)) and all(
self[i] for i in node.inputs
)
class TyperTest(unittest.TestCase):
def test_typer(self) -> None:
self.maxDiff = None
# We start with this graph:
#
# 0 1
# | |
# NORM
# |
# ~ 2
# | |
# DIV 3
# | |
# ADD
# | |
# EXP NEG
#
# The DIV node is not supported in BMG.
bmg = BMGraphBuilder()
c0 = bmg.add_constant(0.0)
c1 = bmg.add_constant(1.0)
c2 = bmg.add_constant(2.0)
c3 = bmg.add_constant(3.0)
norm = bmg.add_normal(c0, c1)
ns = bmg.add_sample(norm)
d = bmg.add_division(ns, c2)
a = bmg.add_addition(d, c3)
e = bmg.add_exp(a)
neg = bmg.add_negate(a)
typer = SupportedTyper()
# When we ask the typer for a judgment of a node, we should get judgments
# of all of its ancestor nodes as well, but we skip computing types of
# non-ancestors:
self.assertTrue(typer[ns]) # Just type the sample and its ancestors.
self.assertTrue(norm in typer)
self.assertTrue(c0 in typer)
self.assertTrue(c1 in typer)
self.assertFalse(d in typer)
self.assertFalse(a in typer)
self.assertFalse(c2 in typer)
self.assertFalse(c3 in typer)
self.assertFalse(e in typer)
self.assertFalse(neg in typer)
# If we then type the exp, all of its ancestors become typed.
# Division is not supported in BMG, so the division is marked
# as not supported. The division is an ancestor of the addition
# and exp, so they are typed as False also.
self.assertFalse(typer[e])
# The ancestors of the exp are now all typed.
self.assertTrue(a in typer)
self.assertTrue(d in typer)
self.assertTrue(c3 in typer)
self.assertTrue(c2 in typer)
# But the negate is still not typed.
self.assertFalse(neg in typer)
# The types of the division, addition and exp are False:
self.assertFalse(typer[d])
self.assertFalse(typer[a])
self.assertFalse(typer[e])
self.assertTrue(typer[c2])
self.assertTrue(typer[c3])
# Now let's mutate the graph by adding some new nodes...
c4 = bmg.add_constant(0.5)
m = bmg.add_multiplication(ns, c4)
# ... and mutating the addition:
a.inputs[0] = m
# The graph now looks like this:
#
# 0 1
# | |
# NORM
# |
# ~ 2
# | | |
# | DIV
# |
# | 0.5
# | |
# MUL 3
# | |
# ADD
# | |
# EXP NEG
#
# But we have not yet informed the typer that there was an update.
self.assertFalse(typer[a])
typer.update_type(a)
self.assertTrue(typer[a])
# This should trigger typing on the untyped ancestors of
# the addition:
self.assertTrue(m in typer)
self.assertTrue(c4 in typer)
# It should NOT trigger typing the NEG. We have yet to ask for the type of
# that branch, so we do not spent time propagating type information down
# the NEG branch.
self.assertFalse(neg in typer)
# The multiplication and exp should now all be marked as supported also.
self.assertTrue(typer[m])
self.assertTrue(typer[e])
| beanmachine-main | tests/ppl/compiler/typer_base_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import operator
import unittest
import beanmachine.ppl as bm
import torch
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Binomial, Normal
m1 = tensor([[12.0, 13.0], [14.0, 15.0]])
m2 = tensor([[22.0, 23.0], [24.0, 25.0]])
@bm.random_variable
def norm_1():
return Normal(0.0, 1.0)
@bm.functional
def norm():
return torch.tensor([[1.0, 0.0], [0.0, norm_1()]])
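# A 2 x 2 matrix whose bottom-right entry is stochastic; this forces the compiler to
# emit a ToMatrix node, visible in the expected graphs below.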
@bm.functional
def mm():
# Use both the instance and static forms.
return torch.mm(m1.mm(norm()), m2)
@bm.functional
def matmul():
return torch.matmul(m1.matmul(norm()), m2)
@bm.functional
def infix():
return m1 @ norm() @ m2
@bm.functional
def op_matmul():
return operator.matmul(operator.matmul(m1, norm()), m2)
# Matrix multiplication of single-valued tensors is turned into ordinary multiplication.
@bm.random_variable
def trivial_norm_matrix():
return Normal(torch.tensor([0.0]), torch.tensor([1.0]))
@bm.functional
def trivial():
return trivial_norm_matrix() @ trivial_norm_matrix()
@bm.functional
def matmul_bad_dimensions():
n = norm() # 2x2
m = torch.eye(3) # 3x3
return n @ m
@bm.random_variable
def bern():
return Bernoulli(0.5)
@bm.random_variable
def bino():
return Binomial(5, 0.5)
@bm.functional
def bool_times_nat():
b1 = torch.tensor([[0, 1], [0, 1]])
b2 = torch.tensor([[0, bern()], [bern(), 1]])
n1 = torch.tensor([[1, bino()], [bino(), 2]])
n2 = torch.tensor([[2, 3], [4, 5]])
return b1 @ b2 @ n1 @ n2
class MatMulTest(unittest.TestCase):
def test_matrix_multiplication(self) -> None:
self.maxDiff = None
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label="[[12.0,13.0],\\\\n[14.0,15.0]]"];
N05[label=2];
N06[label=1.0];
N07[label=ToMatrix];
N08[label="@"];
N09[label="[[22.0,23.0],\\\\n[24.0,25.0]]"];
N10[label="@"];
N11[label=Query];
N00 -> N02;
N00 -> N07;
N00 -> N07;
N01 -> N02;
N02 -> N03;
N03 -> N07;
N04 -> N08;
N05 -> N07;
N05 -> N07;
N06 -> N07;
N07 -> N08;
N08 -> N10;
N09 -> N10;
N10 -> N11;
}"""
observed = BMGInference().to_dot([mm()], {})
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([matmul()], {})
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([infix()], {})
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([op_matmul()], {})
self.assertEqual(expected.strip(), observed.strip())
expected_trivial = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label="*"];
N5[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N3 -> N4;
N4 -> N5;
}"""
observed = BMGInference().to_dot([trivial()], {})
self.assertEqual(expected_trivial.strip(), observed.strip())
with self.assertRaises(ValueError) as ex:
BMGInference().to_dot([matmul_bad_dimensions()], {})
expected = """
The model uses a matrix multiplication (@) operation unsupported by Bean Machine Graph.
The dimensions of the operands are 2x2 and 3x3.
The unsupported node was created in function call matmul_bad_dimensions()."""
self.assertEqual(expected.strip(), str(ex.exception).strip())
expected = """
digraph "graph" {
N00[label=0.5];
N01[label=Bernoulli];
N02[label=Sample];
N03[label=5];
N04[label=Binomial];
N05[label=Sample];
N06[label="[[0,1],\\\\n[0,1]]"];
N07[label=2];
N08[label=False];
N09[label=True];
N10[label=ToMatrix];
N11[label=ToRealMatrix];
N12[label="@"];
N13[label=1];
N14[label=ToMatrix];
N15[label=ToRealMatrix];
N16[label="@"];
N17[label="[[2,3],\\\\n[4,5]]"];
N18[label="@"];
N19[label=Query];
N00 -> N01;
N00 -> N04;
N01 -> N02;
N02 -> N10;
N02 -> N10;
N03 -> N04;
N04 -> N05;
N05 -> N14;
N05 -> N14;
N06 -> N12;
N07 -> N10;
N07 -> N10;
N07 -> N14;
N07 -> N14;
N07 -> N14;
N08 -> N10;
N09 -> N10;
N10 -> N11;
N11 -> N12;
N12 -> N16;
N13 -> N14;
N14 -> N15;
N15 -> N16;
N16 -> N18;
N17 -> N18;
N18 -> N19;
}
"""
observed = BMGInference().to_dot([bool_times_nat()], {})
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/matrix_multiplication_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Compare original and conjugate prior transformed
Beta-Bernoulli model with operations on Bernoulli samples"""
import unittest
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
from .testlib.conjugate_models import BetaBernoulliOpsModel
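# BetaBernoulliOpsModel lives in testlib.conjugate_models and is not shown here; its
# assumed shape is theta() ~ Beta(alpha, beta), y(i) ~ Bernoulli(theta()), with
# sum_y() adding up several y(i) samples.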
class BetaBernoulliWithOpsConjugateTest(unittest.TestCase):
def test_conjugate_graph(self) -> None:
self.maxDiff = None
model = BetaBernoulliOpsModel(2.0, 2.0)
queries = [model.theta(), model.sum_y()]
observations = {
model.y(0): tensor(0.0),
model.y(1): tensor(0.0),
model.y(2): tensor(1.0),
model.y(3): tensor(0.0),
}
num_samples = 1000
bmg = BMGInference()
# This is the model after beta-bernoulli conjugate rewrite is done
skip_optimizations = set()
observed_bmg = bmg.to_dot(
queries, observations, num_samples, skip_optimizations=skip_optimizations
)
expected_bmg = """
digraph "graph" {
N00[label=3.0];
N01[label=5.0];
N02[label=Beta];
N03[label=Sample];
N04[label=Bernoulli];
N05[label=Sample];
N06[label=Sample];
N07[label=Sample];
N08[label=Sample];
N09[label=Query];
N10[label=Sample];
N11[label=ToPosReal];
N12[label=ToPosReal];
N13[label=ToPosReal];
N14[label=ToPosReal];
N15[label=ToPosReal];
N16[label="+"];
N17[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N03 -> N04;
N03 -> N09;
N04 -> N05;
N04 -> N06;
N04 -> N07;
N04 -> N08;
N04 -> N10;
N05 -> N11;
N06 -> N12;
N07 -> N13;
N08 -> N14;
N10 -> N15;
N11 -> N16;
N12 -> N16;
N13 -> N16;
N14 -> N16;
N15 -> N16;
N16 -> N17;
}
"""
self.assertEqual(expected_bmg.strip(), observed_bmg.strip())
| beanmachine-main | tests/ppl/compiler/fix_beta_bernoulli_ops_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for gen_builder.py"""
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.compiler.gen_builder import generate_builder
from beanmachine.ppl.compiler.runtime import BMGRuntime
from torch.distributions import Normal
@bm.random_variable
def norm(x):
return Normal(0.0, 1.0)
@bm.functional
def norm_sum():
return norm(1) + norm(2) + norm(3) + norm(4)
class GenerateBuilderTest(unittest.TestCase):
def test_generate_builder_1(self) -> None:
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph([norm_sum()], {})
observed = generate_builder(bmg)
expected = """
import beanmachine.ppl.compiler.bmg_nodes as bn
import torch
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from torch import tensor
bmg = BMGraphBuilder()
n0 = bmg.add_node(bn.UntypedConstantNode(tensor(0.)))
n1 = bmg.add_node(bn.UntypedConstantNode(tensor(1.)))
n2 = bmg.add_node(bn.NormalNode(n0, n1))
n3 = bmg.add_node(bn.SampleNode(n2))
n4 = bmg.add_node(bn.SampleNode(n2))
n5 = bmg.add_node(bn.AdditionNode(n3, n4))
n6 = bmg.add_node(bn.SampleNode(n2))
n7 = bmg.add_node(bn.AdditionNode(n5, n6))
n8 = bmg.add_node(bn.SampleNode(n2))
n9 = bmg.add_node(bn.AdditionNode(n7, n8))
n10 = bmg.add_node(bn.Query(n9))"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/gen_builder_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Compare original and conjugate prior transformed
Beta-Binomial model"""
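# A note on the numbers the expected graphs rely on: Binomial(n, logits=l)
# has success probability sigmoid(l) = exp(l) / (1 + exp(l)), so the constant
# logit log(0.25) corresponds to probability 0.25 / 1.25 = 0.2, which is
# folded directly into the first graph, while the stochastic logit in the
# second model is routed through a Logistic node instead.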
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import log, tensor
from torch.distributions import Binomial, Normal
@bm.random_variable
def binomial(x):
return Binomial(100, logits=log(tensor([0.25]))) # equivalent probability: 0.2
@bm.random_variable
def normal(x):
return Normal(0.0, 1.0)
@bm.random_variable
def binomial_normal_logit():
return Binomial(100, logits=tensor([normal(0)]))
@bm.functional
def add():
return binomial(0) + binomial(1)
class BinomialLogitTest(unittest.TestCase):
def test_constant_binomial_logit_graph(self) -> None:
observations = {}
queries_observed = [add()]
graph_observed = BMGInference().to_dot(queries_observed, observations)
graph_expected = """
digraph "graph" {
N0[label=100];
N1[label=0.20000000298023224];
N2[label=Binomial];
N3[label=Sample];
N4[label=Sample];
N5[label=ToPosReal];
N6[label=ToPosReal];
N7[label="+"];
N8[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N2 -> N4;
N3 -> N5;
N4 -> N6;
N5 -> N7;
N6 -> N7;
N7 -> N8;
}
"""
self.assertEqual(graph_observed.strip(), graph_expected.strip())
def test_binomial_normal_logit_graph(self) -> None:
observations = {}
queries_observed = [binomial_normal_logit()]
graph_observed = BMGInference().to_dot(queries_observed, observations)
graph_expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=100];
N5[label=Logistic];
N6[label=Binomial];
N7[label=Sample];
N8[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N5;
N4 -> N6;
N5 -> N6;
N6 -> N7;
N7 -> N8;
}
"""
self.assertEqual(graph_observed.strip(), graph_expected.strip())
| beanmachine-main | tests/ppl/compiler/fix_binomial_logit_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Beta, Normal
@bm.random_variable
def beta():
return Beta(2.0, 2.0)
@bm.random_variable
def flip(n):
return Bernoulli(beta())
@bm.random_variable
def flip_2(n):
return Bernoulli(beta() * 0.5)
@bm.random_variable
def normal(n):
return Normal(flip_2(n), 1.0)
class CoinFlipTest(unittest.TestCase):
def test_gen_bm_python_simple(self) -> None:
self.maxDiff = None
queries = [beta()]
observations = {
flip(0): tensor(0.0),
flip(1): tensor(0.0),
flip(2): tensor(1.0),
flip(3): tensor(0.0),
}
observed = BMGInference().to_bm_python(queries, observations)
expected = """
import beanmachine.ppl as bm
import torch
v0 = 2.0
@bm.random_variable
def rv0():
\treturn torch.distributions.Beta(v0, v0)
v1 = rv0()
@bm.random_variable
def rv1(i):
\treturn torch.distributions.Bernoulli(v1.wrapper(*v1.arguments))
v2 = rv1(1)
v3 = rv1(2)
v4 = rv1(3)
v5 = rv1(4)
queries = [v1]
observations = {v2 : torch.tensor(0.0),v3 : torch.tensor(0.0),v4 : torch.tensor(1.0),v5 : torch.tensor(0.0)}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_gen_bm_python_rv_operations(self) -> None:
self.maxDiff = None
queries = [beta(), normal(0), normal(1)]
observations = {
flip_2(0): tensor(0.0),
}
observed = BMGInference().to_bm_python(queries, observations)
expected = """
import beanmachine.ppl as bm
import torch
v0 = 2.0
@bm.random_variable
def rv0():
\treturn torch.distributions.Beta(v0, v0)
v1 = rv0()
v2 = 0.5
@bm.functional
def f3():
\treturn torch.multiply(v1.wrapper(*v1.arguments), v2)
@bm.random_variable
def rv1(i):
\treturn torch.distributions.Bernoulli(f3())
v4 = rv1(1)
@bm.functional
def f5():
\treturn (v4.wrapper(*v4.arguments))
v6 = 1.0
@bm.random_variable
def rv2():
\treturn torch.distributions.Normal(f5(), v6)
v7 = rv2()
v8 = rv1(2)
@bm.functional
def f9():
\treturn (v8.wrapper(*v8.arguments))
@bm.random_variable
def rv3():
\treturn torch.distributions.Normal(f9(), v6)
v10 = rv3()
queries = [v1,v7,v10]
observations = {v4 : torch.tensor(0.0)}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/gen_bm_python_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Compare original and conjugate prior transformed
Beta-Bernoulli model with a hyperparameter given
by calling a non-random_variable function."""
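# Hedged reading of the expected result below: after the conjugacy rewrite the
# query on theta is drawn directly from a posterior Beta(1.5, 6.5); the four
# Bernoulli observations (one success, three failures) have been folded into
# the hyperparameters, so no Bernoulli nodes remain in the rewritten graph.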
import unittest
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
from .testlib.conjugate_models import BetaBernoulliScaleHyperParameters
class BetaBernoulliWithScaledHPConjugateTest(unittest.TestCase):
def test_beta_bernoulli_conjugate_graph(self) -> None:
model = BetaBernoulliScaleHyperParameters(0.5, 1.5)
queries = [model.theta()]
observations = {
model.y(0): tensor(0.0),
model.y(1): tensor(0.0),
model.y(2): tensor(1.0),
model.y(3): tensor(0.0),
}
num_samples = 1000
bmg = BMGInference()
# This is the model after beta-bernoulli conjugate rewrite is done
skip_optimizations = set()
observed_bmg = bmg.to_dot(
queries, observations, num_samples, skip_optimizations=skip_optimizations
)
expected_bmg = """
digraph "graph" {
N0[label=1.5];
N1[label=6.5];
N2[label=Beta];
N3[label=Sample];
N4[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
}
"""
self.assertEqual(expected_bmg.strip(), observed_bmg.strip())
| beanmachine-main | tests/ppl/compiler/fix_beta_bernoulli_const_added_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch.distributions import Normal
@bm.random_variable
def norm(x):
return Normal(0.0, 1.0)
@bm.functional
def prod_1(counter):
prod = 0.0
for i in range(counter):
prod = prod * norm(i)
return prod
@bm.functional
def prod_2():
return prod_1(10)
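# A reading of why this model trips the bug (based only on the code above and
# the test below): prod_1 starts its accumulator at 0.0, so every term is
# multiplied into a constant zero; the query therefore ends up on a node of
# the special type Zero, and querying such a node currently fails an
# assertion rather than producing a useful error.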
class ZeroQueryTypeCheckingBug(unittest.TestCase):
def test_query_type_zero(self) -> None:
"""
Query of a variable of type Zero produces a type checking error.
"""
self.maxDiff = None
# TODO: One of the design principles of BMG is to allow
# TODO: for any query, even if you ask it to query constants.
# TODO: A potential solution could be to add a warning system so that
# TODO: the model's developer becomes aware of the possible error
with self.assertRaises(AssertionError) as ex:
BMGInference().infer([prod_2()], {}, 1)
expected = ""
observed = str(ex.exception)
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/query_type_zero_bug_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.gen_bmg_cpp import to_bmg_cpp
from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph
from beanmachine.ppl.compiler.gen_bmg_python import to_bmg_python
from beanmachine.ppl.compiler.gen_dot import to_dot
def tidy(s: str) -> str:
return "\n".join(c.strip() for c in s.strip().split("\n")).strip()
class BMGFactorTest(unittest.TestCase):
def test_bmg_factor(self) -> None:
bmg = BMGraphBuilder()
pos1 = bmg.add_pos_real(2.0)
real1 = bmg.add_real(3.0)
prob1 = bmg.add_probability(0.4)
dist1 = bmg.add_normal(real1, pos1)
x = bmg.add_sample(dist1)
x_sq = bmg.add_multiplication(x, x)
bmg.add_exp_product(x, prob1, x_sq)
bmg.add_observation(x, 7.0)
observed = to_dot(bmg, label_edges=False)
expected = """
digraph "graph" {
N0[label=3.0];
N1[label=2.0];
N2[label=Normal];
N3[label=Sample];
N4[label=0.4];
N5[label="*"];
N6[label=ExpProduct];
N7[label="Observation 7.0"];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N5;
N3 -> N5;
N3 -> N6;
N3 -> N7;
N4 -> N6;
N5 -> N6;
}
"""
self.maxDiff = None
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_graph(bmg).graph.to_string()
expected = """
0: CONSTANT(real 3) (out nodes: 2)
1: CONSTANT(positive real 2) (out nodes: 2)
2: NORMAL(0, 1) (out nodes: 3)
3: SAMPLE(2) (out nodes: 5, 5, 6) observed to be real 7
4: CONSTANT(probability 0.4) (out nodes: 6)
5: MULTIPLY(3, 3) (out nodes: 6)
6: EXP_PRODUCT(3, 4, 5) (out nodes: ) observed to be unknown
"""
self.assertEqual(tidy(expected), tidy(observed))
observed = to_bmg_python(bmg).code
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_real(3.0)
n1 = g.add_constant_pos_real(2.0)
n2 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n0, n1],
)
n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2])
n4 = g.add_constant_probability(0.4)
n5 = g.add_operator(graph.OperatorType.MULTIPLY, [n3, n3])
n6 = g.add_factor(
graph.FactorType.EXP_PRODUCT,
[n3, n4, n5],
)
g.observe(n3, 7.0)
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_cpp(bmg).code
expected = """
graph::Graph g;
uint n0 = g.add_constant_real(3.0);
uint n1 = g.add_constant_pos_real(2.0);
uint n2 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n0, n1}));
uint n3 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n2}));
uint n4 = g.add_constant_probability(0.4);
uint n5 = g.add_operator(
graph::OperatorType::MULTIPLY, std::vector<uint>({n3, n3}));
uint n6 = g.add_factor(
graph::FactorType::EXP_PRODUCT,
std::vector<uint>({n3, n4, n5}));
g.observe(n3, 7.0);
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/bmg_factor_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli
# Bean Machine allows queries on functionals that return constants;
# BMG does not. It would be nice though if a BM model that queried
# a constant worked when using BMGInference the same way that it
# does with other inference engines, for several reasons:
#
# (1) consistency of behaviour across inference engines
# (2) testing optimizations; if an optimization ends up producing
# a constant, it's nice to be able to query that functional
# and see that it does indeed produce a constant.
# (3) possible future error reporting; it would be nice to warn the
# user that they are querying a constant because this could be
# a bug in their model.
# (4) model development and debugging; a user might make a dummy functional
# that just returns a constant now, intending to replace it with an
# actual function later. Or might force a functional to produce a
# particular value to see how the model behaves in that case.
#
# This test verifies that we can query a constant functional.
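# A minimal sketch of what "querying a constant functional" means here (the
# names refer to the functionals defined just below; illustrative only):
#
#   samples = BMGInference().infer([c()], {}, num_samples=1, num_chains=1)
#   samples[c()]  # every sample is simply the constant tensor 2.5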
@bm.functional
def c():
return tensor(2.5)
@bm.functional
def c2():
return tensor([1.5, -2.5])
# Two RVIDs but they both refer to the same query node:
@bm.random_variable
def flip():
return Bernoulli(0.5)
@bm.functional
def flip2():
return flip()
@bm.functional
def flip3():
return flip() + 0
@bm.functional
def flip4():
return 0 + flip()
# Here's a weird case. Normally query nodes are deduplicated but it is
# possible to end up with two distinct query nodes both referring to the
# same constant because of an optimization.
@bm.functional
def always_false_1():
return 1 < flip()
@bm.functional
def always_false_2():
# Boolean comparison optimizer turns both of these into False,
# even though the queries were originally on different expressions
# and therefore were different nodes.
return flip() < 0
# BMG supports constant single values or tensors, but the tensors must
# be 1 or 2 dimensional; empty tensors and 3+ dimensional tensors
# need to produce an error.
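# (The two functionals below cover both failure shapes: tensor([]) has no
# elements and the nested literal is three-dimensional; test_invalid_tensors
# checks that each produces the "unsupported tensor operation" error.)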
@bm.functional
def invalid_tensor_1():
return tensor([])
@bm.functional
def invalid_tensor_2():
return tensor([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
class BMGQueryTest(unittest.TestCase):
def test_constant_functional(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([c(), c2()], {})
expected = """
digraph "graph" {
N0[label=2.5];
N1[label=Query];
N2[label="[1.5,-2.5]"];
N3[label=Query];
N0 -> N1;
N2 -> N3;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_cpp([c(), c2()], {})
# TODO: Is this valid C++? The API for adding constants
# has changed but the code generator has not kept up.
# Check if this is wrong and fix it.
expected = """
graph::Graph g;
Eigen::MatrixXd m0(1, 1);
m0 << 2.5;
uint n0 = g.add_constant_real_matrix(m0);
uint q0 = g.query(n0);
Eigen::MatrixXd m1(2, 1);
m1 << 1.5, -2.5;
uint n1 = g.add_constant_real_matrix(m1);
uint q1 = g.query(n1);
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_python([c(), c2()], {})
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_real(2.5)
q0 = g.query(n0)
n1 = g.add_constant_real_matrix(tensor([[1.5],[-2.5]]))
q1 = g.query(n1)
"""
self.assertEqual(expected.strip(), observed.strip())
samples = BMGInference().infer([c(), c2()], {}, 1, 1)
observed = samples[c()]
expected = "tensor([[2.5000]])"
self.assertEqual(expected.strip(), str(observed).strip())
observed = samples[c2()]
expected = "tensor([[[ 1.5000, -2.5000]]], dtype=torch.float64)"
self.assertEqual(expected.strip(), str(observed).strip())
def test_redundant_functionals(self) -> None:
self.maxDiff = None
# We see from the graph that we have two distinct RVIDs but they
# both refer to the same query. We need to make sure that BMG
# inference works, and that we get the dictionary out that we expect.
observed = BMGInference().to_dot([flip(), flip2()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Query];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N2 -> N4;
}
"""
self.assertEqual(expected.strip(), str(observed).strip())
samples = BMGInference().infer([flip(), flip2()], {}, 10, 1)
f = samples[flip()]
f2 = samples[flip2()]
self.assertEqual(str(f), str(f2))
# A strange case: two queries on the same constant.
observed = BMGInference().to_dot([always_false_1(), always_false_2()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=False];
N4[label=Query];
N5[label=Query];
N0 -> N1;
N1 -> N2;
N3 -> N4;
N3 -> N5;
}
"""
self.assertEqual(expected.strip(), str(observed).strip())
samples = BMGInference().infer([always_false_1(), always_false_2()], {}, 2, 1)
af1 = samples[always_false_1()]
af2 = samples[always_false_2()]
expected = "tensor([[False, False]])"
self.assertEqual(expected, str(af1))
self.assertEqual(expected, str(af2))
def test_redundant_functionals_2(self) -> None:
self.maxDiff = None
# Here's a particularly weird one: we have what is initially two
# distinct queries: flip() + 0 and 0 + flip(), but the graph optimizer
# deduces that both queries refer to the same non-constant node.
observed = BMGInference().to_dot([flip3(), flip4()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Query];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N2 -> N4;
}
"""
self.assertEqual(expected.strip(), str(observed).strip())
samples = BMGInference().infer([flip3(), flip4()], {}, 10, 1)
f3 = samples[flip3()]
f4 = samples[flip4()]
self.assertEqual(str(f3), str(f4))
def test_invalid_tensors(self) -> None:
self.maxDiff = None
with self.assertRaises(ValueError) as ex:
BMGInference().to_dot([invalid_tensor_1(), invalid_tensor_2()], {})
# TODO: This error message is horrid. Fix it.
expected = (
"The model uses a tensor "
+ "operation unsupported by Bean Machine Graph.\n"
+ "The unsupported node is the operator of a query.\n"
+ "The model uses a tensor operation unsupported by Bean Machine Graph.\n"
+ "The unsupported node is the operator of a query."
)
self.assertEqual(expected, str(ex.exception))
| beanmachine-main | tests/ppl/compiler/bmg_query_test.py |
| beanmachine-main | tests/ppl/compiler/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for bmg_nodes.py"""
import unittest
import torch
from beanmachine.ppl.compiler.bmg_nodes import (
ConstantRealMatrixNode,
ConstantTensorNode,
MatrixMultiplicationNode,
MultiplicationNode,
NormalNode,
RealNode,
)
from beanmachine.ppl.compiler.sizer import Sizer
from beanmachine.ppl.compiler.support import ComputeSupport
def support(n):
return str(ComputeSupport()[n])
def size(n):
return Sizer()[n]
class BMGNodesTest(unittest.TestCase):
def test_RealNode(self) -> None:
r42 = RealNode(42.0)
self.assertEqual(r42.value, 42.0)
self.assertEqual(size(r42), torch.Size([]))
# Note that support always returns a set of tensors, even though this
# node is technically scalar valued. In practice we never need to compute
# the support of a RealNode, so fixing this minor oddity is unnecessary.
self.assertEqual(support(r42), "tensor(42.)")
def test_MultiplicationNode(self) -> None:
r2 = RealNode(2.0)
r3 = RealNode(3.0)
rx = MultiplicationNode([r2, r3])
self.assertEqual(size(rx), torch.Size([]))
self.assertEqual(support(rx), "tensor(6.)")
def test_ConstantTensorNode_1d(self) -> None:
v42 = torch.tensor([42, 43])
t42 = ConstantTensorNode(v42)
self.assertEqual(t42.value[0], v42[0])
self.assertEqual(t42.value[1], v42[1])
self.assertEqual(v42.size(), torch.Size([2]))
self.assertEqual(size(t42), v42.size())
self.assertEqual(support(t42), "tensor([42, 43])")
def test_ConstantTensorNode_2d(self) -> None:
v42 = torch.tensor([[42, 43], [44, 45]])
t42 = ConstantTensorNode(v42)
self.assertEqual(t42.value[0, 0], v42[0, 0])
self.assertEqual(t42.value[1, 0], v42[1, 0])
self.assertEqual(v42.size(), torch.Size([2, 2]))
self.assertEqual(size(t42), v42.size())
expected = """
tensor([[42, 43],
[44, 45]])"""
self.assertEqual(support(t42).strip(), expected.strip())
def test_ConstantRealMatrixNode_2d(self) -> None:
v42 = torch.tensor([[42, 43], [44, 45]])
t42 = ConstantRealMatrixNode(v42)
self.assertEqual(t42.value[0, 0], v42[0, 0])
self.assertEqual(t42.value[1, 0], v42[1, 0])
self.assertEqual(v42.size(), torch.Size([2, 2]))
self.assertEqual(size(t42), v42.size())
expected = """
tensor([[42, 43],
[44, 45]])"""
self.assertEqual(support(t42).strip(), expected.strip())
def test_MatrixMultiplicationNode(self) -> None:
v42 = torch.tensor([[42, 43], [44, 45]])
mv = torch.mm(v42, v42)
t42 = ConstantRealMatrixNode(v42)
mt = MatrixMultiplicationNode(t42, t42)
self.assertEqual(v42.size(), torch.Size([2, 2]))
self.assertEqual(size(mt), mv.size())
expected = """
tensor([[3656, 3741],
[3828, 3917]])
"""
self.assertEqual(support(mt).strip(), expected.strip())
def test_inputs_and_outputs(self) -> None:
# We must maintain the invariant that the output set and the
# input set of every node are consistent even when the graph
# is edited.
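# Sketch of the invariant being exercised, restating the steps below: after
# the assignment n.inputs[0] = r2, one of the two output edges that pointed
# from r1 to n must move to r2 automatically, leaving each constant node with
# exactly one output.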
r1 = RealNode(1.0)
self.assertEqual(len(r1.outputs.items), 0)
n = NormalNode(r1, r1)
# r1 has two outputs, both equal to n
self.assertEqual(r1.outputs.items[n], 2)
r2 = RealNode(2.0)
n.inputs[0] = r2
# r1 and r2 now each have one output
self.assertEqual(r1.outputs.items[n], 1)
self.assertEqual(r2.outputs.items[n], 1)
| beanmachine-main | tests/ppl/compiler/bmg_nodes_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.gen_bmg_cpp import to_bmg_cpp
from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph
from beanmachine.ppl.compiler.gen_bmg_python import to_bmg_python
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.compiler.runtime import BMGRuntime
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from torch import tensor
from torch.distributions import Normal
def _rv_id() -> RVIdentifier:
return RVIdentifier(lambda a, b: a, (1, 1))
@bm.random_variable
def norm():
return Normal(tensor(0.0), tensor(1.0))
@bm.functional
def f1by2():
# a 1x2 tensor in Python becomes a 2x1 matrix in BMG
return tensor([norm().exp(), norm()])
@bm.functional
def f2by1():
# a 2x1 tensor in Python becomes a 1x2 matrix in BMG
return tensor([[norm().exp()], [norm()]])
@bm.functional
def f2by3():
# a 2x3 tensor in Python becomes a 3x2 matrix in BMG
return tensor([[norm().exp(), 10, 20], [norm(), 30, 40]])
@bm.functional
def f1by2by3():
# A 1x2x3 tensor in Python is an error in BMG.
return tensor([[[norm().exp(), 10, 20], [norm(), 30, 40]]])
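# Taken together, the comments above describe the convention these tests pin
# down: an r-by-c Python tensor appears as a c-by-r matrix on the BMG side
# (1x2 -> 2x1, 2x1 -> 1x2, 2x3 -> 3x2), and tensors with three or more
# dimensions are rejected with an "unsupported tensor operation" error.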
class ToMatrixTest(unittest.TestCase):
def test_to_matrix_1by2(self) -> None:
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph([f1by2()], {})
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
after_transform=True,
label_edges=True,
)
expected = """
digraph "graph" {
N0[label="0.0:R"];
N1[label="1.0:R+"];
N2[label="Normal:R"];
N3[label="Sample:R"];
N4[label="2:N"];
N5[label="1:N"];
N6[label="Exp:R+"];
N7[label="ToReal:R"];
N8[label="ToMatrix:MR[2,1]"];
N9[label="Query:MR[2,1]"];
N0 -> N2[label="mu:R"];
N1 -> N2[label="sigma:R+"];
N2 -> N3[label="operand:R"];
N3 -> N6[label="operand:R"];
N3 -> N8[label="1:R"];
N4 -> N8[label="rows:N"];
N5 -> N8[label="columns:N"];
N6 -> N7[label="operand:<=R"];
N7 -> N8[label="0:R"];
N8 -> N9[label="operator:any"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_cpp(bmg).code
expected = """
graph::Graph g;
uint n0 = g.add_constant_real(0.0);
uint n1 = g.add_constant_pos_real(1.0);
uint n2 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n0, n1}));
uint n3 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n2}));
uint n4 = g.add_constant_natural(2);
uint n5 = g.add_constant_natural(1);
uint n6 = g.add_operator(
graph::OperatorType::EXP, std::vector<uint>({n3}));
uint n7 = g.add_operator(
graph::OperatorType::TO_REAL, std::vector<uint>({n6}));
uint n8 = g.add_operator(
graph::OperatorType::TO_MATRIX,
std::vector<uint>({n4, n5, n7, n3}));
uint q0 = g.query(n8);
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_python(bmg).code
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_real(0.0)
n1 = g.add_constant_pos_real(1.0)
n2 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n0, n1],
)
n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2])
n4 = g.add_constant_natural(2)
n5 = g.add_constant_natural(1)
n6 = g.add_operator(graph.OperatorType.EXP, [n3])
n7 = g.add_operator(graph.OperatorType.TO_REAL, [n6])
n8 = g.add_operator(
graph.OperatorType.TO_MATRIX,
[n4, n5, n7, n3],
)
q0 = g.query(n8)
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_graph(bmg).graph.to_dot()
expected = """
digraph "graph" {
N0[label="0"];
N1[label="1"];
N2[label="Normal"];
N3[label="~"];
N4[label="2"];
N5[label="1"];
N6[label="exp"];
N7[label="ToReal"];
N8[label="ToMatrix"];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N6;
N3 -> N8;
N4 -> N8;
N5 -> N8;
N6 -> N7;
N7 -> N8;
Q0[label="Query"];
N8 -> Q0;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_to_matrix_2by1(self) -> None:
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph([f2by1()], {})
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
after_transform=True,
label_edges=True,
)
expected = """
digraph "graph" {
N0[label="0.0:R"];
N1[label="1.0:R+"];
N2[label="Normal:R"];
N3[label="Sample:R"];
N4[label="1:N"];
N5[label="2:N"];
N6[label="Exp:R+"];
N7[label="ToReal:R"];
N8[label="ToMatrix:MR[1,2]"];
N9[label="Query:MR[1,2]"];
N0 -> N2[label="mu:R"];
N1 -> N2[label="sigma:R+"];
N2 -> N3[label="operand:R"];
N3 -> N6[label="operand:R"];
N3 -> N8[label="1:R"];
N4 -> N8[label="rows:N"];
N5 -> N8[label="columns:N"];
N6 -> N7[label="operand:<=R"];
N7 -> N8[label="0:R"];
N8 -> N9[label="operator:any"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_cpp(bmg).code
expected = """
graph::Graph g;
uint n0 = g.add_constant_real(0.0);
uint n1 = g.add_constant_pos_real(1.0);
uint n2 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n0, n1}));
uint n3 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n2}));
uint n4 = g.add_constant_natural(1);
uint n5 = g.add_constant_natural(2);
uint n6 = g.add_operator(
graph::OperatorType::EXP, std::vector<uint>({n3}));
uint n7 = g.add_operator(
graph::OperatorType::TO_REAL, std::vector<uint>({n6}));
uint n8 = g.add_operator(
graph::OperatorType::TO_MATRIX,
std::vector<uint>({n4, n5, n7, n3}));
uint q0 = g.query(n8);
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_python(bmg).code
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_real(0.0)
n1 = g.add_constant_pos_real(1.0)
n2 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n0, n1],
)
n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2])
n4 = g.add_constant_natural(1)
n5 = g.add_constant_natural(2)
n6 = g.add_operator(graph.OperatorType.EXP, [n3])
n7 = g.add_operator(graph.OperatorType.TO_REAL, [n6])
n8 = g.add_operator(
graph.OperatorType.TO_MATRIX,
[n4, n5, n7, n3],
)
q0 = g.query(n8)
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_graph(bmg).graph.to_dot()
expected = """
digraph "graph" {
N0[label="0"];
N1[label="1"];
N2[label="Normal"];
N3[label="~"];
N4[label="1"];
N5[label="2"];
N6[label="exp"];
N7[label="ToReal"];
N8[label="ToMatrix"];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N6;
N3 -> N8;
N4 -> N8;
N5 -> N8;
N6 -> N7;
N7 -> N8;
Q0[label="Query"];
N8 -> Q0;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_to_matrix_2by3(self) -> None:
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph([f2by3()], {})
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
after_transform=True,
label_edges=True,
)
expected = """
digraph "graph" {
N00[label="0.0:R"];
N01[label="1.0:R+"];
N02[label="Normal:R"];
N03[label="Sample:R"];
N04[label="3:N"];
N05[label="2:N"];
N06[label="Exp:R+"];
N07[label="ToReal:R"];
N08[label="10.0:R"];
N09[label="20.0:R"];
N10[label="30.0:R"];
N11[label="40.0:R"];
N12[label="ToMatrix:MR[3,2]"];
N13[label="Query:MR[3,2]"];
N00 -> N02[label="mu:R"];
N01 -> N02[label="sigma:R+"];
N02 -> N03[label="operand:R"];
N03 -> N06[label="operand:R"];
N03 -> N12[label="3:R"];
N04 -> N12[label="rows:N"];
N05 -> N12[label="columns:N"];
N06 -> N07[label="operand:<=R"];
N07 -> N12[label="0:R"];
N08 -> N12[label="1:R"];
N09 -> N12[label="2:R"];
N10 -> N12[label="4:R"];
N11 -> N12[label="5:R"];
N12 -> N13[label="operator:any"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_to_matrix_2(self) -> None:
# Test TO_MATRIX, TO_REAL_MATRIX, TO_POS_REAL_MATRIX and
# TO_NEG_REAL_MATRIX. The first composes a matrix from elements;
# the latter convert a matrix of one type (probability in this case)
# to a matrix of another type.
#
# Notice that we do not explicitly insert a ToRealMatrix
# node here; the problem fixer detects that we have a 2x1
# probability matrix from the column index but the
# LogSumExpVector needs a real, positive real or negative
# real matrix, and inserts a ToRealMatrix node on that edge.
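# (Put differently, only the first LogSumExp operand relies on that implicit
# conversion; the other two columns are converted explicitly below through
# add_to_positive_real_matrix and add_to_negative_real_matrix.)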
self.maxDiff = None
bmg = BMGraphBuilder()
zero = bmg.add_constant(0)
one = bmg.add_constant(1)
two = bmg.add_natural(2)
three = bmg.add_constant(3)
neg_three = bmg.add_neg_real(-3.0)
beta = bmg.add_beta(three, three)
b0 = bmg.add_sample(beta)
b1 = bmg.add_sample(beta)
b2 = bmg.add_sample(beta)
b3 = bmg.add_sample(beta)
pm = bmg.add_to_matrix(two, two, b0, b1, b2, b3)
nm = bmg.add_to_matrix(two, two, neg_three, neg_three, neg_three, neg_three)
c0 = bmg.add_column_index(pm, zero)
c1 = bmg.add_column_index(pm, one)
nc0 = bmg.add_column_index(nm, zero)
tpr = bmg.add_to_positive_real_matrix(c1)
tnr = bmg.add_to_negative_real_matrix(nc0)
lse0 = bmg.add_logsumexp_vector(c0)
lse1 = bmg.add_logsumexp_vector(tpr)
lse2 = bmg.add_logsumexp_vector(tnr)
bmg.add_query(lse0, _rv_id())
bmg.add_query(lse1, _rv_id())
bmg.add_query(lse2, _rv_id())
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
after_transform=True,
label_edges=True,
)
expected = """
digraph "graph" {
N00[label="3.0:R+"];
N01[label="Beta:P"];
N02[label="Sample:P"];
N03[label="Sample:P"];
N04[label="Sample:P"];
N05[label="Sample:P"];
N06[label="2:N"];
N07[label="ToMatrix:MP[2,2]"];
N08[label="0:N"];
N09[label="ColumnIndex:MP[2,1]"];
N10[label="ToRealMatrix:MR[2,1]"];
N11[label="LogSumExp:R"];
N12[label="Query:R"];
N13[label="1:N"];
N14[label="ColumnIndex:MP[2,1]"];
N15[label="ToPosRealMatrix:MR+[2,1]"];
N16[label="LogSumExp:R"];
N17[label="Query:R"];
N18[label="-3.0:R-"];
N19[label="ToMatrix:MR-[2,2]"];
N20[label="ColumnIndex:MR-[2,1]"];
N21[label="ToNegRealMatrix:MR-[2,1]"];
N22[label="LogSumExp:R"];
N23[label="Query:R"];
N00 -> N01[label="alpha:R+"];
N00 -> N01[label="beta:R+"];
N01 -> N02[label="operand:P"];
N01 -> N03[label="operand:P"];
N01 -> N04[label="operand:P"];
N01 -> N05[label="operand:P"];
N02 -> N07[label="0:P"];
N03 -> N07[label="1:P"];
N04 -> N07[label="2:P"];
N05 -> N07[label="3:P"];
N06 -> N07[label="columns:N"];
N06 -> N07[label="rows:N"];
N06 -> N19[label="columns:N"];
N06 -> N19[label="rows:N"];
N07 -> N09[label="left:MP[2,2]"];
N07 -> N14[label="left:MP[2,2]"];
N08 -> N09[label="right:N"];
N08 -> N20[label="right:N"];
N09 -> N10[label="operand:any"];
N10 -> N11[label="operand:MR[2,1]"];
N11 -> N12[label="operator:any"];
N13 -> N14[label="right:N"];
N14 -> N15[label="operand:any"];
N15 -> N16[label="operand:MR+[2,1]"];
N16 -> N17[label="operator:any"];
N18 -> N19[label="0:R-"];
N18 -> N19[label="1:R-"];
N18 -> N19[label="2:R-"];
N18 -> N19[label="3:R-"];
N19 -> N20[label="left:MR-[2,2]"];
N20 -> N21[label="UNKNOWN:any"];
N21 -> N22[label="operand:MR-[2,1]"];
N22 -> N23[label="operator:any"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_cpp(bmg).code
expected = """
graph::Graph g;
uint n0 = g.add_constant_pos_real(3.0);
uint n1 = g.add_distribution(
graph::DistributionType::BETA,
graph::AtomicType::PROBABILITY,
std::vector<uint>({n0, n0}));
uint n2 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n3 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n4 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n5 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n6 = g.add_constant_natural(2);
uint n7 = g.add_operator(
graph::OperatorType::TO_MATRIX,
std::vector<uint>({n6, n6, n2, n3, n4, n5}));
uint n8 = g.add_constant_natural(0);
uint n9 = g.add_operator(
graph::OperatorType::COLUMN_INDEX, std::vector<uint>({n7, n8}));
uint n10 = g.add_operator(
graph::OperatorType::TO_REAL_MATRIX, std::vector<uint>({n9}));
uint n11 = g.add_operator(
graph::OperatorType::LOGSUMEXP_VECTOR, std::vector<uint>({n10}));
uint q0 = g.query(n11);
uint n12 = g.add_constant_natural(1);
uint n13 = g.add_operator(
graph::OperatorType::COLUMN_INDEX, std::vector<uint>({n7, n12}));
uint n14 = g.add_operator(
graph::OperatorType::TO_POS_REAL_MATRIX, std::vector<uint>({n13}));
uint n15 = g.add_operator(
graph::OperatorType::LOGSUMEXP_VECTOR, std::vector<uint>({n14}));
uint q1 = g.query(n15);
uint n16 = g.add_constant_neg_real(-3.0);
uint n17 = g.add_operator(
graph::OperatorType::TO_MATRIX,
std::vector<uint>({n6, n6, n16, n16, n16, n16}));
uint n18 = g.add_operator(
graph::OperatorType::COLUMN_INDEX, std::vector<uint>({n17, n8}));
uint n19 = g.add_operator(
graph::OperatorType::TO_NEG_REAL_MATRIX, std::vector<uint>({n18}));
uint n20 = g.add_operator(
graph::OperatorType::LOGSUMEXP_VECTOR, std::vector<uint>({n19}));
uint q2 = g.query(n20);
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_graph(bmg).graph.to_dot()
expected = """
digraph "graph" {
N0[label="3"];
N1[label="Beta"];
N2[label="~"];
N3[label="~"];
N4[label="~"];
N5[label="~"];
N6[label="2"];
N7[label="ToMatrix"];
N8[label="0"];
N9[label="ColumnIndex"];
N10[label="ToReal"];
N11[label="LogSumExp"];
N12[label="1"];
N13[label="ColumnIndex"];
N14[label="ToPosReal"];
N15[label="LogSumExp"];
N16[label="-3"];
N17[label="ToMatrix"];
N18[label="ColumnIndex"];
N19[label="ToNegReal"];
N20[label="LogSumExp"];
N0 -> N1;
N0 -> N1;
N1 -> N2;
N1 -> N3;
N1 -> N4;
N1 -> N5;
N2 -> N7;
N3 -> N7;
N4 -> N7;
N5 -> N7;
N6 -> N7;
N6 -> N7;
N6 -> N17;
N6 -> N17;
N7 -> N9;
N7 -> N13;
N8 -> N9;
N8 -> N18;
N9 -> N10;
N10 -> N11;
N12 -> N13;
N13 -> N14;
N14 -> N15;
N16 -> N17;
N16 -> N17;
N16 -> N17;
N16 -> N17;
N17 -> N18;
N18 -> N19;
N19 -> N20;
Q0[label="Query"];
N11 -> Q0;
Q1[label="Query"];
N15 -> Q1;
Q2[label="Query"];
N20 -> Q2;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_to_matrix_1by2by3(self) -> None:
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph([f1by2by3()], {})
# TODO: Error message could be more specific here than "a tensor".
# We could say what is wrong: its size.
expected = """
The model uses a tensor operation unsupported by Bean Machine Graph.
The unsupported node was created in function call f1by2by3()."""
with self.assertRaises(ValueError) as ex:
to_dot(
bmg,
node_types=True,
edge_requirements=True,
after_transform=True,
label_edges=True,
)
self.assertEqual(expected.strip(), str(ex.exception).strip())
| beanmachine-main | tests/ppl/compiler/to_matrix_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test for tutorial on GMM with Poisson number of components"""
# This file is a manual replica of the Bento tutorial with the same name
# TODO: The disabled test generates the following error:
# E TypeError: Distribution 'Poisson' is not supported by Bean Machine Graph.
# This will need to be fixed as part of the OSS-readiness task.
import logging
import unittest
# Comments after the imports suggest an alternative comment style (for the original tutorial)
import beanmachine.ppl as bm
import torch # from torch import manual_seed, tensor
import torch.distributions as dist # from torch.distributions import Bernoulli, Normal, Uniform
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
# This makes the results deterministic and reproducible.
logging.getLogger("beanmachine").setLevel(50)
torch.manual_seed(42)
# Model
class GaussianMixtureModel(object):
@bm.random_variable
def mu(self, c):
return dist.Normal(0.0, 10.0)
@bm.random_variable
def sigma(self, c):
return dist.Gamma(1, 1)
@bm.random_variable
def component(self, i):
return dist.Bernoulli(probs=0.5)
@bm.random_variable
def y(self, i):
c = self.component(i)
return dist.Normal(self.mu(c), self.sigma(c))
# Creating sample data
n = 12 # num observations
k = 2 # true number of clusters
gmm = GaussianMixtureModel()
ground_truth = {
**{gmm.mu(i): tensor(i % 2).float() for i in range(k)},
**{gmm.sigma(i): tensor(0.1) for i in range(k)},
**{gmm.component(i): tensor(i % k).float() for i in range(n)},
}
# [Visualization code in tutorial skipped]
# Inference parameters
num_samples = (
1 ###00 Sample size should not affect (the ability to find) compilation issues.
)
queries = (
[gmm.component(j) for j in range(n)]
+ [gmm.mu(i) for i in range(k)]
+ [gmm.sigma(i) for i in range(k)]
)
observations = {
gmm.y(i): ground_truth[gmm.mu(ground_truth[gmm.component(i)].item())]
for i in range(n)
}
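# A reading of the synthetic data above: ground_truth forces component(i) to
# i % 2, so each observation y(i) is pinned to the mean of its true cluster,
# 0.0 for even i and 1.0 for odd i. This is why the expected graphs below
# alternate "Observation 0.0" and "Observation 1.0".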
class tutorialGMM1Dimension2Components(unittest.TestCase):
def test_tutorial_GMM_1_dimension_2_components(self) -> None:
"""Check BM and BMG inference both terminate"""
self.maxDiff = None
# Inference with BM
torch.manual_seed(
42
) # Note: this is the second time we seed; seeding right before inference could be a good style for the tutorial
mh = bm.CompositionalInference()
mh.infer(
queries,
observations,
num_samples=num_samples,
num_chains=1,
)
bmg = BMGInference()
bmg.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=1,
)
self.assertTrue(True, msg="We just want to check this point is reached")
def test_tutorial_GMM_1_dimension_2_components_to_dot_cpp_python(
self,
) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(queries, observations)
expected = """digraph "graph" {
N00[label=0.5];
N01[label=Bernoulli];
N02[label=Sample];
N03[label=0.0];
N04[label=10.0];
N05[label=Normal];
N06[label=Sample];
N07[label=Sample];
N08[label=1.0];
N09[label=Gamma];
N10[label=Sample];
N11[label=Sample];
N12[label=if];
N13[label=if];
N14[label=Normal];
N15[label=Sample];
N16[label="Observation 0.0"];
N17[label=Sample];
N18[label=if];
N19[label=if];
N20[label=Normal];
N21[label=Sample];
N22[label="Observation 1.0"];
N23[label=Sample];
N24[label=if];
N25[label=if];
N26[label=Normal];
N27[label=Sample];
N28[label="Observation 0.0"];
N29[label=Sample];
N30[label=if];
N31[label=if];
N32[label=Normal];
N33[label=Sample];
N34[label="Observation 1.0"];
N35[label=Sample];
N36[label=if];
N37[label=if];
N38[label=Normal];
N39[label=Sample];
N40[label="Observation 0.0"];
N41[label=Sample];
N42[label=if];
N43[label=if];
N44[label=Normal];
N45[label=Sample];
N46[label="Observation 1.0"];
N47[label=Sample];
N48[label=if];
N49[label=if];
N50[label=Normal];
N51[label=Sample];
N52[label="Observation 0.0"];
N53[label=Sample];
N54[label=if];
N55[label=if];
N56[label=Normal];
N57[label=Sample];
N58[label="Observation 1.0"];
N59[label=Sample];
N60[label=if];
N61[label=if];
N62[label=Normal];
N63[label=Sample];
N64[label="Observation 0.0"];
N65[label=Sample];
N66[label=if];
N67[label=if];
N68[label=Normal];
N69[label=Sample];
N70[label="Observation 1.0"];
N71[label=Sample];
N72[label=if];
N73[label=if];
N74[label=Normal];
N75[label=Sample];
N76[label="Observation 0.0"];
N77[label=Sample];
N78[label=if];
N79[label=if];
N80[label=Normal];
N81[label=Sample];
N82[label="Observation 1.0"];
N83[label=Query];
N84[label=Query];
N85[label=Query];
N86[label=Query];
N87[label=Query];
N88[label=Query];
N89[label=Query];
N90[label=Query];
N91[label=Query];
N92[label=Query];
N93[label=Query];
N94[label=Query];
N95[label=Query];
N96[label=Query];
N97[label=Query];
N98[label=Query];
N00 -> N01;
N01 -> N02;
N01 -> N17;
N01 -> N23;
N01 -> N29;
N01 -> N35;
N01 -> N41;
N01 -> N47;
N01 -> N53;
N01 -> N59;
N01 -> N65;
N01 -> N71;
N01 -> N77;
N02 -> N12;
N02 -> N13;
N02 -> N83;
N03 -> N05;
N04 -> N05;
N05 -> N06;
N05 -> N07;
N06 -> N12;
N06 -> N18;
N06 -> N24;
N06 -> N30;
N06 -> N36;
N06 -> N42;
N06 -> N48;
N06 -> N54;
N06 -> N60;
N06 -> N66;
N06 -> N72;
N06 -> N78;
N06 -> N95;
N07 -> N12;
N07 -> N18;
N07 -> N24;
N07 -> N30;
N07 -> N36;
N07 -> N42;
N07 -> N48;
N07 -> N54;
N07 -> N60;
N07 -> N66;
N07 -> N72;
N07 -> N78;
N07 -> N96;
N08 -> N09;
N08 -> N09;
N09 -> N10;
N09 -> N11;
N10 -> N13;
N10 -> N19;
N10 -> N25;
N10 -> N31;
N10 -> N37;
N10 -> N43;
N10 -> N49;
N10 -> N55;
N10 -> N61;
N10 -> N67;
N10 -> N73;
N10 -> N79;
N10 -> N97;
N11 -> N13;
N11 -> N19;
N11 -> N25;
N11 -> N31;
N11 -> N37;
N11 -> N43;
N11 -> N49;
N11 -> N55;
N11 -> N61;
N11 -> N67;
N11 -> N73;
N11 -> N79;
N11 -> N98;
N12 -> N14;
N13 -> N14;
N14 -> N15;
N15 -> N16;
N17 -> N18;
N17 -> N19;
N17 -> N84;
N18 -> N20;
N19 -> N20;
N20 -> N21;
N21 -> N22;
N23 -> N24;
N23 -> N25;
N23 -> N85;
N24 -> N26;
N25 -> N26;
N26 -> N27;
N27 -> N28;
N29 -> N30;
N29 -> N31;
N29 -> N86;
N30 -> N32;
N31 -> N32;
N32 -> N33;
N33 -> N34;
N35 -> N36;
N35 -> N37;
N35 -> N87;
N36 -> N38;
N37 -> N38;
N38 -> N39;
N39 -> N40;
N41 -> N42;
N41 -> N43;
N41 -> N88;
N42 -> N44;
N43 -> N44;
N44 -> N45;
N45 -> N46;
N47 -> N48;
N47 -> N49;
N47 -> N89;
N48 -> N50;
N49 -> N50;
N50 -> N51;
N51 -> N52;
N53 -> N54;
N53 -> N55;
N53 -> N90;
N54 -> N56;
N55 -> N56;
N56 -> N57;
N57 -> N58;
N59 -> N60;
N59 -> N61;
N59 -> N91;
N60 -> N62;
N61 -> N62;
N62 -> N63;
N63 -> N64;
N65 -> N66;
N65 -> N67;
N65 -> N92;
N66 -> N68;
N67 -> N68;
N68 -> N69;
N69 -> N70;
N71 -> N72;
N71 -> N73;
N71 -> N93;
N72 -> N74;
N73 -> N74;
N74 -> N75;
N75 -> N76;
N77 -> N78;
N77 -> N79;
N77 -> N94;
N78 -> N80;
N79 -> N80;
N80 -> N81;
N81 -> N82;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_cpp(queries, observations)
expected = """graph::Graph g;
uint n0 = g.add_constant_probability(0.5);
uint n1 = g.add_distribution(
graph::DistributionType::BERNOULLI,
graph::AtomicType::BOOLEAN,
std::vector<uint>({n0}));
uint n2 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n3 = g.add_constant_real(0.0);
uint n4 = g.add_constant_pos_real(10.0);
uint n5 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n3, n4}));
uint n6 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n5}));
uint n7 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n5}));
uint n8 = g.add_constant_pos_real(1.0);
uint n9 = g.add_distribution(
graph::DistributionType::GAMMA,
graph::AtomicType::POS_REAL,
std::vector<uint>({n8, n8}));
uint n10 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n9}));
uint n11 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n9}));
uint n12 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n2, n7, n6}));
uint n13 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n2, n11, n10}));
uint n14 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n12, n13}));
uint n15 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n14}));
g.observe(n15, 0.0);
uint n16 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n17 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n16, n7, n6}));
uint n18 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n16, n11, n10}));
uint n19 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n17, n18}));
uint n20 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n19}));
g.observe(n20, 1.0);
uint n21 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n22 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n21, n7, n6}));
uint n23 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n21, n11, n10}));
uint n24 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n22, n23}));
uint n25 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n24}));
g.observe(n25, 0.0);
uint n26 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n27 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n26, n7, n6}));
uint n28 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n26, n11, n10}));
uint n29 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n27, n28}));
uint n30 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n29}));
g.observe(n30, 1.0);
uint n31 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n32 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n31, n7, n6}));
uint n33 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n31, n11, n10}));
uint n34 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n32, n33}));
uint n35 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n34}));
g.observe(n35, 0.0);
uint n36 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n37 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n36, n7, n6}));
uint n38 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n36, n11, n10}));
uint n39 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n37, n38}));
uint n40 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n39}));
g.observe(n40, 1.0);
uint n41 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n42 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n41, n7, n6}));
uint n43 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n41, n11, n10}));
uint n44 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n42, n43}));
uint n45 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n44}));
g.observe(n45, 0.0);
uint n46 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n47 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n46, n7, n6}));
uint n48 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n46, n11, n10}));
uint n49 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n47, n48}));
uint n50 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n49}));
g.observe(n50, 1.0);
uint n51 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n52 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n51, n7, n6}));
uint n53 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n51, n11, n10}));
uint n54 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n52, n53}));
uint n55 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n54}));
g.observe(n55, 0.0);
uint n56 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n57 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n56, n7, n6}));
uint n58 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n56, n11, n10}));
uint n59 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n57, n58}));
uint n60 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n59}));
g.observe(n60, 1.0);
uint n61 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n62 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n61, n7, n6}));
uint n63 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n61, n11, n10}));
uint n64 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n62, n63}));
uint n65 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n64}));
g.observe(n65, 0.0);
uint n66 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n67 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n66, n7, n6}));
uint n68 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n66, n11, n10}));
uint n69 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n67, n68}));
uint n70 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n69}));
g.observe(n70, 1.0);
uint q0 = g.query(n2);
uint q1 = g.query(n16);
uint q2 = g.query(n21);
uint q3 = g.query(n26);
uint q4 = g.query(n31);
uint q5 = g.query(n36);
uint q6 = g.query(n41);
uint q7 = g.query(n46);
uint q8 = g.query(n51);
uint q9 = g.query(n56);
uint q10 = g.query(n61);
uint q11 = g.query(n66);
uint q12 = g.query(n6);
uint q13 = g.query(n7);
uint q14 = g.query(n10);
uint q15 = g.query(n11);
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_python(queries, observations)
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_probability(0.5)
n1 = g.add_distribution(
graph.DistributionType.BERNOULLI,
graph.AtomicType.BOOLEAN,
[n0],
)
n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n3 = g.add_constant_real(0.0)
n4 = g.add_constant_pos_real(10.0)
n5 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n3, n4],
)
n6 = g.add_operator(graph.OperatorType.SAMPLE, [n5])
n7 = g.add_operator(graph.OperatorType.SAMPLE, [n5])
n8 = g.add_constant_pos_real(1.0)
n9 = g.add_distribution(
graph.DistributionType.GAMMA,
graph.AtomicType.POS_REAL,
[n8, n8],
)
n10 = g.add_operator(graph.OperatorType.SAMPLE, [n9])
n11 = g.add_operator(graph.OperatorType.SAMPLE, [n9])
n12 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n2, n7, n6],
)
n13 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n2, n11, n10],
)
n14 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n12, n13],
)
n15 = g.add_operator(graph.OperatorType.SAMPLE, [n14])
g.observe(n15, 0.0)
n16 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n17 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n16, n7, n6],
)
n18 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n16, n11, n10],
)
n19 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n17, n18],
)
n20 = g.add_operator(graph.OperatorType.SAMPLE, [n19])
g.observe(n20, 1.0)
n21 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n22 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n21, n7, n6],
)
n23 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n21, n11, n10],
)
n24 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n22, n23],
)
n25 = g.add_operator(graph.OperatorType.SAMPLE, [n24])
g.observe(n25, 0.0)
n26 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n27 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n26, n7, n6],
)
n28 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n26, n11, n10],
)
n29 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n27, n28],
)
n30 = g.add_operator(graph.OperatorType.SAMPLE, [n29])
g.observe(n30, 1.0)
n31 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n32 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n31, n7, n6],
)
n33 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n31, n11, n10],
)
n34 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n32, n33],
)
n35 = g.add_operator(graph.OperatorType.SAMPLE, [n34])
g.observe(n35, 0.0)
n36 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n37 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n36, n7, n6],
)
n38 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n36, n11, n10],
)
n39 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n37, n38],
)
n40 = g.add_operator(graph.OperatorType.SAMPLE, [n39])
g.observe(n40, 1.0)
n41 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n42 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n41, n7, n6],
)
n43 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n41, n11, n10],
)
n44 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n42, n43],
)
n45 = g.add_operator(graph.OperatorType.SAMPLE, [n44])
g.observe(n45, 0.0)
n46 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n47 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n46, n7, n6],
)
n48 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n46, n11, n10],
)
n49 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n47, n48],
)
n50 = g.add_operator(graph.OperatorType.SAMPLE, [n49])
g.observe(n50, 1.0)
n51 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n52 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n51, n7, n6],
)
n53 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n51, n11, n10],
)
n54 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n52, n53],
)
n55 = g.add_operator(graph.OperatorType.SAMPLE, [n54])
g.observe(n55, 0.0)
n56 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n57 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n56, n7, n6],
)
n58 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n56, n11, n10],
)
n59 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n57, n58],
)
n60 = g.add_operator(graph.OperatorType.SAMPLE, [n59])
g.observe(n60, 1.0)
n61 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n62 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n61, n7, n6],
)
n63 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n61, n11, n10],
)
n64 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n62, n63],
)
n65 = g.add_operator(graph.OperatorType.SAMPLE, [n64])
g.observe(n65, 0.0)
n66 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n67 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n66, n7, n6],
)
n68 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n66, n11, n10],
)
n69 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n67, n68],
)
n70 = g.add_operator(graph.OperatorType.SAMPLE, [n69])
g.observe(n70, 1.0)
q0 = g.query(n2)
q1 = g.query(n16)
q2 = g.query(n21)
q3 = g.query(n26)
q4 = g.query(n31)
q5 = g.query(n36)
q6 = g.query(n41)
q7 = g.query(n46)
q8 = g.query(n51)
q9 = g.query(n56)
q10 = g.query(n61)
q11 = g.query(n66)
q12 = g.query(n6)
q13 = g.query(n7)
q14 = g.query(n10)
q15 = g.query(n11)
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/gmm_1d_2comp_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import graphviz
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import exp
from torch.distributions import Normal
@bm.random_variable
def X():
return Normal(0.0, 3.0)
@bm.random_variable
def Y():
return Normal(loc=0.0, scale=exp(X() * 0.5))
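# This is the standard Neal's funnel construction: the scale of Y depends
# exponentially on X, so the density narrows sharply as X decreases. The
# exp(X() * 0.5) term is what becomes the Exp node feeding the second Normal
# in the expected graph below.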
class NealsFunnelTest(unittest.TestCase):
def test_neals_funnel(self) -> None:
self.maxDiff = None
observations = {}
queries = [X(), Y()]
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=3.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Query];
N05[label=0.5];
N06[label="*"];
N07[label=Exp];
N08[label=Normal];
N09[label=Sample];
N10[label=Query];
N00 -> N02;
N00 -> N08;
N01 -> N02;
N02 -> N03;
N03 -> N04;
N03 -> N06;
N05 -> N06;
N06 -> N07;
N07 -> N08;
N08 -> N09;
N09 -> N10;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_to_graphviz_type(self) -> None:
self.maxDiff = None
observations = {}
queries = [X(), Y()]
observed = type(BMGInference().to_graphviz(queries, observations))
expected = graphviz.Source
self.assertEqual(expected, observed)
| beanmachine-main | tests/ppl/compiler/neals_funnel_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Comparison operators are not supported in BMG yet but we need to be able to detect
use of them and give an error. Here we verify that we can rewrite code containing
them correctly."""
import unittest
import astor
import beanmachine.ppl as bm
from beanmachine.ppl.compiler.bm_to_bmg import _bm_function_to_bmg_ast
from torch.distributions import Normal, StudentT
@bm.random_variable
def x():
return Normal(0.0, 1.0)
@bm.random_variable
def y():
z = 0.0 < x() < 2.0
return StudentT(3.0, z, 4.0)
class ComparisonRewritingTest(unittest.TestCase):
def test_comparison_rewriting_1(self) -> None:
self.maxDiff = None
# The key thing to note here is that we eliminate Python's weird
# comparison logic entirely; we reduce
#
# z = 0.0 < x() < 2.0
#
# to the equivalent of:
#
# tx = x()
# comp = 0.0 < tx
# if comp:
# z = tx < 2.0
# else:
# z = comp
#
# which has the same semantics but has only simple comparisons and
# simple control flows.
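        # As a quick plain-Python sanity check (illustrative only; the helpers
        # below are not part of the compiler), the chained comparison and the
        # rewritten form agree on representative inputs:
        def chained(tx):
            return 0.0 < tx < 2.0
        def rewritten(tx):
            comp = 0.0 < tx
            if comp:
                return tx < 2.0
            return comp
        for probe in (-1.0, 0.5, 3.0):
            self.assertEqual(chained(probe), rewritten(probe))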
self.assertTrue(y.is_random_variable)
bmgast = _bm_function_to_bmg_ast(y().function, "y_helper")
observed = astor.to_source(bmgast)
expected = """
def y_helper(bmg):
import operator
def y():
a1 = 0.0
r6 = []
r10 = {}
a4 = bmg.handle_function(x, r6, r10)
a7 = bmg.handle_function(operator.lt, [a1, a4])
bmg.handle_if(a7)
if a7:
a8 = 2.0
z = bmg.handle_function(operator.lt, [a4, a8])
else:
z = a7
a16 = 3.0
a13 = [a16]
a17 = [z]
a12 = bmg.handle_function(operator.add, [a13, a17])
a18 = 4.0
a14 = [a18]
r11 = bmg.handle_function(operator.add, [a12, a14])
r15 = {}
r2 = bmg.handle_function(StudentT, r11, r15)
return r2
a3 = bmg.handle_dot_get(bm, 'random_variable')
r5 = [y]
r9 = {}
y = bmg.handle_function(a3, r5, r9)
return y
"""
self.assertEqual(observed.strip(), expected.strip())
| beanmachine-main | tests/ppl/compiler/comparison_rewriting_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
# The item() member function should be treated as an identity by the compiler
# for the purposes of graph generation.
import beanmachine.ppl as bm
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch.distributions import Bernoulli, Beta
@bm.random_variable
def beta():
return Beta(2.0, 2.0)
@bm.random_variable
def flip():
return Bernoulli(beta().item())
class ItemTest(unittest.TestCase):
def test_item_member_function(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([flip()], {}, after_transform=False)
expected = """
digraph "graph" {
N0[label=2.0];
N1[label=Beta];
N2[label=Sample];
N3[label=Item];
N4[label=Bernoulli];
N5[label=Sample];
N6[label=Query];
N0 -> N1;
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([flip()], {}, after_transform=True)
expected = """
digraph "graph" {
N0[label=2.0];
N1[label=Beta];
N2[label=Sample];
N3[label=Bernoulli];
N4[label=Sample];
N5[label=Query];
N0 -> N1;
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N4 -> N5;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/item_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from beanmachine.ppl.compiler.profiler import ProfilerData
class ProfilerTest(unittest.TestCase):
def test_profiler(self) -> None:
self.maxDiff = None
pd = ProfilerData()
pd.begin("A", 1000000000)
pd.begin("B", 1100000000)
pd.begin("C", 1200000000)
pd.finish("C", 1300000000)
pd.begin("C", 1400000000)
pd.finish("C", 1500000000)
pd.finish("B", 1600000000)
pd.finish("A", 1700000000)
pd.begin("D", 1800000000)
pd.finish("D", 1900000000)
expected = """
begin A 1000000000
begin B 1100000000
begin C 1200000000
finish C 1300000000
begin C 1400000000
finish C 1500000000
finish B 1600000000
finish A 1700000000
begin D 1800000000
finish D 1900000000"""
self.assertEqual(expected.strip(), str(pd).strip())
# B accounts for 500 ms of A; the two Cs account for 200 ms of B;
# the rest is unattributed
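        # Worked arithmetic for the report below (illustrative): A spans
        # 1.0s..1.7s = 700 ms, of which B (1.1s..1.6s) accounts for 500 ms,
        # leaving 200 ms unattributed to A. Within B the two C intervals are
        # 100 ms each = 200 ms, leaving 300 ms unattributed to B. D spans
        # 100 ms, so the total is 700 + 100 = 800 ms.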
report = pd.to_report()
expected = """
A:(1) 700 ms
B:(1) 500 ms
C:(2) 200 ms
unattributed: 300 ms
unattributed: 200 ms
D:(1) 100 ms
Total time: 800 ms
"""
self.assertEqual(expected.strip(), str(report).strip())
self.assertEqual(700000000, report.A.total_time)
self.assertEqual(500000000, report.A.B.total_time)
self.assertEqual(200000000, report.A.B.C.total_time)
| beanmachine-main | tests/ppl/compiler/profiler_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch.distributions import Bernoulli
# The dependency graph here is x -> y -> z -> x
@bm.random_variable
def bad_cycle_1_x():
return Bernoulli(bad_cycle_1_y())
@bm.random_variable
def bad_cycle_1_y():
return Bernoulli(bad_cycle_1_z())
@bm.random_variable
def bad_cycle_1_z():
return Bernoulli(bad_cycle_1_x())
# The dependency graph here is z -> x(2) -> y(0) -> x(1) -> y(0)
@bm.random_variable
def bad_cycle_2_x(n):
return Bernoulli(bad_cycle_2_y(0))
@bm.random_variable
def bad_cycle_2_y(n):
return Bernoulli(bad_cycle_2_x(n + 1))
@bm.random_variable
def bad_cycle_2_z():
return Bernoulli(bad_cycle_2_x(2))
class CycleDetectorTest(unittest.TestCase):
def test_bad_cyclic_model_1(self) -> None:
with self.assertRaises(RecursionError):
BMGInference().infer([bad_cycle_1_x()], {}, 1)
def test_bad_cyclic_model_2(self) -> None:
with self.assertRaises(RecursionError):
BMGInference().infer([bad_cycle_2_z()], {}, 1)
| beanmachine-main | tests/ppl/compiler/cycle_detector_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.inference import BMGInference
# https://github.com/facebookresearch/beanmachine/issues/1312
@bm.random_variable
def unif():
return dist.Uniform(0, 1)
@bm.random_variable
def beta():
return dist.Beta(unif() + 0.1, unif() + 0.1)
@bm.random_variable
def flip():
return dist.Bernoulli(1 - beta())
class BugRegressionTest(unittest.TestCase):
def test_regress_1312(self) -> None:
self.maxDiff = None
# There were two problems exposed by this user-supplied repro. Both are
# now fixed.
#
# The first was that a typo in the code which propagated type mutations
# through the graph during the problem-fixing phase was causing some types
# to not be updated correctly, which was then causing internal compiler
# errors down the line.
#
# The second was that due to the order in which the problem fixers ran,
# the 1-beta operation was generated as:
#
        # ToProb(Add(1.0, ToReal(Negate(ToPosReal(Sample(Beta(...)))))))
#
# Which is not wrong but is quite inefficient. It is now generated as
# you would expect:
#
# Complement(Sample(Beta(...)))
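        # For reference (an illustrative note, not compiler output): for a
        # probability p the two graphs compute the same value, since
        # 1 - p == complement(p), but the complement form uses a single node
        # rather than the to-pos-real / negate / to-real / add / to-prob
        # chain shown above.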
queries = [unif()]
observations = {flip(): torch.tensor(1)}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label=Flat];
N01[label=Sample];
N02[label=ToPosReal];
N03[label=0.1];
N04[label="+"];
N05[label=Beta];
N06[label=Sample];
N07[label=complement];
N08[label=Bernoulli];
N09[label=Sample];
N10[label="Observation True"];
N11[label=Query];
N00 -> N01;
N01 -> N02;
N01 -> N11;
N02 -> N04;
N03 -> N04;
N04 -> N05;
N04 -> N05;
N05 -> N06;
N06 -> N07;
N07 -> N08;
N08 -> N09;
N09 -> N10;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/bug_regression_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# """End-to-end test for n-schools model based on the one in PPL Bench"""
# See for example https://github.com/facebookresearch/pplbench/blob/main/pplbench/models/n_schools.py
import logging
import unittest
from typing import Tuple
import beanmachine.ppl as bm
import numpy as np
import torch.distributions as dist
import xarray as xr
from beanmachine import graph
from beanmachine.ppl.inference.bmg_inference import BMGInference
from scipy.stats import norm
from torch import tensor
LOGGER = logging.getLogger(__name__)
# Planned additions:
# TODO: It would be great to have another example based on Sepehr's bma model here:
# https://www.internalfb.com/intern/diffusion/FBS/browsefile/master/fbcode/applications/fb/bma/bma_model.py
# TODO: It would be great to also have a test case based on the PPL bench version of n-schools that follows:
# Start n-schools model
"""
N Schools
This is a generalization of a classical 8 schools model to n schools.
The model posits that the effect of a school on a student's performance
can be explained by a baseline effect of all schools plus an additive
effect of the state, the school district and the school type.
Hyper Parameters:
n - total number of schools
num_states - number of states
num_districts_per_state - number of school districts in each state
num_types - number of school types
scale_state - state effect scale
scale_district - district effect scale
scale_type - school type effect scale
Model:
beta_baseline = StudentT(dof_baseline, 0.0, scale_baseline)
sigma_state ~ HalfCauchy(0, scale_state)
sigma_district ~ HalfCauchy(0, scale_district)
sigma_type ~ HalfCauchy(0, scale_type)
for s in 0 .. num_states - 1
beta_state[s] ~ Normal(0, sigma_state)
for d in 0 .. num_districts_per_state - 1
beta_district[s, d] ~ Normal(0, sigma_district)
for t in 0 .. num_types - 1
beta_type[t] ~ Normal(0, sigma_type)
for i in 0 ... n - 1
Assume we are given state[i], district[i], type[i]
Y_hat[i] = beta_baseline + beta_state[state[i]]
+ beta_district[state[i], district[i]]
+ beta_type[type[i]]
sigma[i] ~ Uniform(0.5, 1.5)
Y[i] ~ Normal(Y_hat[i], sigma[i])
The dataset consists of the following
Y[school] - float
sigma[school] - float
and it includes the attributes
n - number of schools
num_states
num_districts_per_state
num_types
dof_baseline
scale_baseline
scale_state
scale_district
scale_type
state_idx[school] - 0 .. num_states - 1
district_idx[school] - 0 .. num_districts_per_state - 1
type_idx[school] - 0 .. num_types - 1
The posterior samples include the following,
sigma_state[draw] - float
sigma_district[draw] - float
sigma_type[draw] - float
beta_baseline[draw] - float
beta_state[draw, state] - float
beta_district[draw, state, district] - float
beta_type[draw, type] - float
"""
def generate_data( # type: ignore
seed: int,
n: int = 2000,
num_states: int = 8,
num_districts_per_state: int = 5,
num_types: int = 5,
dof_baseline: float = 3.0,
scale_baseline: float = 10.0,
scale_state: float = 1.0,
scale_district: float = 1.0,
scale_type: float = 1.0,
) -> Tuple[xr.Dataset, xr.Dataset]:
"""
See the class documentation for an explanation of the parameters.
:param seed: random number generator seed
"""
if n % 2 != 0:
        LOGGER.warning(f"n should be a multiple of 2. Actual value = {n}")
# In this model we will generate exactly equal amounts of training
# and test data with the same number of training and test schools
# in each state, district, and type combination
n = n // 2
rng = np.random.default_rng(seed)
beta_baseline = rng.standard_t(dof_baseline) * scale_baseline
sigma_state = np.abs(rng.standard_cauchy()) * scale_state
sigma_district = np.abs(rng.standard_cauchy()) * scale_district
sigma_type = np.abs(rng.standard_cauchy()) * scale_type
beta_state = rng.normal(loc=0, scale=sigma_state, size=num_states)
beta_district = rng.normal(
loc=0, scale=sigma_district, size=(num_states, num_districts_per_state)
)
beta_type = rng.normal(loc=0, scale=sigma_type, size=num_types)
# we will randomly assign the schools to states, district, and types
state_idx = rng.integers(low=0, high=num_states, size=n)
district_idx = rng.integers(low=0, high=num_districts_per_state, size=n)
type_idx = rng.integers(low=0, high=num_types, size=n)
y_hat = (
beta_baseline
+ beta_state[state_idx]
+ beta_district[state_idx, district_idx]
+ beta_type[type_idx]
)
train_sigma = rng.uniform(0.5, 1.5, size=n)
train_y = rng.normal(loc=y_hat, scale=train_sigma)
test_sigma = rng.uniform(0.5, 1.5, size=n)
test_y = rng.normal(loc=y_hat, scale=test_sigma)
return tuple( # type: ignore
xr.Dataset(
{"Y": (["school"], y), "sigma": (["school"], sigma)},
coords={"school": np.arange(n)},
attrs={
"n": n,
"num_states": num_states,
"num_districts_per_state": num_districts_per_state,
"num_types": num_types,
"dof_baseline": dof_baseline,
"scale_baseline": scale_baseline,
"scale_state": scale_state,
"scale_district": scale_district,
"scale_type": scale_type,
"state_idx": state_idx,
"district_idx": district_idx,
"type_idx": type_idx,
},
)
for y, sigma in [(train_y, train_sigma), (test_y, test_sigma)]
)
def evaluate_posterior_predictive(samples: xr.Dataset, test: xr.Dataset) -> np.ndarray:
"""
Computes the predictive likelihood of all the test items w.r.t. each sample.
See the class documentation for the `samples` and `test` parameters.
:returns: a numpy array of the same size as the sample dimension.
"""
# transpose the datasets to be in a convenient format
samples = samples.transpose("draw", "state", "district", "type")
y_hat = (
samples.beta_baseline.values[:, np.newaxis]
+ samples.beta_state.values[:, test.attrs["state_idx"]]
+ samples.beta_district.values[
:, test.attrs["state_idx"], test.attrs["district_idx"]
]
+ samples.beta_type.values[:, test.attrs["type_idx"]]
) # size = (iterations, n_test)
loglike = norm.logpdf(
test.Y.values[np.newaxis, :],
loc=y_hat,
scale=test.sigma.values[np.newaxis, :],
) # size = (iterations, n_test)
return loglike.sum(axis=1) # size = (iterations,)
## OLD
# DATA
NUM_CLASS = 2  # num_classes (Dirichlet may need at least two)
# TODO: Clarify that the number of labelers is implicit
NUM_ITEMS = 1 # number of items
PREV_PRIOR = tensor([1.0, 0.0]) # prior on prevalence
# PREV_PRIOR is a list of length NUM_CLASSES
CONF_MATRIX_PRIOR = tensor([1.0, 0.0]) # prior on confusion matrix
# CONF_MATRIX_PRIOR is a list of length NUM_CLASS
# TODO: Does Dirichlet support 2d matrices?
# TODO: Is it really necessary to reject Dirichlet on tensor([1])?
IDX_RATINGS = [[0]] # indexed ratings that labelers assigned to items
IDX_LABELERS = [[0]] # indexed list of labelers who labeled items
EXPERT_CONF_MATRIX = tensor(
[[0.99, 0.01], [0.01, 0.99]]
) # confusion matrix of an expert (if we have true ratings)
# EXPERT_CONF_MATRIX is of size NUM_CLASS x NUM_CLASS
# Row (first) index is the true class, and column (second) index is the observed class
IDX_TRUE_RATINGS = [0]
# Of size NUM_ITEMS
# Represents the true class of items as judged by a perfect labeler
# When information is missing, use value NUM_CLASS
# MODEL
@bm.random_variable
def prevalence():
# Dirichlet distribution support is implemented in Beanstalk but not yet landed.
return dist.Dirichlet(PREV_PRIOR)
@bm.random_variable
def confusion_matrix(labeler, true_class):
return dist.Dirichlet(CONF_MATRIX_PRIOR) # size: NUM_CLASSES
# log of the unnormalized item probs
# log P(true label of item i = k | labels)
# shape: [NUM_ITEMS, NUM_CLASSES]
@bm.functional
def log_item_prob(i, k):
# Indexing into a simplex with a constant is implemented
# but not yet landed
prob = prevalence()[k].log()
for r in range(len(IDX_RATINGS[i])):
label = IDX_RATINGS[i][r]
labeler = IDX_LABELERS[i][r]
prob = prob + confusion_matrix(labeler, k)[label].log()
if IDX_TRUE_RATINGS[i] != NUM_CLASS: # Value NUM_CLASS means missing value
prob = prob + EXPERT_CONF_MATRIX[k, IDX_TRUE_RATINGS[i]].log()
return prob
# log of joint prob of labels, prev, conf_matrix
@bm.random_variable
def target():
joint_log_prob = 0
for i in range(NUM_ITEMS):
# logsumexp on a newly-constructed tensor with stochastic
# elements has limited support but this should work:
log_probs = tensor(
# TODO: Hard-coded k in {0,1}
# [log_item_prob(i, 0), log_item_prob(i, 1), log_item_prob(i, 2)]
# [log_item_prob(i, 0), log_item_prob(i, 1)]
[log_item_prob(i, k) for k in range(NUM_CLASS)]
)
joint_log_prob = joint_log_prob + log_probs.logsumexp(0)
return dist.Bernoulli(joint_log_prob.exp())
observations = {target(): tensor(1.0)}
queries = [
log_item_prob(0, 0), # Ideally, all the other elements too
prevalence(),
confusion_matrix(0, 0), # Ideally, all the other elements too
]
ssrw = "SingleSiteRandomWalk"
bmgi = "BMG inference"
both = {ssrw, bmgi}
# TODO: Replace 4th param of expecteds by more methodical calculation
expecteds = [
(prevalence(), both, 0.5000, 0.001),
(confusion_matrix(0, 0), both, 0.5000, 0.001),
(log_item_prob(0, 0), {ssrw}, -1.3863, 0.5),
(log_item_prob(0, 0), {bmgi}, -1.0391, 0.5),
]
class NSchoolsTest(unittest.TestCase):
def test_eight_schools_e2e(self):
# see https://www.jstatsoft.org/article/view/v012i03/v12i03.pdf
# For each school, the average treatment effect and the standard deviation
DATA = [
(28.39, 14.9),
(7.94, 10.2),
(-2.75, 16.3),
(6.82, 11.0),
(-0.64, 9.4),
(0.63, 11.4),
(18.01, 10.4),
(12.16, 17.6),
]
# the expected mean and standard deviation of each random variable
EXPECTED = [
(11.1, 9.1),
(7.6, 6.6),
(5.7, 8.4),
(7.1, 7.0),
(5.1, 6.8),
(5.7, 7.3),
(10.4, 7.3),
(8.3, 8.4),
(7.6, 5.9), # overall mean
(6.7, 5.6), # overall std
]
g = graph.Graph()
zero = g.add_constant(0.0)
thousand = g.add_constant_pos_real(1000.0)
# overall_mean ~ Normal(0, 1000)
overall_mean_dist = g.add_distribution(
graph.DistributionType.NORMAL, graph.AtomicType.REAL, [zero, thousand]
)
overall_mean = g.add_operator(graph.OperatorType.SAMPLE, [overall_mean_dist])
# overall_std ~ HalfCauchy(1000)
# [note: the original paper had overall_std ~ Uniform(0, 1000)]
overall_std_dist = g.add_distribution(
graph.DistributionType.HALF_CAUCHY, graph.AtomicType.POS_REAL, [thousand]
)
overall_std = g.add_operator(graph.OperatorType.SAMPLE, [overall_std_dist])
# for each school we will add two random variables,
# but first we need to define a distribution
school_effect_dist = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[overall_mean, overall_std],
)
for treatment_mean_value, treatment_std_value in DATA:
# school_effect ~ Normal(overall_mean, overall_std)
school_effect = g.add_operator(
graph.OperatorType.SAMPLE, [school_effect_dist]
)
g.query(school_effect)
# treatment_mean ~ Normal(school_effect, treatment_std)
treatment_std = g.add_constant_pos_real(treatment_std_value)
treatment_mean_dist = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[school_effect, treatment_std],
)
treatment_mean = g.add_operator(
graph.OperatorType.SAMPLE, [treatment_mean_dist]
)
g.observe(treatment_mean, treatment_mean_value)
g.query(overall_mean)
g.query(overall_std)
observed = g.to_dot()
expected = """
digraph "graph" {
N0[label="0"];
N1[label="1000"];
N2[label="Normal"];
N3[label="~"];
N4[label="HalfCauchy"];
N5[label="~"];
N6[label="Normal"];
N7[label="~"];
N8[label="14.9"];
N9[label="Normal"];
N10[label="~"];
N11[label="~"];
N12[label="10.2"];
N13[label="Normal"];
N14[label="~"];
N15[label="~"];
N16[label="16.3"];
N17[label="Normal"];
N18[label="~"];
N19[label="~"];
N20[label="11"];
N21[label="Normal"];
N22[label="~"];
N23[label="~"];
N24[label="9.4"];
N25[label="Normal"];
N26[label="~"];
N27[label="~"];
N28[label="11.4"];
N29[label="Normal"];
N30[label="~"];
N31[label="~"];
N32[label="10.4"];
N33[label="Normal"];
N34[label="~"];
N35[label="~"];
N36[label="17.6"];
N37[label="Normal"];
N38[label="~"];
N0 -> N2;
N1 -> N2;
N1 -> N4;
N2 -> N3;
N3 -> N6;
N4 -> N5;
N5 -> N6;
N6 -> N7;
N6 -> N11;
N6 -> N15;
N6 -> N19;
N6 -> N23;
N6 -> N27;
N6 -> N31;
N6 -> N35;
N7 -> N9;
N8 -> N9;
N9 -> N10;
N11 -> N13;
N12 -> N13;
N13 -> N14;
N15 -> N17;
N16 -> N17;
N17 -> N18;
N19 -> N21;
N20 -> N21;
N21 -> N22;
N23 -> N25;
N24 -> N25;
N25 -> N26;
N27 -> N29;
N28 -> N29;
N29 -> N30;
N31 -> N33;
N32 -> N33;
N33 -> N34;
N35 -> N37;
N36 -> N37;
N37 -> N38;
O0[label="Observation"];
N10 -> O0;
O1[label="Observation"];
N14 -> O1;
O2[label="Observation"];
N18 -> O2;
O3[label="Observation"];
N22 -> O3;
O4[label="Observation"];
N26 -> O4;
O5[label="Observation"];
N30 -> O5;
O6[label="Observation"];
N34 -> O6;
O7[label="Observation"];
N38 -> O7;
Q0[label="Query"];
N7 -> Q0;
Q1[label="Query"];
N11 -> Q1;
Q2[label="Query"];
N15 -> Q2;
Q3[label="Query"];
N19 -> Q3;
Q4[label="Query"];
N23 -> Q4;
Q5[label="Query"];
N27 -> Q5;
Q6[label="Query"];
N31 -> Q6;
Q7[label="Query"];
N35 -> Q7;
Q8[label="Query"];
N3 -> Q8;
Q9[label="Query"];
N5 -> Q9;
}"""
        self.assertEqual(expected.strip(), observed.strip())
means = g.infer_mean(3000, graph.InferenceType.NMC)
for idx, (mean, std) in enumerate(EXPECTED):
self.assertTrue(
abs(means[idx] - mean) < std * 0.5,
f"index {idx} expected {mean} +- {std*0.5} actual {means[idx]}",
)
# TODO: The following tests should be turned into working tests focused on
# n-schools (rather than the CLARA examples they are templated on.)
def disabled_test_nschools_tensor_cgm_no_update_inference(self) -> None:
"""Check BM and BMG inference both terminate"""
self.maxDiff = None
num_samples = 10
        # First, let's see how the model fares with Random Walk inference
inference = bm.SingleSiteRandomWalk() # or NUTS
mcsamples = inference.infer(queries, observations, num_samples)
for rand_var, inferences, value, delta in expecteds:
if ssrw in inferences:
samples = mcsamples[rand_var]
observed = samples.mean()
expected = tensor([value])
self.assertAlmostEqual(first=observed, second=expected, delta=delta)
        # Second, let's see how it fares with the BMG inference
inference = BMGInference()
mcsamples = inference.infer(queries, observations, num_samples)
for rand_var, inferences, value, delta in expecteds:
if bmgi in inferences:
samples = mcsamples[rand_var]
observed = samples.mean()
expected = tensor([value])
self.assertAlmostEqual(first=observed, second=expected, delta=delta)
def disabled_test_nschools_tensor_cgm_no_update_to_dot_cpp_python(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label="[1.0,0.0]"];
N01[label=Dirichlet];
N02[label=Sample];
N03[label=Sample];
N04[label=Sample];
N05[label=0];
N06[label=index];
N07[label=Log];
N08[label=index];
N09[label=Log];
N10[label="+"];
N11[label=-0.010050326585769653];
N12[label="+"];
N13[label=1];
N14[label=index];
N15[label=Log];
N16[label=index];
N17[label=Log];
N18[label="+"];
N19[label=-4.605170249938965];
N20[label="+"];
N21[label=LogSumExp];
N22[label=ToReal];
N23[label=Exp];
N24[label=ToProb];
N25[label=Bernoulli];
N26[label=Sample];
N27[label="Observation True"];
N28[label=Query];
N29[label=Query];
N30[label=Query];
N00 -> N01;
N01 -> N02;
N01 -> N03;
N01 -> N04;
N02 -> N06;
N02 -> N14;
N02 -> N29;
N03 -> N08;
N03 -> N30;
N04 -> N16;
N05 -> N06;
N05 -> N08;
N05 -> N16;
N06 -> N07;
N07 -> N10;
N08 -> N09;
N09 -> N10;
N10 -> N12;
N11 -> N12;
N12 -> N21;
N12 -> N28;
N13 -> N14;
N14 -> N15;
N15 -> N18;
N16 -> N17;
N17 -> N18;
N18 -> N20;
N19 -> N20;
N20 -> N21;
N21 -> N22;
N22 -> N23;
N23 -> N24;
N24 -> N25;
N25 -> N26;
N26 -> N27;
}
"""
self.assertEqual(observed.strip(), expected.strip())
observed = BMGInference().to_cpp(queries, observations)
expected = """
graph::Graph g;
Eigen::MatrixXd m0(2, 1);
m0 << 1.0, 0.0;
uint n0 = g.add_constant_pos_matrix(m0);
uint n1 = g.add_distribution(
graph::DistributionType::DIRICHLET,
graph::ValueType(
graph::VariableType::COL_SIMPLEX_MATRIX,
graph::AtomicType::PROBABILITY,
2,
1
)
std::vector<uint>({n0}));
uint n2 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n3 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n4 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n5 = g.add_constant(0);
uint n6 = g.add_operator(
graph::OperatorType::INDEX, std::vector<uint>({n2, n5}));
uint n7 = g.add_operator(
graph::OperatorType::LOG, std::vector<uint>({n6}));
uint n8 = g.add_operator(
graph::OperatorType::INDEX, std::vector<uint>({n3, n5}));
uint n9 = g.add_operator(
graph::OperatorType::LOG, std::vector<uint>({n8}));
uint n10 = g.add_operator(
graph::OperatorType::ADD, std::vector<uint>({n7, n9}));
uint n11 = g.add_constant_neg_real(-0.010050326585769653);
uint n12 = g.add_operator(
graph::OperatorType::ADD, std::vector<uint>({n10, n11}));
uint n13 = g.add_constant(1);
uint n14 = g.add_operator(
graph::OperatorType::INDEX, std::vector<uint>({n2, n13}));
uint n15 = g.add_operator(
graph::OperatorType::LOG, std::vector<uint>({n14}));
uint n16 = g.add_operator(
graph::OperatorType::INDEX, std::vector<uint>({n4, n5}));
uint n17 = g.add_operator(
graph::OperatorType::LOG, std::vector<uint>({n16}));
uint n18 = g.add_operator(
graph::OperatorType::ADD, std::vector<uint>({n15, n17}));
uint n19 = g.add_constant_neg_real(-4.605170249938965);
uint n20 = g.add_operator(
graph::OperatorType::ADD, std::vector<uint>({n18, n19}));
n21 = g.add_operator(
graph::OperatorType::LOGSUMEXP,
std::vector<uint>({n12, n20}));
uint n22 = g.add_operator(
graph::OperatorType::TO_REAL, std::vector<uint>({n21}));
uint n23 = g.add_operator(
graph::OperatorType::EXP, std::vector<uint>({n22}));
uint n24 = g.add_operator(
graph::OperatorType::TO_PROBABILITY, std::vector<uint>({n23}));
uint n25 = g.add_distribution(
graph::DistributionType::BERNOULLI,
graph::AtomicType::BOOLEAN,
std::vector<uint>({n24}));
uint n26 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n25}));
g.observe(n26, true);
g.query(n12);
g.query(n2);
g.query(n3);
"""
self.assertEqual(observed.strip(), expected.strip())
observed = BMGInference().to_python(queries, observations)
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_pos_matrix(tensor([[1.0],[0.0]]))
n1 = g.add_distribution(
graph.DistributionType.DIRICHLET,
graph.ValueType(
graph.VariableType.COL_SIMPLEX_MATRIX,
graph.AtomicType.PROBABILITY,
2,
1,
),
[n0],
)
n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n3 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n4 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n5 = g.add_constant(0)
n6 = g.add_operator(graph.OperatorType.INDEX, [n2, n5])
n7 = g.add_operator(graph.OperatorType.LOG, [n6])
n8 = g.add_operator(graph.OperatorType.INDEX, [n3, n5])
n9 = g.add_operator(graph.OperatorType.LOG, [n8])
n10 = g.add_operator(graph.OperatorType.ADD, [n7, n9])
n11 = g.add_constant_neg_real(-0.010050326585769653)
n12 = g.add_operator(graph.OperatorType.ADD, [n10, n11])
n13 = g.add_constant(1)
n14 = g.add_operator(graph.OperatorType.INDEX, [n2, n13])
n15 = g.add_operator(graph.OperatorType.LOG, [n14])
n16 = g.add_operator(graph.OperatorType.INDEX, [n4, n5])
n17 = g.add_operator(graph.OperatorType.LOG, [n16])
n18 = g.add_operator(graph.OperatorType.ADD, [n15, n17])
n19 = g.add_constant_neg_real(-4.605170249938965)
n20 = g.add_operator(graph.OperatorType.ADD, [n18, n19])
n21 = g.add_operator(
graph.OperatorType.LOGSUMEXP,
[n12, n20])
n22 = g.add_operator(graph.OperatorType.TO_REAL, [n21])
n23 = g.add_operator(graph.OperatorType.EXP, [n22])
n24 = g.add_operator(graph.OperatorType.TO_PROBABILITY, [n23])
n25 = g.add_distribution(
graph.DistributionType.BERNOULLI,
graph.AtomicType.BOOLEAN,
[n24])
n26 = g.add_operator(graph.OperatorType.SAMPLE, [n25])
g.observe(n26, True)
g.query(n12)
g.query(n2)
g.query(n3)
"""
self.assertEqual(observed.strip(), expected.strip())
| beanmachine-main | tests/ppl/compiler/n-schools_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for single_assignment.py"""
import ast
import unittest
import astor
from beanmachine.ppl.compiler.ast_patterns import ast_domain
from beanmachine.ppl.compiler.rules import (
FirstMatch as first,
TryMany as many,
TryOnce as once,
)
from beanmachine.ppl.compiler.single_assignment import SingleAssignment
_some_top_down = ast_domain.some_top_down
class SingleAssignmentTest(unittest.TestCase):
s = SingleAssignment()
default_rule = s._rule
default_rules = s._rules
def test_single_assignment_sanity_check(self) -> None:
"""If you manually change one of the two numbers in the test it should fail"""
self.assertEqual(3, 3)
def test_single_assignment_unique_id_preserves_prefix(self) -> None:
"""The method unique_id preserves name prefix"""
s = SingleAssignment()
root = "root"
name = s._unique_id(root)
self.assertEqual(root, name[0 : len(root)])
def check_rewrite(
self, source, expected, rules=default_rules, msg=None, reset=True
):
"""Applying rules to source yields expected"""
self.maxDiff = None
if reset:
self.s._count = 0
self.s._rules = rules
m = ast.parse(source)
result = self.s.single_assignment(m)
self.assertEqual(astor.to_source(result).strip(), expected.strip(), msg=msg)
def check_rewrites(self, sources, rule=default_rule, reset=True):
"""Applying rules to each element of sources yelds the next one"""
self.assertIsInstance(sources, list, msg="\nSources should be list of strings.")
self.assertGreater(len(sources), 0, msg="\nSources should be a non-empty list.")
if len(sources) == 1:
return self.check_rewrite(
sources[0],
sources[0],
once(_some_top_down(rule)),
msg="\nExpected the term to be a normal form for rule.",
reset=reset,
)
source, *rest = sources
expected, *_ = rest
self.check_rewrite(
source,
expected,
_some_top_down(rule),
msg="\nExpected rule to rewrite one term to the other",
reset=reset,
)
self.check_rewrites(rest, rule, reset=False)
def test_check_rewrites(self) -> None:
"""The method check_rewrites performs several rewrites for it in one shot.
This method illustrates these functions."""
# The tests use a running example consisting of three terms that are the first,
# intermediate, and final terms in a sequence of rewrites by the rule
# self.s._handle_boolop_binarize()
# The three terms are simple as follows:
source1 = """
def f(x):
x = a and b and c and d
"""
source2 = """
def f(x):
x = (a and b) and c and d
"""
source3 = """
def f(x):
x = ((a and b) and c) and d
"""
# First, check that it raises errors on bad inputs
with self.assertRaises(
AssertionError, msg="The following line should raise an error!"
):
self.check_rewrites(42, self.s._handle_boolop_binarize())
with self.assertRaises(
AssertionError, msg="The following line should raise an error!"
):
self.check_rewrites([], self.s._handle_boolop_binarize())
        # Second, make sure it does what is expected on normal forms
self.check_rewrites([source3], self.s._handle_boolop_binarize())
with self.assertRaises(
AssertionError, msg="The following line should raise an error!"
):
self.check_rewrites([source1], self.s._handle_boolop_binarize())
# Third, normal forms are unchanged if we have one "many" too many
self.check_rewrites([source3], many(self.s._handle_boolop_binarize()))
with self.assertRaises(
AssertionError, msg="The following line should raise an error!"
):
self.check_rewrites([source1], many(self.s._handle_boolop_binarize()))
# Fourth, it will recognize valid rewrites
self.check_rewrites(
[source1, source2, source3], self.s._handle_boolop_binarize()
)
        # In common use, it is expected that the intermediate expressions are
        # all gathered in a list (if we would like to test the sequence in
        # multiple ways), or they may be inlined directly. To get a sense of
# the way the automatic formatting renders such uses, we include both
# here:
sources = [
"""
def f(x):
x = a and b and c and d
""",
"""
def f(x):
x = (a and b) and c and d
""",
"""
def f(x):
x = ((a and b) and c) and d
""",
]
self.check_rewrites(sources, self.s._handle_boolop_binarize())
# and
self.check_rewrites(
[
"""
def f(x):
x = a and b and c and d
""",
"""
def f(x):
x = (a and b) and c and d
""",
"""
def f(x):
x = ((a and b) and c) and d
""",
],
self.s._handle_boolop_binarize(),
)
# Both forms are a bit verbose, but the first is somewhat more passable
# Fifth, the above call is essentially the following reduction but
# with the intermediate term(s) spelled out:
self.check_rewrite(
source1, source3, many(_some_top_down(self.s._handle_boolop_binarize()))
)
# Sixth, we can use the default rules to document full reduction sequences
sources_continued = [
source3,
"""
def f(x):
a1 = (a and b) and c
x = a1 and d
""",
"""
def f(x):
a2 = a and b
a1 = a2 and c
if a1:
x = d
else:
x = a1
""",
"""
def f(x):
if a:
a2 = b
else:
a2 = a
if a2:
a1 = c
else:
a1 = a2
if a1:
x = d
else:
x = a1
""",
]
self.check_rewrites(sources_continued)
# TODO: Remarks based on the sequence above:
# At some point we may decide to use top_down rather than some_top_down
def check_rewrite_as_ast(self, source, expected, rules=default_rules):
"""Applying rules to source yields expected -- checked as ASTs"""
self.maxDiff = None
self.s._count = 0
m = ast.parse(source)
result = self.s.single_assignment(m)
self.assertEqual(ast.dump(result), ast.dump(ast.parse(expected)))
def test_single_assignment_pre_unassigned_expressions(self) -> None:
"""Tests for state before adding rule to handle unassigned expressions"""
source = """
def f(x):
g(x)+x
"""
expected = """
def f(x):
g(x) + x
"""
self.check_rewrite(
source,
expected,
            many(  # Custom-wire the rewrites to those that existed before this diff
_some_top_down(
first(
[
self.s._handle_return(),
self.s._handle_for(),
self.s._handle_assign(),
]
)
)
),
)
def test_single_assignment_unassigned_expressions(self) -> None:
"""Test unassiged expressions rewrite"""
# Check that the unassigned expressions rule (unExp) works alone
source = """
def f(x):
g(x)+x
"""
expected = """
def f(x):
u1 = g(x) + x
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_unassigned())
)
# Check that the unassigned expressions rule (unExp) works in context
source = """
def f(x):
g(x)+x
"""
expected = """
def f(x):
r3 = [x]
r4 = {}
a2 = g(*r3, **r4)
u1 = a2 + x
"""
self.check_rewrite(
source,
expected,
many(
_some_top_down(
first(
[
self.s._handle_unassigned(),
self.s._handle_return(),
self.s._handle_for(),
self.s._handle_assign(),
]
)
)
),
)
def test_single_assignment_if(self) -> None:
"""Test if rewrite"""
# Check that rule will leave uninteresting expressions alone
source = """
def f(x):
if x:
c=a+b+c
else:
b=c+a+b
"""
expected = """
def f(x):
if x:
c = a + b + c
else:
b = c + a + b
"""
self.check_rewrite(
source, expected, many(_some_top_down(first([self.s._handle_if()])))
)
# Check that the if rule works (alone) on an elementary expression
source = """
def f(x):
if x+x>x:
c=a+b+c
else:
b=c+a+b
"""
expected = """
def f(x):
r1 = x + x > x
if r1:
c = a + b + c
else:
b = c + a + b
"""
self.check_rewrite(source, expected, _some_top_down(self.s._handle_if()))
# Check that the if rule works (alone) with elif clauses
source = """
def f(x):
if x+x>x:
c=a+b+c
elif y+y>y:
a=c+b+a
else:
b=c+a+b
"""
expected = """
def f(x):
r1 = x + x > x
if r1:
c = a + b + c
else:
r2 = y + y > y
if r2:
a = c + b + a
else:
b = c + a + b
"""
self.check_rewrite(source, expected, many(_some_top_down(self.s._handle_if())))
# Check that the if rule works (with others) on an elementary expression
source = """
def f(x):
if gt(x+x,x):
c=a+b+c
else:
b=c+a+b
"""
expected = """
def f(x):
a6 = x + x
a5 = [a6]
a7 = [x]
r4 = a5 + a7
r8 = {}
r1 = gt(*r4, **r8)
if r1:
a2 = a + b
c = a2 + c
else:
a3 = c + a
b = a3 + b
"""
self.check_rewrite(
source,
expected,
many(
_some_top_down(
first(
[
self.s._handle_if(),
self.s._handle_unassigned(),
self.s._handle_return(),
self.s._handle_for(),
self.s._handle_assign(),
]
)
)
),
)
def test_single_assignment_while(self) -> None:
"""Test while rewrite"""
# Check that while_not_True rule works (alone) on simple cases
source = """
def f(x):
while c:
x=x+1
"""
expected = """
def f(x):
while True:
w1 = c
if w1:
x = x + 1
else:
break
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_while_not_True())
)
# Check that the while_not_True rewrite reaches normal form
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_while_not_True()))
)
# Check that while_not_True_else rule works (alone) on simple cases
source = """
def f(x):
while c:
x=x+1
else:
x=x-1
"""
expected = """
def f(x):
while True:
w1 = c
if w1:
x = x + 1
else:
break
if not w1:
x = x - 1
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_while_not_True_else())
)
# Check that the while_not_True_else rewrite reaches normal form
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_while_not_True_else()))
)
# Check that while_True_else rule works (alone) on simple cases
source = """
def f(x):
while True:
x=x+1
else:
x=x-1
"""
expected = """
def f(x):
while True:
x = x + 1
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_while_True_else())
)
# Check that while_True_else rule, alone, on simple cases, reaches a normal form
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_while_True_else()))
)
# Check that (combined) while rule works (alone) on simple cases
source = """
def f(x):
while c:
x=x+1
while d:
y=y+1
else:
y=y-1
while True:
z=z+1
else:
z=z-1
"""
expected = """
def f(x):
while True:
w1 = c
if w1:
x = x + 1
else:
break
while True:
w2 = d
if w2:
y = y + 1
else:
break
if not w2:
y = y - 1
while True:
z = z + 1
"""
self.check_rewrite(source, expected, _some_top_down(self.s._handle_while()))
# Extra check: Make sure they are idempotent
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_while()))
)
# Check that the while rewrite works with everything else
self.maxDiff = None
source = """
def f(x):
while c:
x=(x+1)-s
else:
x=(x-1)+s
while True:
y=(y+1)-s
else:
y=(y-1)+s
"""
expected = """
def f(x):
while True:
w1 = c
if w1:
a5 = 1
a2 = x + a5
x = a2 - s
else:
break
r3 = not w1
if r3:
a8 = 1
a6 = x - a8
x = a6 + s
while True:
a7 = 1
a4 = y + a7
y = a4 - s
"""
self.check_rewrite(source, expected)
def test_single_assignment_boolop_binarize(self) -> None:
"""Test the rule for converting boolean operators into binary operators"""
source = """
def f(x):
x = a and b and c and d
"""
expected = """
def f(x):
x = ((a and b) and c) and d
"""
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_boolop_binarize()))
)
source = """
def f(x):
x = a and b and c or d or e
"""
expected = """
def f(x):
x = ((a and b) and c or d) or e
"""
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_boolop_binarize()))
)
def test_single_assignment_boolop_linearize(self) -> None:
"""Test the assign rule for linearizing binary boolean ops"""
source = """
def f(x):
x = (a and b) and c
"""
expected = """
def f(x):
a1 = a and b
x = a1 and c
"""
self.check_rewrite(
source,
expected,
many(_some_top_down(self.s._handle_assign_boolop_linearize())),
)
source = """
def f(x):
x = ((a and b) and c) and d
"""
expected = """
def f(x):
a2 = a and b
a1 = a2 and c
x = a1 and d
"""
self.check_rewrite(
source,
expected,
many(_some_top_down(self.s._handle_assign_boolop_linearize())),
)
def test_single_assignment_and2if(self) -> None:
"""Test the assign rule for converting a binary and into an if statement"""
source = """
def f(x):
x = a and b
"""
expected = """
def f(x):
if a:
x = b
else:
x = a
"""
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_assign_and2if()))
)
def test_single_assignment_or2if(self) -> None:
"""Test the assign rule for converting a binary or into an if statement"""
source = """
def f(x):
x = a or b
"""
expected = """
def f(x):
if a:
x = a
else:
x = b
"""
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_assign_or2if()))
)
def test_single_assignment_boolop_all(self) -> None:
"""Test the combined rules for boolean operators"""
source = """
def f(x):
x = a and b and c and d
"""
expected = """
def f(x):
if a:
a2 = b
else:
a2 = a
if a2:
a1 = c
else:
a1 = a2
if a1:
x = d
else:
x = a1
"""
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_boolop_all()))
)
source = """
def f(x):
x = a and b and c or d or e
"""
expected = """
def f(x):
if a:
a3 = b
else:
a3 = a
if a3:
a2 = c
else:
a2 = a3
if a2:
a1 = a2
else:
a1 = d
if a1:
x = a1
else:
x = e
"""
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_boolop_all()))
)
def test_single_assignment_handle_compare_binarize(self) -> None:
"""Test the rule for converting n-way comparisons into binary ones"""
source = """
def f(x):
x = a < b > c == d
"""
expected = """
def f(x):
x = a < b and (b > c and c == d)
"""
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_compare_binarize()))
)
source = """
def f(x):
x = a < 1 + b > c == d
"""
expected = """
def f(x):
x = a < 1 + b > c == d
"""
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_compare_binarize()))
)
source = """
def f(x):
x = a + 1 < b > c + 1 == d
"""
expected = """
def f(x):
x = a + 1 < b and b > c + 1 == d
""" # Note that the term after the "and" is not reduced
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_compare_binarize()))
)
def test_single_assignment_handle_assign_compare_lefthandside(self) -> None:
"""Test the rule for lifting first argument of n-way comparison"""
source = """
def f(x):
x = 1 + a < 1 + b > c == d
"""
expected = """
def f(x):
a1 = 1 + a
x = a1 < 1 + b > c == d
"""
self.check_rewrite(
source,
expected,
many(_some_top_down(self.s._handle_assign_compare_lefthandside())),
)
def test_single_assignment_handle_assign_compare_righthandside(self) -> None:
"""Test the rule for lifting second argument of n-way comparison"""
source = """
def f(x):
z = 1 + a
x = z < 1 + b > c == d
"""
expected = """
def f(x):
z = 1 + a
a1 = 1 + b
x = z < a1 > c == d
"""
self.check_rewrite(
source,
expected,
many(_some_top_down(self.s._handle_assign_compare_righthandside())),
)
def test_single_assignment_handle_assign_compare_bothhandsides(self) -> None:
"""Test the rules for lifting first and second args of n-way comparison"""
source = """
def f(x):
x = 1 + a < 1 + b > c == d
"""
expected = """
def f(x):
a1 = 1 + a
a2 = 1 + b
x = a1 < a2 > c == d
"""
self.check_rewrite(
source,
expected,
many(
_some_top_down(
first(
[
self.s._handle_assign_compare_lefthandside(),
self.s._handle_assign_compare_righthandside(),
]
)
)
),
)
def test_single_assignment_handle_assign_compare_all(self) -> None:
"""Test alls rules for n-way comparisons"""
source = """
def f(x):
x = 1 + a < 1 + b > c == d
"""
expected = """
def f(x):
a1 = 1 + a
a2 = 1 + b
x = a1 < a2 and (a2 > c and c == d)
"""
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_compare_all()))
)
def test_single_assignment_handle_assign_compare_all_combined(self) -> None:
"""Test alls rules for n-way comparisons combined with rest"""
source = """
def f(x):
x = 1 + a < 1 + b > c == d
"""
expected = """
def f(x):
a2 = 1
a1 = a2 + a
a4 = 1
a3 = a4 + b
a5 = a1 < a3
if a5:
a6 = a3 > c
if a6:
x = c == d
else:
x = a6
else:
x = a5
"""
self.check_rewrite(source, expected)
def test_single_assignment_lists(self) -> None:
"""Test the assign rule for lists"""
source = """
def f(x):
x = [1+a,a+b,c+d]
"""
expected = """
def f(x):
a2 = 1
a1 = a2 + a
a3 = a + b
a4 = c + d
x = [a1, a3, a4]
"""
self.check_rewrite(source, expected)
def test_single_assignment_dict(self) -> None:
"""Test the assign rule for dictionaries"""
source = """
def f(x):
x = {"a"+"b":x+x}
"""
expected = """
def f(x):
a2 = 'a'
a4 = 'b'
a1 = a2 + a4
a3 = x + x
x = {a1: a3}
"""
self.check_rewrite(source, expected)
source = """
def f(x):
x = {"a"+"b":x+x, "c"+"d":x-x}
"""
expected = """
def f(x):
a2 = 'a'
a4 = 'b'
a1 = a2 + a4
a3 = x + x
a6 = 'c'
a8 = 'd'
a5 = a6 + a8
a7 = x - x
x = {a1: a3, a5: a7}"""
self.check_rewrite(source, expected)
def test_single_assignment_tuple(self) -> None:
"""Test the assign rule for tuples"""
source = """
def f(x):
x = 1+a,a+b,c+d
"""
expected = """
def f(x):
a2 = 1
a1 = a2 + a
a3 = a + b
a4 = c + d
x = a1, a3, a4
"""
self.check_rewrite(source, expected)
def test_single_assignment_1(self) -> None:
"""Tests for single_assignment.py"""
self.maxDiff = None
source = """
def f():
aab = a + b
if aab:
return 1 + ~x + 2 + g(5, y=6)
z = torch.tensor([1.0 + 2.0, 4.0])
for x in [[10, 20], [30, 40]]:
for y in x:
_1 = x+y
_2 = print(_1)
return 8 * y / (4 * z)
"""
expected = """
def f():
aab = a + b
if aab:
a16 = 1
a22 = ~x
a8 = a16 + a22
a17 = 2
a5 = a8 + a17
a28 = 5
r23 = [a28]
a33 = 6
r31 = dict(y=a33)
a9 = g(*r23, **r31)
r1 = a5 + a9
return r1
a2 = torch.tensor
a29 = 1.0
a32 = 2.0
a24 = a29 + a32
a30 = 4.0
a18 = [a24, a30]
r10 = [a18]
r25 = {}
z = a2(*r10, **r25)
a11 = 10
a19 = 20
a6 = [a11, a19]
a20 = 30
a26 = 40
a12 = [a20, a26]
f3 = [a6, a12]
for x in f3:
for y in x:
_1 = x + y
r13 = [_1]
r27 = {}
_2 = print(*r13, **r27)
a14 = 8
a7 = a14 * y
a21 = 4
a15 = a21 * z
r4 = a7 / a15
return r4
"""
self.check_rewrite(source, expected)
def test_single_assignment_2(self) -> None:
"""Tests for single_assignment.py"""
self.maxDiff = None
source = "b = c(d + e).f(g + h)"
expected = """
a6 = d + e
r4 = [a6]
r8 = {}
a2 = c(*r4, **r8)
a1 = a2.f
a5 = g + h
r3 = [a5]
r7 = {}
b = a1(*r3, **r7)
"""
self.check_rewrite(source, expected)
def test_single_assignment_3(self) -> None:
"""Tests for single_assignment.py"""
self.maxDiff = None
source = "a = (b+c)[f(d+e)]"
expected = """
a1 = b + c
a4 = d + e
r3 = [a4]
r5 = {}
a2 = f(*r3, **r5)
a = a1[a2]
"""
self.check_rewrite(source, expected)
def test_single_assignment_call_single_star_arg(self) -> None:
"""Test the assign rule final step in rewriting regular call arguments"""
source = """
x = f(*([1]+[2]))
"""
expected = """
r1 = [1] + [2]
x = f(*r1)
"""
self.check_rewrite(
source,
expected,
_some_top_down(self.s._handle_assign_call_single_star_arg()),
)
self.check_rewrite(
source,
expected,
many(_some_top_down(self.s._handle_assign_call_single_star_arg())),
)
expected = """
a3 = 1
a2 = [a3]
a6 = 2
a4 = [a6]
r1 = a2 + a4
r5 = {}
x = f(*r1, **r5)
"""
self.check_rewrite(source, expected)
def test_single_assignment_call_single_double_star_arg(self) -> None:
"""Test the assign rule final step in rewriting keyword call arguments"""
source = """
x = f(*d, **({x: 5}))
"""
expected = """
r1 = {x: 5}
x = f(*d, **r1)
"""
self.check_rewrite(
source,
expected,
_some_top_down(self.s._handle_assign_call_single_double_star_arg()),
)
self.check_rewrite(
source,
expected,
many(_some_top_down(self.s._handle_assign_call_single_double_star_arg())),
)
expected = """
a2 = 5
r1 = {x: a2}
x = f(*d, **r1)"""
self.check_rewrite(source, expected)
def test_single_assignment_call_two_star_args(self) -> None:
"""Test the assign rule for merging starred call arguments"""
source = """
x = f(*[1],*[2])
"""
expected = """
x = f(*([1] + [2]))
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_assign_call_two_star_args())
)
self.check_rewrite(
source,
expected,
many(_some_top_down(self.s._handle_assign_call_two_star_args())),
)
expected = """
a3 = 1
a2 = [a3]
a6 = 2
a4 = [a6]
r1 = a2 + a4
r5 = {}
x = f(*r1, **r5)
"""
self.check_rewrite(source, expected)
def test_single_assignment_call_two_double_star_args(self) -> None:
"""Test the assign rule for merging double starred call arguments"""
source = """
x = f(*d,**a, **b, **c)
"""
expected = """
x = f(*d, **dict(**a, **b), **c)
"""
self.check_rewrite(
source,
expected,
_some_top_down(self.s._handle_assign_call_two_double_star_args()),
)
expected = """
x = f(*d, **dict(**dict(**a, **b), **c))
"""
self.check_rewrite(
source,
expected,
many(_some_top_down(self.s._handle_assign_call_two_double_star_args())),
)
source = expected
expected = """
r1 = dict(**dict(**a, **b), **c)
x = f(*d, **r1)
"""
self.check_rewrite(
source,
expected,
many(_some_top_down(self.s._handle_assign_call_single_double_star_arg())),
)
expected = """
a2 = dict(**a, **b)
r1 = dict(**a2, **c)
x = f(*d, **r1)
"""
self.check_rewrite(source, expected)
source = """
x= f(**{a:1},**{b:3})
"""
expected = """
x = f(**dict(**{a: 1}, **{b: 3}))
"""
self.check_rewrite(
source,
expected,
_some_top_down(self.s._handle_assign_call_two_double_star_args()),
)
def test_single_assignment_call_regular_arg(self) -> None:
"""Test the assign rule for starring an unstarred regular arg"""
source = """
x = f(*[1], 2)
"""
expected = """
x = f(*[1], *[2])
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_assign_call_regular_arg())
)
self.check_rewrite(
source,
expected,
many(_some_top_down(self.s._handle_assign_call_regular_arg())),
)
expected = """
a3 = 1
a2 = [a3]
a6 = 2
a4 = [a6]
r1 = a2 + a4
r5 = {}
x = f(*r1, **r5)
"""
self.check_rewrite(source, expected)
def test_single_assignment_call_keyword_arg(self) -> None:
"""Test the assign rule for starring an unstarred keyword arg"""
source = """
x = f(**dict(**d), k=42, **dict(**e))
"""
expected = """
x = f(**dict(**d), **dict(k=42), **dict(**e))
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_assign_call_keyword_arg())
)
self.check_rewrite(
source,
expected,
many(_some_top_down(self.s._handle_assign_call_keyword_arg())),
)
        # TODO: This is just for debugging a non-terminating loop
expected = """
x = f(*[], **dict(**d), k=42, **dict(**e))
"""
self.check_rewrite(source, expected, _some_top_down(self.s._handle_assign()))
source = expected
expected = """
r1 = []
x = f(*r1, **dict(**d), k=42, **dict(**e))
"""
self.check_rewrite(source, expected, _some_top_down(self.s._handle_assign()))
source = expected
expected = """
r1 = []
x = f(*r1, **dict(**d), **dict(k=42), **dict(**e))
"""
self.check_rewrite(source, expected, _some_top_down(self.s._handle_assign()))
source = expected
expected = """
r1 = []
x = f(*r1, **dict(**dict(**d), **dict(k=42)), **dict(**e))
"""
self.check_rewrite(source, expected, _some_top_down(self.s._handle_assign()))
source = expected
expected = """
r1 = []
x = f(*r1, **dict(**dict(**dict(**d), **dict(k=42)), **dict(**e)))
"""
self.check_rewrite(source, expected, _some_top_down(self.s._handle_assign()))
source = expected
expected = """
r1 = []
a3 = dict(**d)
a6 = 42
a5 = dict(k=a6)
a2 = dict(**a3, **a5)
a4 = dict(**e)
r1 = dict(**a2, **a4)
x = f(*r1, **r1)
"""
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_assign()))
)
source = """
x = f(**dict(**d), k=42, **dict(**e))
"""
expected = """
r1 = []
a4 = dict(**d)
a7 = 42
a6 = dict(k=a7)
a3 = dict(**a4, **a6)
a5 = dict(**e)
r2 = dict(**a3, **a5)
x = f(*r1, **r2)
"""
self.check_rewrite(source, expected)
source = """
x = f()
"""
expected = """
r1 = []
r2 = {}
x = f(*r1, **r2)
"""
self.check_rewrite(source, expected)
def test_single_assignment_call_empty_regular_arg(self) -> None:
"""Test the assign rule for starring an empty regular arg"""
source = """
x = f()
"""
expected = """
x = f(*[])
"""
self.check_rewrite(
source,
expected,
_some_top_down(self.s._handle_assign_call_empty_regular_arg()),
)
self.check_rewrite(
source,
expected,
many(_some_top_down(self.s._handle_assign_call_empty_regular_arg())),
)
expected = """
r1 = []
r2 = {}
x = f(*r1, **r2)
"""
self.check_rewrite(source, expected)
def test_single_assignment_call_three_arg(self) -> None:
"""Test the assign rule for starring an unstarred regular arg"""
source = """
x = f(1, 2, 3)
"""
expected = """
a6 = 1
a3 = [a6]
a9 = 2
a7 = [a9]
a2 = a3 + a7
a8 = 3
a4 = [a8]
r1 = a2 + a4
r5 = {}
x = f(*r1, **r5)
"""
self.check_rewrite(source, expected)
def disabled_test_crashing_case(self) -> None:
"""Debugging a crash in an external test"""
# PYTHON VERSIONING ISSUE
# TODO: There is some difference in the parse trees in the new version of
# Python that we are not expecting. Until we understand what is going on,
# disable this test.
source = """
def flip_logit_constant():
return Bernoulli(logits=tensor(-2.0))
"""
expected = """
def flip_logit_constant():
r2 = []
a7 = 2.0
a6 = -a7
r5 = [a6]
r8 = {}
a4 = tensor(*r5, **r8)
r3 = dict(logits=a4)
r1 = Bernoulli(*r2, **r3)
return r1
"""
self.check_rewrite(source, expected)
self.check_rewrite_as_ast(source, expected)
def test_single_assignment_listComp(self) -> None:
"""Test the assign rule for desugaring listComps"""
# TODO: We should add some tests to check that we
# handle nested function definitions correctly
self.maxDiff = None
source = """
x = [i for i in range(0,j) if even(i+j)]
"""
expected = """
def p1():
r2 = []
for i in range(0, j):
if even(i + j):
r2.append(i)
return r2
x = p1()
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_assign_listComp())
)
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_assign_listComp()))
)
expected = """
def p1():
r2 = []
a15 = 0
a12 = [a15]
a16 = [j]
r10 = a12 + a16
r17 = {}
f3 = range(*r10, **r17)
for i in f3:
a11 = i + j
r7 = [a11]
r13 = {}
r4 = even(*r7, **r13)
if r4:
a8 = r2.append
r14 = [i]
r18 = {}
u6 = a8(*r14, **r18)
return r2
r5 = []
r9 = {}
x = p1(*r5, **r9)
"""
self.check_rewrite(source, expected)
source = """
y = [(x,y) for x in range(0,10) for y in range (x,10) if y == 2*x]
"""
expected = """
def p1():
r2 = []
for x in range(0, 10):
for y in range(x, 10):
if y == 2 * x:
r2.append((x, y))
return r2
y = p1()
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_assign_listComp())
)
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_assign_listComp()))
)
expected = """
def p1():
r2 = []
a15 = 0
a13 = [a15]
a20 = 10
a16 = [a20]
r10 = a13 + a16
r17 = {}
f3 = range(*r10, **r17)
for x in f3:
a18 = [x]
a24 = 10
a21 = [a24]
r14 = a18 + a21
r22 = {}
f4 = range(*r14, **r22)
for y in f4:
a11 = 2
a7 = a11 * x
r6 = y == a7
if r6:
a12 = r2.append
a23 = x, y
r19 = [a23]
r25 = {}
u8 = a12(*r19, **r25)
return r2
r5 = []
r9 = {}
y = p1(*r5, **r9)
"""
self.check_rewrite(source, expected)
source = """
y = [(x,y) for x in range(0,10) if x>0 for y in range (x,10) if y == 2*x]
"""
expected = """
def p1():
r2 = []
for x in range(0, 10):
if x > 0:
for y in range(x, 10):
if y == 2 * x:
r2.append((x, y))
return r2
y = p1()
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_assign_listComp())
)
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_assign_listComp()))
)
expected = """
def p1():
r2 = []
a16 = 0
a13 = [a16]
a20 = 10
a17 = [a20]
r10 = a13 + a17
r18 = {}
f3 = range(*r10, **r18)
for x in f3:
a6 = 0
r4 = x > a6
if r4:
a21 = [x]
a26 = 10
a23 = [a26]
r19 = a21 + a23
r24 = {}
f7 = range(*r19, **r24)
for y in f7:
a14 = 2
a11 = a14 * x
r8 = y == a11
if r8:
a15 = r2.append
a25 = x, y
r22 = [a25]
r27 = {}
u12 = a15(*r22, **r27)
return r2
r5 = []
r9 = {}
y = p1(*r5, **r9)
"""
self.check_rewrite(source, expected)
def test_single_assignment_setComp(self) -> None:
"""Test the assign rule for desugaring setComps"""
# TODO: We should add some tests to check that we
# handle nested function definitions correctly
self.maxDiff = None
source = """
x = {i for i in range(0,j) if even(i+j)}
"""
expected = """
def p1():
r2 = set()
for i in range(0, j):
if even(i + j):
r2.add(i)
return r2
x = p1()
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_assign_setComp())
)
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_assign_setComp()))
)
expected = """
def p1():
r2 = set()
a15 = 0
a12 = [a15]
a16 = [j]
r10 = a12 + a16
r17 = {}
f3 = range(*r10, **r17)
for i in f3:
a11 = i + j
r7 = [a11]
r13 = {}
r4 = even(*r7, **r13)
if r4:
a8 = r2.add
r14 = [i]
r18 = {}
u6 = a8(*r14, **r18)
return r2
r5 = []
r9 = {}
x = p1(*r5, **r9)
"""
self.check_rewrite(source, expected)
source = """
y = {(x,y) for x in range(0,10) for y in range (x,10) if y == 2*x}
"""
expected = """
def p1():
r2 = set()
for x in range(0, 10):
for y in range(x, 10):
if y == 2 * x:
r2.add((x, y))
return r2
y = p1()
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_assign_setComp())
)
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_assign_setComp()))
)
expected = """
def p1():
r2 = set()
a15 = 0
a13 = [a15]
a20 = 10
a16 = [a20]
r10 = a13 + a16
r17 = {}
f3 = range(*r10, **r17)
for x in f3:
a18 = [x]
a24 = 10
a21 = [a24]
r14 = a18 + a21
r22 = {}
f4 = range(*r14, **r22)
for y in f4:
a11 = 2
a7 = a11 * x
r6 = y == a7
if r6:
a12 = r2.add
a23 = x, y
r19 = [a23]
r25 = {}
u8 = a12(*r19, **r25)
return r2
r5 = []
r9 = {}
y = p1(*r5, **r9)
"""
self.check_rewrite(source, expected)
source = """
y = {(x,y) for x in range(0,10) if x>0 for y in range (x,10) if y == 2*x}
"""
expected = """
def p1():
r2 = set()
for x in range(0, 10):
if x > 0:
for y in range(x, 10):
if y == 2 * x:
r2.add((x, y))
return r2
y = p1()
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_assign_setComp())
)
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_assign_setComp()))
)
expected = """
def p1():
r2 = set()
a16 = 0
a13 = [a16]
a20 = 10
a17 = [a20]
r10 = a13 + a17
r18 = {}
f3 = range(*r10, **r18)
for x in f3:
a6 = 0
r4 = x > a6
if r4:
a21 = [x]
a26 = 10
a23 = [a26]
r19 = a21 + a23
r24 = {}
f7 = range(*r19, **r24)
for y in f7:
a14 = 2
a11 = a14 * x
r8 = y == a11
if r8:
a15 = r2.add
a25 = x, y
r22 = [a25]
r27 = {}
u12 = a15(*r22, **r27)
return r2
r5 = []
r9 = {}
y = p1(*r5, **r9)
"""
self.check_rewrite(source, expected)
def test_single_assignment_dictComp(self) -> None:
"""Test the assign rule for desugaring dictComps"""
# TODO: We should add some tests to check that we
# handle nested function definitions correctly
self.maxDiff = None
source = """
x = {i:i for i in range(0,j) if even(i+j)}
"""
expected = """
def p1():
r2 = {}
for i in range(0, j):
if even(i + j):
r2.__setitem__(i, i)
return r2
x = p1()
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_assign_dictComp())
)
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_assign_dictComp()))
)
expected = """
def p1():
r2 = {}
a14 = 0
a12 = [a14]
a15 = [j]
r10 = a12 + a15
r16 = {}
f3 = range(*r10, **r16)
for i in f3:
a11 = i + j
r7 = [a11]
r13 = {}
r4 = even(*r7, **r13)
if r4:
a8 = r2.__setitem__
a18 = [i]
a19 = [i]
r17 = a18 + a19
r20 = {}
u6 = a8(*r17, **r20)
return r2
r5 = []
r9 = {}
x = p1(*r5, **r9)
"""
self.check_rewrite(source, expected)
source = """
y = {x:y for x in range(0,10) for y in range (x,10) if y == 2*x}
"""
expected = """
def p1():
r2 = {}
for x in range(0, 10):
for y in range(x, 10):
if y == 2 * x:
r2.__setitem__(x, y)
return r2
y = p1()
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_assign_dictComp())
)
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_assign_dictComp()))
)
expected = """
def p1():
r2 = {}
a15 = 0
a13 = [a15]
a19 = 10
a16 = [a19]
r10 = a13 + a16
r17 = {}
f3 = range(*r10, **r17)
for x in f3:
a18 = [x]
a22 = 10
a20 = [a22]
r14 = a18 + a20
r21 = {}
f4 = range(*r14, **r21)
for y in f4:
a11 = 2
a7 = a11 * x
r6 = y == a7
if r6:
a12 = r2.__setitem__
a24 = [x]
a25 = [y]
r23 = a24 + a25
r26 = {}
u8 = a12(*r23, **r26)
return r2
r5 = []
r9 = {}
y = p1(*r5, **r9)
"""
self.check_rewrite(source, expected)
source = """
y = {x:y for x in range(0,10) if x>0 for y in range (x,10) if y == 2*x}
"""
expected = """
def p1():
r2 = {}
for x in range(0, 10):
if x > 0:
for y in range(x, 10):
if y == 2 * x:
r2.__setitem__(x, y)
return r2
y = p1()
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_assign_dictComp())
)
self.check_rewrite(
source, expected, many(_some_top_down(self.s._handle_assign_dictComp()))
)
expected = """
def p1():
r2 = {}
a16 = 0
a13 = [a16]
a20 = 10
a17 = [a20]
r10 = a13 + a17
r18 = {}
f3 = range(*r10, **r18)
for x in f3:
a6 = 0
r4 = x > a6
if r4:
a21 = [x]
a24 = 10
a22 = [a24]
r19 = a21 + a22
r23 = {}
f7 = range(*r19, **r23)
for y in f7:
a14 = 2
a11 = a14 * x
r8 = y == a11
if r8:
a15 = r2.__setitem__
a26 = [x]
a27 = [y]
r25 = a26 + a27
r28 = {}
u12 = a15(*r25, **r28)
return r2
r5 = []
r9 = {}
y = p1(*r5, **r9)
"""
self.check_rewrite(source, expected)
def test_single_assignment_nested_call_named_arg(self) -> None:
self.maxDiff = None
# This test originally pointed out a bug in the rewriting logic.
# We should be pulling the invocation of c() out into
# its own top-level function call.
#
# The code below should be rewritten as something like:
#
# t1 = []
# t2 = {}
# t3 = c(*t1, **t2)
# t4 = []
# t5 = {'n' : t3}
# t6 = b(*t4, **t5)
# return t6
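# (Illustrative sketch, not part of the test: the rewritten form is equivalent
# to the original call. If c() returned 5, then
#     r5, r6 = [], {}
#     a4 = c(*r5, **r6)   # a4 == 5
#     r3 = dict(n=a4)     # {'n': 5}
#     b(*[], **r3)        # same as b(n=5)
# so pulling c() into its own call preserves both the value and the order
# of evaluation.)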
source = """
def f():
return b(n=c())
"""
expected = """
def f():
r2 = []
r3 = dict(n=c())
r1 = b(*r2, **r3)
return r1
"""
# The previous "expected" was the undesirable output, which we got at the time of the bug report
# The following "expected" is after the bug fix
expected = """
def f():
r2 = []
r5 = []
r6 = {}
a4 = c(*r5, **r6)
r3 = dict(n=a4)
r1 = b(*r2, **r3)
return r1
"""
self.check_rewrite(source, expected)
# Helper tests to fix the bug identified above
# Interestingly, regular arguments are OK
source = """
def f():
return b(c())
"""
expected = """
def f():
r5 = []
r6 = {}
a3 = c(*r5, **r6)
r2 = [a3]
r4 = {}
r1 = b(*r2, **r4)
return r1
"""
self.check_rewrite(source, expected)
# It was further noted that the following expression was
# also not handled well
source = """
def f():
return b(n1=c1(),n2=c2(),n3=c3())
"""
# In particular, it produced the following output, which
# has nested "dict" calls that are should be removed
expected = """
def f():
r2 = []
r3 = dict(**dict(**dict(n1=c1()), **dict(n2=c2())), **dict(n3=c3()))
r1 = b(*r2, **r3)
return r1
"""
# To fix this, first we introduced the rewrite "binary_dict_left"
# With the introduction of that rule we get
expected = """
def f():
r2 = []
r7 = []
r8 = {}
a6 = c1(*r7, **r8)
a5 = dict(n1=a6)
a4 = dict(**a5, **dict(n2=c2()))
r3 = dict(**a4, **dict(n3=c3()))
r1 = b(*r2, **r3)
return r1
"""
# Next, we introduced "binary_dict_right" and then we get
expected = """
def f():
r2 = []
r11 = []
r14 = {}
a7 = c1(*r11, **r14)
a5 = dict(n1=a7)
r13 = []
r16 = {}
a10 = c2(*r13, **r16)
a8 = dict(n2=a10)
a4 = dict(**a5, **a8)
r12 = []
r15 = {}
a9 = c3(*r12, **r15)
a6 = dict(n3=a9)
r3 = dict(**a4, **a6)
r1 = b(*r2, **r3)
return r1
"""
self.check_rewrite(source, expected)
# It was useful to note that there was no similar problem with
# calls that have regular arguments
source = """
def f():
return b(*[c()])
"""
expected = """
def f():
r5 = []
r6 = {}
a3 = c(*r5, **r6)
r2 = [a3]
r4 = {}
r1 = b(*r2, **r4)
return r1
"""
self.check_rewrite(source, expected)
# No similar problem with multiple regular arguments also:
source = """
def f():
return b(c1(),c2())
"""
expected = """
def f():
r8 = []
r10 = {}
a4 = c1(*r8, **r10)
a3 = [a4]
r9 = []
r11 = {}
a7 = c2(*r9, **r11)
a5 = [a7]
r2 = a3 + a5
r6 = {}
r1 = b(*r2, **r6)
return r1
"""
self.check_rewrite(source, expected)
def test_single_assignment_assign_unary_dict(self) -> None:
"""Test the first special rule for dict (the unary case)"""
self.maxDiff = None
source = """
x = dict(n=c())
"""
expected = """
a1 = c()
x = dict(n=a1)
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_assign_unary_dict())
)
def test_single_assignment_assign_binary_dict_left(self) -> None:
"""Test the first special rule for dict (the binary left case)"""
self.maxDiff = None
source = """
x = dict(**c(),**d())
"""
expected = """
a1 = c()
x = dict(**a1, **d())
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_assign_binary_dict_left())
)
def test_single_assignment_assign_binary_dict_right(self) -> None:
"""Test the first special rule for dict (the binary right case)"""
self.maxDiff = None
source = """
x = dict(**c,**d())
"""
expected = """
a1 = d()
x = dict(**c, **a1)
"""
self.check_rewrite(
source, expected, _some_top_down(self.s._handle_assign_binary_dict_right())
)
def test_left_value_all(self) -> None:
"""General tests for the full set of assignment left value rules"""
# First, some "most general" normal forms. These are terms that are not
# reduced by this set of rewrites, nor by all the other rules for that matter.
normal_forms = [
"""
def f(x):
a = z
a.b = z
a[b] = z
a[b:c] = z
a[b:] = z
a[:c] = z
a[b:c:d] = z
a[b::d] = z
a[:c:d] = z
a[::d] = z
[] = z
[a] = z
[*a] = z
"""
]
# These terms are normal forms for this specific set
self.check_rewrites(normal_forms, self.s._handle_left_value_all())
# They are also "in most general form" because they are also normal forms for all sets
self.check_rewrites(normal_forms)
# It would of course be nice if we could check that we have captured (at least
# representatives of) all normal form productions, but we have no idea how to do this yet.
# Second, some terms that are only in normal form for this set (but could be
# reducible by other rules). This type of term helps us check that the rules in
# this set do not rewrite terms prematurely (which could alter order of evaluation).
# Note: It's good for such terms to actually contain a reduction that can be done
# once the subterm that is "waited upon" is released. This means that if we want
# to systematically derive waiting terms from normal forms, two subterms would
# typically need to be changed.
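# For example, starting from the normal form `a.b = z`, replacing the target
# with `x.y.a` makes this rule set wait until `x.y` has been named, and replacing
# the right-hand side with `z + 1` leaves a reduction for other rule sets; the
# result is the waiting form `x.y.a = z + 1` listed first below.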
waiting_forms = [
"""
def f(x):
x.y.a = z + 1
x.y[b] = z + 1
a[x.y] = z + 1
x.y[b:c] = z + 1
x.y[b:] = z + 1
x.y[:c] = z + 1
a[x.y:c] = z + 1
a[x.y:] = z + 1
a[b:x.y] = z + 1
a[:x.y] = z + 1
x.y[:c:d] = z + 1
x.y[b::d] = z + 1
x.y[::d] = z + 1
x.y[b:c:d] = z + 1
a[x.y:c:d] = z + 1
a[x.y::d] = z + 1
a[x.y:] = z + 1
a[b:x.y:d] = z + 1
a[b:x.y:d] = z + 1
a[b:x.y] = z + 1
a[:x.y:d] = z + 1
a[:c:x.y] = z + 1
a[::x.y] = z + 1
[x.y.a] = z + 1
[*x.y.a] = z + 1
"""
]
self.check_rewrites(waiting_forms, self.s._handle_left_value_all())
# Third, an example that involves several of the rewrite rules in this
# set
# TODO: The following reduction sequence is incomplete (and does not
# reach a normal form) because we need rewrite rules for splicing
# terms such as z[1:]. Once that functionality is in place, we should
# be able to continue this rewrite until the whole LHS pattern has been
# converted into SSA form.
terms = [
"""
def f(x):
[[a],b,*[c],d,(e.f[g:h[i]].j)] = z
""",
"""
def f(x):
[a] = z[0]
[b, *[c], d, e.f[g:h[i]].j] = z[1:]
""",
"""
def f(x):
a1 = 0
[a] = z[a1]
a2 = 1
[b, *[c], d, e.f[g:h[i]].j] = z[a2:]
""", # Name RHS:
"""
def f(x):
a1 = 0
a3 = z[a1]
[a] = a3
a2 = 1
a4 = z[a2:]
[b, *[c], d, e.f[g:h[i]].j] = a4
""", # Process first element:
"""
def f(x):
a1 = 0
a3 = z[a1]
[a] = a3
a2 = 1
a4 = z[a2:]
b = a4[0]
[*[c], d, e.f[g:h[i]].j] = a4[1:]
""", # Name constants:
"""
def f(x):
a1 = 0
a3 = z[a1]
[a] = a3
a2 = 1
a4 = z[a2:]
a5 = 0
b = a4[a5]
a6 = 1
[*[c], d, e.f[g:h[i]].j] = a4[a6:]
""", # Name RHS:
"""
def f(x):
a1 = 0
a3 = z[a1]
[a] = a3
a2 = 1
a4 = z[a2:]
a5 = 0
b = a4[a5]
a6 = 1
a7 = a4[a6:]
[*[c], d, e.f[g:h[i]].j] = a7
""", # Process last element:
"""
def f(x):
a1 = 0
a3 = z[a1]
[a] = a3
a2 = 1
a4 = z[a2:]
a5 = 0
b = a4[a5]
a6 = 1
a7 = a4[a6:]
[*[c], d] = a7[:-1]
e.f[g:h[i]].j = a7[-1]
""", # Name constants:
"""
def f(x):
a1 = 0
a3 = z[a1]
[a] = a3
a2 = 1
a4 = z[a2:]
a5 = 0
b = a4[a5]
a6 = 1
a7 = a4[a6:]
a8 = -1
[*[c], d] = a7[:a8]
a9 = -1
e.f[g:h[i]].j = a7[a9]
""", # Name RHS:
"""
def f(x):
a1 = 0
a3 = z[a1]
[a] = a3
a2 = 1
a4 = z[a2:]
a5 = 0
b = a4[a5]
a6 = 1
a7 = a4[a6:]
a10 = 1
a8 = -a10
a11 = a7[:a8]
[*[c], d] = a11
a12 = 1
a9 = -a12
a13 = a7[a9]
e.f[g:h[i]].j = a13
""", # Process last element and name LHS expression:
"""
def f(x):
a1 = 0
a3 = z[a1]
[a] = a3
a2 = 1
a4 = z[a2:]
a5 = 0
b = a4[a5]
a6 = 1
a7 = a4[a6:]
a10 = 1
a8 = -a10
a11 = a7[:a8]
[*[c]] = a11[:-1]
d = a11[-1]
a12 = 1
a9 = -a12
a13 = a7[a9]
x14 = e.f[g:h[i]]
x14.j = a13
""", # Name RHS expression:
"""
def f(x):
a1 = 0
a3 = z[a1]
[a] = a3
a2 = 1
a4 = z[a2:]
a5 = 0
b = a4[a5]
a6 = 1
a7 = a4[a6:]
a10 = 1
a8 = -a10
a11 = a7[:a8]
a15 = -1
[*[c]] = a11[:a15]
a16 = -1
d = a11[a16]
a12 = 1
a9 = -a12
a13 = a7[a9]
a17 = e.f
x14 = a17[g:h[i]]
x14.j = a13
""", # Name RHS expression:
"""
def f(x):
a1 = 0
a3 = z[a1]
[a] = a3
a2 = 1
a4 = z[a2:]
a5 = 0
b = a4[a5]
a6 = 1
a7 = a4[a6:]
a10 = 1
a8 = -a10
a11 = a7[:a8]
a18 = 1
a15 = -a18
a19 = a11[:a15]
[*[c]] = a19
a20 = 1
a16 = -a20
d = a11[a16]
a12 = 1
a9 = -a12
a13 = a7[a9]
a17 = e.f
a21 = h[i]
x14 = a17[g:a21]
x14.j = a13
""", # Name LHS expression:
"""
def f(x):
a1 = 0
a3 = z[a1]
[a] = a3
a2 = 1
a4 = z[a2:]
a5 = 0
b = a4[a5]
a6 = 1
a7 = a4[a6:]
a10 = 1
a8 = -a10
a11 = a7[:a8]
a18 = 1
a15 = -a18
a19 = a11[:a15]
[*x22] = a19
[c] = x22
a20 = 1
a16 = -a20
d = a11[a16]
a12 = 1
a9 = -a12
a13 = a7[a9]
a17 = e.f
a21 = h[i]
x14 = a17[g:a21]
x14.j = a13
""",
]
self.check_rewrites(terms)
def test_left_value_attributeref(self) -> None:
"""Test rewrites like a.b.c = z → x = a.b; x.c = z"""
terms = [
"""
def f(x):
a.b.c = z""",
"""
def f(x):
x1 = a.b
x1.c = z""",
]
self.check_rewrites(terms, self.s._handle_left_value_attributeref())
self.check_rewrites(terms, self.s._handle_left_value_all())
self.check_rewrites(terms)
def test_left_value_subscript_value(self) -> None:
"""Test rewrites like a.b[c] = z → x = a.b; x[c] = z.
It also handles [c], [c:d], and [c:d:e] in the same way."""
terms = [
"""
def f(x):
a.b[c] = z""",
"""
def f(x):
x1 = a.b
x1[c] = z""",
]
self.check_rewrites(terms, self.s._handle_left_value_subscript_value())
self.check_rewrites(terms, self.s._handle_left_value_all())
self.check_rewrites(terms)
terms = [
"""
def f(x):
a.b[c:d] = z""",
"""
def f(x):
x1 = a.b
x1[c:d] = z""",
]
self.check_rewrites(terms, self.s._handle_left_value_subscript_value())
self.check_rewrites(terms, self.s._handle_left_value_all())
self.check_rewrites(terms)
terms = [
"""
def f(x):
a.b[c:d:e] = z""",
"""
def f(x):
x1 = a.b
x1[c:d:e] = z""",
]
self.check_rewrites(terms, self.s._handle_left_value_subscript_value())
self.check_rewrites(terms, self.s._handle_left_value_all())
self.check_rewrites(terms)
def test_left_value_subscript_slice_index(self) -> None:
"""Test rewrites like a[b.c] = z → x = b.c; a[x] = z."""
terms = [
"""
def f(x):
a[b.c] = z""",
"""
def f(x):
x1 = b.c
a[x1] = z""",
]
self.check_rewrites(terms, self.s._handle_left_value_subscript_slice_index())
self.check_rewrites(terms, self.s._handle_left_value_all())
self.check_rewrites(terms)
def test_left_value_subscript_slice_lower(self) -> None:
"""Test rewrites like a[b.c:] = z → x = b.c; a[x:] = z."""
terms = [
"""
def f(x):
a[b.c:] = z
a[b.c:d] = z
a[b.c:d:e] = z
a[:d:e] = z""",
"""
def f(x):
x1 = b.c
a[x1:] = z
x2 = b.c
a[x2:d] = z
x3 = b.c
a[x3:d:e] = z
a[:d:e] = z""",
]
self.check_rewrites(terms, self.s._handle_left_value_subscript_slice_lower())
self.check_rewrites(terms, self.s._handle_left_value_all())
self.check_rewrites(terms)
def test_left_value_subscript_slice_upper(self) -> None:
"""Test rewrites like a[:b.c] = z → x = b.c; a[:x] = z."""
terms = [
"""
def f(x):
a[:c.d] = z
a[b:c.d] = z
a[b:c.d:e] = z
a[a::e] = z
a[::e] = z
a[:] = z""",
"""
def f(x):
x1 = c.d
a[:x1] = z
x2 = c.d
a[b:x2] = z
x3 = c.d
a[b:x3:e] = z
a[a::e] = z
a[::e] = z
a[:] = z""",
]
self.check_rewrites(terms, self.s._handle_left_value_subscript_slice_upper())
self.check_rewrites(terms, self.s._handle_left_value_all())
self.check_rewrites(terms)
def test_left_value_subscript_slice_step(self) -> None:
"""Test rewrites like a[:b:c.d] = z → x = c.d; a[b:c:x] = z."""
terms = [
"""
def f(x):
a[::d.e] = z
a[b::d.e] = z
a[b:c:d.e] = z
a[b::e] = z
a[::e] = z
a[:] = z""",
"""
def f(x):
x1 = d.e
a[::x1] = z
x2 = d.e
a[b::x2] = z
x3 = d.e
a[b:c:x3] = z
a[b::e] = z
a[::e] = z
a[:] = z""",
]
self.check_rewrites(terms, self.s._handle_left_value_subscript_slice_step())
self.check_rewrites(terms, self.s._handle_left_value_all())
self.check_rewrites(terms)
def test_left_value_list_star(self) -> None:
"""Test rewrites like [*a.b] = z → [*y] = z; a.b = y."""
terms = [
"""
def f(x):
[*a.b] = z
[*a[b]] = z
(*a.b,) = z
(*a[b],) = z""",
"""
def f(x):
[*x1] = z
a.b = x1
[*x2] = z
a[b] = x2
[*x3] = z
a.b = x3
[*x4] = z
a[b] = x4""",
]
self.check_rewrites(terms, self.s._handle_left_value_list_star())
self.check_rewrites(terms, self.s._handle_left_value_all())
self.check_rewrites(terms)
def test_left_value_list_list(self) -> None:
"""Test rewrites like [[a.b]] = z → [y] = z; [a.b] = y.
Note that this should also work for things where a.b is simply c.
It also pattern matches both tuples and lists as if they are the same."""
terms = [
"""
def f(x):
[[a.b]] = z
[[a[b]]] = z
([a.b],) = z
([a[b]],) = z
[[a]] = z
[[a],b] = z""",
"""
def f(x):
[x1] = z
[a.b] = x1
[x2] = z
[a[b]] = x2
[x3] = z
[a.b] = x3
[x4] = z
[a[b]] = x4
[x5] = z
[a] = x5
[[a], b] = z""",
]
self.check_rewrites(terms, self.s._handle_left_value_list_list())
# The last line above is simplified by another rule in the set, so,
terms[
1
] = """
def f(x):
[x1] = z
[a.b] = x1
[x2] = z
[a[b]] = x2
[x3] = z
[a.b] = x3
[x4] = z
[a[b]] = x4
[x5] = z
[a] = x5
[a] = z[0]
[b] = z[1:]"""
self.check_rewrites(terms, self.s._handle_left_value_all())
# As a result of the change in the last line, further simplifications
# are triggered by other rewrites outside the set
terms += [
"""
def f(x):
[x1] = z
[a.b] = x1
[x2] = z
[a[b]] = x2
[x3] = z
[a.b] = x3
[x4] = z
[a[b]] = x4
[x5] = z
[a] = x5
a6 = 0
[a] = z[a6]
a7 = 1
[b] = z[a7:]""", # Name RHS:
"""
def f(x):
[x1] = z
[a.b] = x1
[x2] = z
[a[b]] = x2
[x3] = z
[a.b] = x3
[x4] = z
[a[b]] = x4
[x5] = z
[a] = x5
a6 = 0
a8 = z[a6]
[a] = a8
a7 = 1
a9 = z[a7:]
[b] = a9""",
]
self.check_rewrites(terms)
def test_left_value_list_not_starred(self) -> None:
"""Test rewrites like [a.b.c, d] = z → a.b.c = z[0]; [d] = z[1:].
Note that this should also work for things where a.b is simply c.
It also pattern matches both tuples and lists as if they are the same."""
terms = [
"""
def f(x):
[a.b.c, d] = z""",
"""
def f(x):
a.b.c = z[0]
[d] = z[1:]""",
]
self.check_rewrites(terms, self.s._handle_left_value_list_not_starred())
self.check_rewrites(terms, self.s._handle_left_value_all())
# TODO: To fully process such terms, we need to support slicing in target language
terms += [
"""
def f(x):
a1 = 0
a.b.c = z[a1]
a2 = 1
[d] = z[a2:]""", # Name RHS:
"""
def f(x):
a1 = 0
a3 = z[a1]
a.b.c = a3
a2 = 1
a4 = z[a2:]
[d] = a4""", # Name LHS expression:
"""
def f(x):
a1 = 0
a3 = z[a1]
x5 = a.b
x5.c = a3
a2 = 1
a4 = z[a2:]
[d] = a4""",
]
self.check_rewrites(terms)
def test_left_value_list_starred(self) -> None:
"""Test rewrites [*c, d] = z → [*c] = z[:-1]; d = z[-1].
It also pattern matches both tuples and lists as if they are the same."""
terms = [
"""
def f(x):
[*a,b] = z""",
"""
def f(x):
[*a] = z[:-1]
b = z[-1]""",
]
self.check_rewrites(terms, self.s._handle_left_value_list_starred())
self.check_rewrites(terms, self.s._handle_left_value_all())
# TODO: To fully process such terms, we need to support slicing in target language
terms += [
"""
def f(x):
a1 = -1
[*a] = z[:a1]
a2 = -1
b = z[a2]""", # Name RHS:
"""
def f(x):
a3 = 1
a1 = -a3
a4 = z[:a1]
[*a] = a4
a5 = 1
a2 = -a5
b = z[a2]""",
]
self.check_rewrites(terms)
def test_assign_subscript_slice_all(self) -> None:
"""General tests for the subsript rewrite set."""
# First, we give examples of regular normal forms, that is, forms
# where no more reduction is possible.
normal_forms = [
"""
def f(x):
a = b[c]
a = b[c:]
a = b[:d]
a = b[c:d]
a = b[c::e]
a = b[:d:e]
a = b[c:d:e]
a = b[::e]"""
]
self.check_rewrites(normal_forms, self.s._handle_assign_subscript_slice_all())
self.check_rewrites(normal_forms)
# Second, an example of the natural order of evaluation for these rewrite rules
progression = [
"""
def f(x):
a,b = c[d.e:f.g:h.i]""",
"""
def f(x):
a1 = d.e
a, b = c[a1:f.g:h.i]""",
"""
def f(x):
a1 = d.e
a2 = f.g
a, b = c[a1:a2:h.i]""",
"""
def f(x):
a1 = d.e
a2 = f.g
a3 = h.i
a, b = c[a1:a2:a3]""",
]
self.check_rewrites(progression, self.s._handle_assign_subscript_slice_all())
progression += [ # Name RHS:
"""
def f(x):
a1 = d.e
a2 = f.g
a3 = h.i
a4 = c[a1:a2:a3]
a, b = a4""", # Process first element:
"""
def f(x):
a1 = d.e
a2 = f.g
a3 = h.i
a4 = c[a1:a2:a3]
a = a4[0]
[b] = a4[1:]""", # Name constants:
"""
def f(x):
a1 = d.e
a2 = f.g
a3 = h.i
a4 = c[a1:a2:a3]
a5 = 0
a = a4[a5]
a6 = 1
[b] = a4[a6:]""", # Name RHS:
"""
def f(x):
a1 = d.e
a2 = f.g
a3 = h.i
a4 = c[a1:a2:a3]
a5 = 0
a = a4[a5]
a6 = 1
a7 = a4[a6:]
[b] = a7""",
]
self.check_rewrites(progression)
# Third, some stuck terms. The following form cannot go anywhere with any of the rules:
stuck = [
"""
def f(x):
a, b = 1 + c[d.e:f.g:h.i]"""
]
self.check_rewrites(stuck, self.s._handle_assign_subscript_slice_all())
# More specific stuck terms are also useful to express:
stuck = [
"""
def f(x):
a, b = 1 + c[d.e:f.g:h.i]"""
]
self.check_rewrites(stuck, self.s._handle_assign_subscript_slice_index_1())
stuck = [
"""
def f(x):
a, b = c.c[d.e]"""
]
self.check_rewrites(stuck, self.s._handle_assign_subscript_slice_index_2())
stuck = [
"""
def f(x):
a, b = c.c[d.e:]"""
]
self.check_rewrites(stuck, self.s._handle_assign_subscript_slice_lower())
stuck = [
"""
def f(x):
a, b = c[d.e:f.g]"""
]
self.check_rewrites(stuck, self.s._handle_assign_subscript_slice_upper())
stuck = [
"""
def f(x):
a, b = c[d:f.g:h.i]"""
]
self.check_rewrites(stuck, self.s._handle_assign_subscript_slice_step())
def test_assign_subscript_slice_index_1(self) -> None:
"""Test rewrites like a,b = c.d[e] → x = c.d; a,b = x[e]."""
terms = [
"""
def f(x):
a,b = c.d[e]""",
"""
def f(x):
a1 = c.d
a, b = a1[e]""",
]
self.check_rewrites(terms, self.s._handle_assign_subscript_slice_index_1())
self.check_rewrites(terms, self.s._handle_assign_subscript_slice_all())
terms += [ # Name RHS:
"""
def f(x):
a1 = c.d
a2 = a1[e]
a, b = a2""", # Process first element:
"""
def f(x):
a1 = c.d
a2 = a1[e]
a = a2[0]
[b] = a2[1:]""", # Name constants:
"""
def f(x):
a1 = c.d
a2 = a1[e]
a3 = 0
a = a2[a3]
a4 = 1
[b] = a2[a4:]""", # Name RHS:
"""
def f(x):
a1 = c.d
a2 = a1[e]
a3 = 0
a = a2[a3]
a4 = 1
a5 = a2[a4:]
[b] = a5""",
]
self.check_rewrites(terms)
def test_assign_subscript_slice_index_2(self) -> None:
"""Test rewrites like a,b = c[d.e] → x = d.e; a,b = c[x]."""
terms = [
"""
def f(x):
a,b = c[d.e]""",
"""
def f(x):
a1 = d.e
a, b = c[a1]""",
]
self.check_rewrites(terms, self.s._handle_assign_subscript_slice_index_2())
self.check_rewrites(terms, self.s._handle_assign_subscript_slice_all())
terms += [ # Name RHS:
"""
def f(x):
a1 = d.e
a2 = c[a1]
a, b = a2""", # Process first element:
"""
def f(x):
a1 = d.e
a2 = c[a1]
a = a2[0]
[b] = a2[1:]""", # Name constants:
"""
def f(x):
a1 = d.e
a2 = c[a1]
a3 = 0
a = a2[a3]
a4 = 1
[b] = a2[a4:]""", # Name RHS:
"""
def f(x):
a1 = d.e
a2 = c[a1]
a3 = 0
a = a2[a3]
a4 = 1
a5 = a2[a4:]
[b] = a5""",
]
self.check_rewrites(terms)
def test_assign_subscript_slice_index_2_not_too_soon(self) -> None:
"""Gives an example that shows that we do not rewrite too soon."""
terms = [
"""
def f(x):
a, b = c.c[d.e]""",
]
self.check_rewrites(terms, self.s._handle_assign_subscript_slice_index_2())
def test_assign_subscript_slice_lower(self) -> None:
"""Test rewrites like e = a[b.c:] → x = b.c; e = a[x:]."""
terms = [
"""
def f(x):
a,b = c[d.e:]
a = b[c.d:e:f]""",
"""
def f(x):
a1 = d.e
a, b = c[a1:]
a2 = c.d
a = b[a2:e:f]""",
]
self.check_rewrites(terms, self.s._handle_assign_subscript_slice_lower())
self.check_rewrites(terms, self.s._handle_assign_subscript_slice_all())
terms += [ # Name RHS:
"""
def f(x):
a1 = d.e
a3 = c[a1:]
a, b = a3
a2 = c.d
a = b[a2:e:f]""", # Process first element:
"""
def f(x):
a1 = d.e
a3 = c[a1:]
a = a3[0]
[b] = a3[1:]
a2 = c.d
a = b[a2:e:f]""", # Name constants:
"""
def f(x):
a1 = d.e
a3 = c[a1:]
a4 = 0
a = a3[a4]
a5 = 1
[b] = a3[a5:]
a2 = c.d
a = b[a2:e:f]""", # Process RHS:
"""
def f(x):
a1 = d.e
a3 = c[a1:]
a4 = 0
a = a3[a4]
a5 = 1
a6 = a3[a5:]
[b] = a6
a2 = c.d
a = b[a2:e:f]""",
]
self.check_rewrites(terms)
def disabled_test_assign_subscript_slice_upper_1(self) -> None:
"""Test rewrites like e = a[:b.c] → x = b.c; e = a[:x]."""
# TODO: Test does not pass; I suspect there was a merge conflict resolution
# error and this test should be updated or deleted. Disable it for now
# and sort it out later.
terms = [
"""
def f(x):
a,b = c[d:e.f]
a = b[c:d.e:f]
a,b = c [:e.f]
a,b = c [:e.f:g]""",
"""
def f(x):
a1 = e.f
a, b = c[d:a1]
a2 = d.e
a = b[c:a2:f]
a3 = e.f
a, b = c[:a3]
a4 = e.f
a, b = c[:a4:g]""",
]
self.check_rewrites(terms, self.s._handle_assign_subscript_slice_upper())
self.check_rewrites(terms, self.s._handle_assign_subscript_slice_all())
self.check_rewrites(terms)
def test_assign_subscript_slice_upper_2(self) -> None:
"""Test rewrites like e = a[::b.c] → x = b.c; e = a[::x]."""
terms = [
"""
def f(x):
a,b = c[d::e.f]
a = b[c:d:e.f]
a,b = c [::e.f]
a,b = c [:e:f.g]""",
"""
def f(x):
a1 = e.f
a, b = c[d::a1]
a2 = e.f
a = b[c:d:a2]
a3 = e.f
a, b = c[::a3]
a4 = f.g
a, b = c[:e:a4]""",
]
self.check_rewrites(terms, self.s._handle_assign_subscript_slice_step())
self.check_rewrites(terms, self.s._handle_assign_subscript_slice_all())
terms += [ # Name RHS:
"""
def f(x):
a1 = e.f
a5 = c[d::a1]
a, b = a5
a2 = e.f
a = b[c:d:a2]
a3 = e.f
a6 = c[::a3]
a, b = a6
a4 = f.g
a7 = c[:e:a4]
a, b = a7""", # Process first element:
"""
def f(x):
a1 = e.f
a5 = c[d::a1]
a = a5[0]
[b] = a5[1:]
a2 = e.f
a = b[c:d:a2]
a3 = e.f
a6 = c[::a3]
a = a6[0]
[b] = a6[1:]
a4 = f.g
a7 = c[:e:a4]
a = a7[0]
[b] = a7[1:]""", # Name constants:
"""
def f(x):
a1 = e.f
a5 = c[d::a1]
a8 = 0
a = a5[a8]
a9 = 1
[b] = a5[a9:]
a2 = e.f
a = b[c:d:a2]
a3 = e.f
a6 = c[::a3]
a10 = 0
a = a6[a10]
a11 = 1
[b] = a6[a11:]
a4 = f.g
a7 = c[:e:a4]
a12 = 0
a = a7[a12]
a13 = 1
[b] = a7[a13:]""", # Name RHS:
"""
def f(x):
a1 = e.f
a5 = c[d::a1]
a8 = 0
a = a5[a8]
a9 = 1
a14 = a5[a9:]
[b] = a14
a2 = e.f
a = b[c:d:a2]
a3 = e.f
a6 = c[::a3]
a10 = 0
a = a6[a10]
a11 = 1
a15 = a6[a11:]
[b] = a15
a4 = f.g
a7 = c[:e:a4]
a12 = 0
a = a7[a12]
a13 = 1
a16 = a7[a13:]
[b] = a16""",
]
self.check_rewrites(terms)
def test_assign_possibly_blocking_right_value(self) -> None:
"""Test rewrites like e1 = e2 → x = e2; e1 = x, as long as e1 and e2 are not names."""
# Here is what this rule achieves in isolation
terms = [
"""
def f(x):
a, b = a, b""",
"""
def f(x):
a1 = a, b
a, b = a1""",
]
self.check_rewrites(
terms, self.s._handle_assign_possibly_blocking_right_value()
)
# And here is what it achieves in the context of the other rules. In particular,
# it enables the left_value rules to go further than they can without it.
terms += [ # Process first element:
"""
def f(x):
a1 = a, b
a = a1[0]
[b] = a1[1:]""", # Name constants:
"""
def f(x):
a1 = a, b
a2 = 0
a = a1[a2]
a3 = 1
[b] = a1[a3:]""", # Name RHS:
"""
def f(x):
a1 = a, b
a2 = 0
a = a1[a2]
a3 = 1
a4 = a1[a3:]
[b] = a4""",
]
self.check_rewrites(terms)
def test_augmented_assignment(self) -> None:
"""Test rewrites involving += and other augmented assignments."""
source = """
def f(x):
x += 123
x *= 456
x.y.z /= 2
"""
expected = """
def f(x):
a1 = 123
x += a1
a2 = 456
x *= a2
a3 = x.y
a4 = a3.z
a5 = 2
a4 /= a5
a3.z = a4
"""
self.check_rewrite(source, expected)
source = """
def f(x):
a.b[c.d] -= 1
e.f[:] -= 2
g.h[:i.j] -= 2
k.m[n.o:] -= 3
p.q[r.s:t.u] -= 4
"""
expected = """
def f(x):
a1 = a.b
a6 = c.d
a11 = a1[a6]
a16 = 1
a11 -= a16
a1[a6] = a11
a2 = e.f
a7 = a2[:]
a12 = 2
a7 -= a12
a2[:] = a7
a3 = g.h
a8 = i.j
a13 = a3[:a8]
a17 = 2
a13 -= a17
a3[:a8] = a13
a4 = k.m
a9 = n.o
a14 = a4[a9:]
a18 = 3
a14 -= a18
a4[a9:] = a14
a5 = p.q
a10 = r.s
a15 = t.u
a19 = a5[a10:a15]
a20 = 4
a19 -= a20
a5[a10:a15] = a19
"""
self.check_rewrite(source, expected)
source = """
def f(x):
a.b[::] -= 1
c.d[e.f::] -= 2
g.h[:i.j:] -= 3
k.m[::n.o] -= 4
"""
expected = """
def f(x):
a1 = a.b
a5 = a1[:]
a9 = 1
a5 -= a9
a1[:] = a5
a2 = c.d
a6 = e.f
a10 = a2[a6:]
a13 = 2
a10 -= a13
a2[a6:] = a10
a3 = g.h
a7 = i.j
a11 = a3[:a7]
a14 = 3
a11 -= a14
a3[:a7] = a11
a4 = k.m
a8 = n.o
a12 = a4[::a8]
a15 = 4
a12 -= a15
a4[::a8] = a12
"""
self.check_rewrite(source, expected)
source = """
def f(x):
a.b[c.d:e.f:] -= 1
g.h[i.j::k.m] -= 2
n.o[:p.q:r.s] -= 3
t.u[v.w:x.y:z.zz] -= 4
"""
expected = """
def f(x):
a1 = a.b
a5 = c.d
a9 = e.f
a13 = a1[a5:a9]
a17 = 1
a13 -= a17
a1[a5:a9] = a13
a2 = g.h
a6 = i.j
a10 = k.m
a14 = a2[a6::a10]
a18 = 2
a14 -= a18
a2[a6::a10] = a14
a3 = n.o
a7 = p.q
a11 = r.s
a15 = a3[:a7:a11]
a19 = 3
a15 -= a19
a3[:a7:a11] = a15
a4 = t.u
a8 = v.w
a12 = x.y
a16 = z.zz
a20 = a4[a8:a12:a16]
a21 = 4
a20 -= a21
a4[a8:a12:a16] = a20
"""
self.check_rewrite(source, expected)
def test_rewrite_super(self) -> None:
# A call to super() with no arguments is very special in Python; it is syntactic
# sugar for a call to super(__class__, self), where self is the leftmost parameter
# and __class__ is a magical outer variable automatically initialized to the
# declaring class. We will handle "super()" calls specially later; we must
# not rewrite them. They must remain an ordinary call to "super()"
# rather than being reduced to the standard form
#
# a = []
# b = super(*a)
#
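# (A sketch of why that reduction would be wrong: the zero-argument form relies
# on the implicit __class__ cell that Python supplies only at the original call
# site. If the call were moved into generated helper code as
#     a = []
#     b = super(*a)
# it could fail at runtime with "RuntimeError: super(): __class__ cell not found",
# the same failure demonstrated in the bm_to_bmg tests.)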
source = """
class D(B):
def f(self):
super(D, self).g()
super().h()
"""
expected = """
class D(B):
def f(self):
a12 = [D]
a13 = [self]
r11 = a12 + a13
a5 = super(*r11)
a3 = a5.g
r7 = []
r9 = {}
u1 = a3(*r7, **r9)
a6 = super()
a4 = a6.h
r8 = []
r10 = {}
u2 = a4(*r8, **r10)
"""
self.check_rewrite(source, expected)
def test_matrix_multiply_single_assignment(self) -> None:
source = """
def f(x):
return x @ (y * z)
"""
expected = """
def f(x):
a2 = y * z
r1 = x @ a2
return r1
"""
self.check_rewrite(source, expected)
def test_lambda_elimination(self) -> None:
source = """
def f(x):
return lambda y: x * y + 2
"""
expected = """
def f(x):
def a2(y):
a4 = x * y
a5 = 2
r3 = a4 + a5
return r3
r1 = a2
return r1
"""
self.check_rewrite(source, expected)
def test_decorator_elimination(self) -> None:
source = """
@x
@y(z)
def f():
pass
"""
expected = """
def f():
pass
r3 = [z]
r6 = {}
a1 = y(*r3, **r6)
r4 = [f]
r7 = {}
f = a1(*r4, **r7)
r2 = [f]
r5 = {}
f = x(*r2, **r5)
"""
self.check_rewrite(source, expected)
def test_ifexp_elimination(self) -> None:
source = """
x = a + b * c if d + e * f else g + h * i
"""
expected = """
a3 = e * f
r2 = d + a3
if r2:
a4 = b * c
a1 = a + a4
else:
a5 = h * i
a1 = g + a5
x = a1
"""
self.check_rewrite(source, expected)
def test_single_assignment_handle_assign(self) -> None:
"""Test the rule for removing annotations"""
source = """
def f(x):
z:float = 1 + a
"""
expected = """
def f(x):
a1 = 1
z = a1 + a
"""
self.check_rewrite(
source,
expected,
many(_some_top_down(self.s._handle_assign())),
)
| beanmachine-main | tests/ppl/compiler/single_assignment_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for bm_to_bmg.py"""
import unittest
import astor
import beanmachine.ppl as bm
from beanmachine.ppl.compiler.bm_to_bmg import _bm_function_to_bmg_ast
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Dirichlet, Normal
class BaseModel:
@bm.random_variable
def normal(self):
return Normal(0.0, 1.0)
@bm.functional
def foo(self):
return self.normal() + 2.0
def bar(self):
return 3.0
class DerivedModel(BaseModel):
@bm.functional
def foo(self):
f = super().foo()
b = super(DerivedModel, self).bar()
return f * b # This should be (n() + 2) * 3
def bar(self):
return 4.0
@bm.random_variable
def legal_subscript_mutations():
t = tensor([0.0, 0.0])
t[0] = 0.0
t[1] = 1.0
t[0:] = 2.0
t[:1] = 3.0
t[0:1] = 4.0
t[0::] = 5.0
t[:1:] = 6.0
t[::1] = 7.0
t[0:1:] = 8.0
t[0::1] = 9.0
t[:1:1] = 10.0
t[0:1:1] = 11.0
return Dirichlet(t)
@bm.random_variable
def normal():
return Normal(0.0, 1.0)
@bm.random_variable
def flip():
return Bernoulli(0.5)
@bm.functional
def illegal_subscript_mutation_1():
# Mutate a tensor with a stochastic value:
t = tensor([0.0, 0.0])
t[0] = normal()
return t
@bm.functional
def illegal_subscript_mutation_2():
# Mutate a stochastic tensor
t = legal_subscript_mutations()
t[0] = 0.0
return t
@bm.functional
def illegal_subscript_mutation_3():
# Mutate a tensor with a stochastic index
t = tensor([0.0, 0.0])
t[flip()] = 1.0
return t
@bm.functional
def illegal_subscript_mutation_4():
# Mutate a tensor with a stochastic upper
t = tensor([0.0, 0.0])
t[0 : flip()] = 1.0
return t
@bm.functional
def illegal_subscript_mutation_5():
# Mutate a tensor with a stochastic step
t = tensor([0.0, 0.0])
t[0 : 1 : flip() + 1] = 1.0
return t
class CompilerTest(unittest.TestCase):
def test_super_call(self) -> None:
self.maxDiff = None
# A call to super() in Python is not a normal function. Consider:
def outer(s):
return s().x()
class B:
def x(self):
return 1
class D(B):
def x(self):
return 2
def ordinary(self):
return self.x() # 2
def sup1(self):
return super().x() # 1
def sup2(self):
s = super
return s().x() # Doesn't have to be a keyword
def callout(self):
return outer(super) # but the call to super() needs to be inside D.
self.assertEqual(D().ordinary(), 2)
self.assertEqual(D().sup1(), 1)
self.assertEqual(D().sup2(), 1)
# What's happening here is: "super()" is syntactic sugar for "super(__class__, self)"
# where __class__ is an automatically-generated outer variable of the method that
# contains the call to super(). That variable has the value of the containing class.
# When we call D().callout() here, there is no automatically-generated outer variable
# when super() is ultimately called, and therefore we get this confusing but expected
# exception raised:
with self.assertRaises(RuntimeError) as ex:
D().callout()
expected = "super(): __class__ cell not found"
observed = str(ex.exception)
self.assertEqual(expected.strip(), observed.strip())
# bm_to_bmg rewrites all random variables, all functionals, and their callees.
# We must ensure that all calls to super() are (1) syntactically exactly that;
# these calls must not be rewritten to bmg.handle_call, and (2) must have an
# outer variable __class__ which is initialized to the class which originally
# declared the random variable.
d = DerivedModel()
observed = BMGInference().to_dot([d.foo()], {})
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=2.0];
N5[label="+"];
N6[label=3.0];
N7[label="*"];
N8[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N5;
N4 -> N5;
N5 -> N7;
N6 -> N7;
N7 -> N8;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# We do this as follows:
# * the single assignment rewriter does not fully rewrite
# calls to super to their most general form; in particular
# it will not rewrite super() to x = [] / super(*x).
# * the bm_to_bmg rewriter does not rewrite calls to super
# into bmg.handle_function.
# * if the original function has an outer variable __class__ then
# we generate a new outer variable with the same name and value.
# Obtain the random variable for d.foo()
rv = d.foo()
# The random variable has a reference to the original *undecorated*
# D.foo, which has an outer variable __class__. Verify that we
# correctly recreate that outer variable in the rewritten function:
bmgast = _bm_function_to_bmg_ast(rv.function, "foo_helper")
observed = astor.to_source(bmgast)
expected = """
def foo_helper(bmg, __class__):
import operator
def foo(self):
a5 = super()
a1 = bmg.handle_dot_get(a5, 'foo')
r7 = []
r10 = {}
f = bmg.handle_function(a1, r7, r10)
a14 = [DerivedModel]
a15 = [self]
r13 = bmg.handle_function(operator.add, [a14, a15])
a6 = super(*r13)
a2 = bmg.handle_dot_get(a6, 'bar')
r8 = []
r11 = {}
b = bmg.handle_function(a2, r8, r11)
r3 = bmg.handle_function(operator.mul, [f, b])
return r3
a4 = bmg.handle_dot_get(bm, 'functional')
r9 = [foo]
r12 = {}
foo = bmg.handle_function(a4, r9, r12)
return foo
"""
self.assertEqual(observed.strip(), expected.strip())
def test_subscript_mutations(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([legal_subscript_mutations()], {})
expected = """
digraph "graph" {
N0[label="[11.0,10.0]"];
N1[label=Dirichlet];
N2[label=Sample];
N3[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
}"""
self.assertEqual(observed.strip(), expected.strip())
with self.assertRaises(ValueError) as ex:
BMGInference().to_dot([illegal_subscript_mutation_1()], {})
# TODO: Better error message
expected = (
"Mutating a tensor with a stochastic value "
+ "is not supported in Bean Machine Graph."
)
self.assertEqual(expected, str(ex.exception))
with self.assertRaises(ValueError) as ex:
BMGInference().to_dot([illegal_subscript_mutation_2()], {})
# TODO: Better error message
expected = "Mutating a stochastic value is not supported in Bean Machine Graph."
self.assertEqual(expected, str(ex.exception))
with self.assertRaises(ValueError) as ex:
BMGInference().to_dot([illegal_subscript_mutation_3()], {})
# TODO: Better error message
expected = (
"Mutating a collection or tensor with a stochastic index "
+ "is not supported in Bean Machine Graph."
)
self.assertEqual(expected, str(ex.exception))
with self.assertRaises(ValueError) as ex:
BMGInference().to_dot([illegal_subscript_mutation_4()], {})
# TODO: Better error message
expected = (
"Mutating a collection or tensor with a stochastic upper index "
+ "is not supported in Bean Machine Graph."
)
self.assertEqual(expected, str(ex.exception))
with self.assertRaises(ValueError) as ex:
BMGInference().to_dot([illegal_subscript_mutation_5()], {})
# TODO: Better error message
expected = (
"Mutating a collection or tensor with a stochastic step "
+ "is not supported in Bean Machine Graph."
)
self.assertEqual(expected, str(ex.exception))
| beanmachine-main | tests/ppl/compiler/bm_to_bmg_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test of realistic logistic regression model"""
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Normal
# We have N points with K coordinates each classified into one
# of two categories: red or blue. There is a line separating
# the sets of points; the idea is to deduce the most likely
# parameters of that line. The parameters are beta(0), beta(1)
# and beta(2); the line is y = (-b1/b2) x - (b0/b2), writing bi for beta(i).
#
# We have three parameters to define the line instead of two because
# these parameters also define how "mixed" the points are when close
# to the line.
# Points are generated so that posteriors should be
# centered on beta(0) around -1.0, beta(1) around 2.0,
# beta(2) around -3.0
N = 8
K = 2
X = [
[1.0000, 7.6483, 5.6988],
[1.0000, -6.2928, 1.1692],
[1.0000, 1.6583, -4.7142],
[1.0000, -7.7588, 7.9859],
[1.0000, -1.2421, 5.4628],
[1.0000, 6.4529, 2.3994],
[1.0000, -4.9269, 7.8679],
[1.0000, 4.2130, 2.6175],
]
# Classifications of those N points into two buckets:
red = tensor(0.0)
blue = tensor(1.0)
Y = [red, red, blue, red, red, blue, red, blue]
@bm.random_variable
def beta(k): # k is 0 to K
return Normal(0.0, 1.0)
@bm.random_variable
def y(n): # n is 0 to N-1
mu = X[n][0] * beta(0) + X[n][1] * beta(1) + X[n][2] * beta(2)
return Bernoulli(logits=mu)
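# Illustrative helper, not used by the tests below: the logit that y(n) sees
# under the ideal parameters beta = (-1.0, 2.0, -3.0). For n == 0 this is
# roughly -2.8, so P(blue) is about 0.06, which is consistent with Y[0] being red.
def _ideal_logit(n):
    ideal_beta = [-1.0, 2.0, -3.0]
    return sum(b * x for b, x in zip(ideal_beta, X[n]))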
queries = [beta(0), beta(1), beta(2)]
observations = {
y(0): Y[0],
y(1): Y[1],
y(2): Y[2],
y(3): Y[3],
y(4): Y[4],
y(5): Y[5],
y(6): Y[6],
y(7): Y[7],
}
expected_dot = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=Sample];
N06[label=7.6483];
N07[label="*"];
N08[label=5.6988];
N09[label="*"];
N10[label="+"];
N11[label="Bernoulli(logits)"];
N12[label=Sample];
N13[label="Observation False"];
N14[label=-6.2928];
N15[label="*"];
N16[label=1.1692];
N17[label="*"];
N18[label="+"];
N19[label="Bernoulli(logits)"];
N20[label=Sample];
N21[label="Observation False"];
N22[label=1.6583];
N23[label="*"];
N24[label=-4.7142];
N25[label="*"];
N26[label="+"];
N27[label="Bernoulli(logits)"];
N28[label=Sample];
N29[label="Observation True"];
N30[label=-7.7588];
N31[label="*"];
N32[label=7.9859];
N33[label="*"];
N34[label="+"];
N35[label="Bernoulli(logits)"];
N36[label=Sample];
N37[label="Observation False"];
N38[label=-1.2421];
N39[label="*"];
N40[label=5.4628];
N41[label="*"];
N42[label="+"];
N43[label="Bernoulli(logits)"];
N44[label=Sample];
N45[label="Observation False"];
N46[label=6.4529];
N47[label="*"];
N48[label=2.3994];
N49[label="*"];
N50[label="+"];
N51[label="Bernoulli(logits)"];
N52[label=Sample];
N53[label="Observation True"];
N54[label=-4.9269];
N55[label="*"];
N56[label=7.8679];
N57[label="*"];
N58[label="+"];
N59[label="Bernoulli(logits)"];
N60[label=Sample];
N61[label="Observation False"];
N62[label=4.213];
N63[label="*"];
N64[label=2.6175];
N65[label="*"];
N66[label="+"];
N67[label="Bernoulli(logits)"];
N68[label=Sample];
N69[label="Observation True"];
N70[label=Query];
N71[label=Query];
N72[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N02 -> N05;
N03 -> N10;
N03 -> N18;
N03 -> N26;
N03 -> N34;
N03 -> N42;
N03 -> N50;
N03 -> N58;
N03 -> N66;
N03 -> N70;
N04 -> N07;
N04 -> N15;
N04 -> N23;
N04 -> N31;
N04 -> N39;
N04 -> N47;
N04 -> N55;
N04 -> N63;
N04 -> N71;
N05 -> N09;
N05 -> N17;
N05 -> N25;
N05 -> N33;
N05 -> N41;
N05 -> N49;
N05 -> N57;
N05 -> N65;
N05 -> N72;
N06 -> N07;
N07 -> N10;
N08 -> N09;
N09 -> N10;
N10 -> N11;
N11 -> N12;
N12 -> N13;
N14 -> N15;
N15 -> N18;
N16 -> N17;
N17 -> N18;
N18 -> N19;
N19 -> N20;
N20 -> N21;
N22 -> N23;
N23 -> N26;
N24 -> N25;
N25 -> N26;
N26 -> N27;
N27 -> N28;
N28 -> N29;
N30 -> N31;
N31 -> N34;
N32 -> N33;
N33 -> N34;
N34 -> N35;
N35 -> N36;
N36 -> N37;
N38 -> N39;
N39 -> N42;
N40 -> N41;
N41 -> N42;
N42 -> N43;
N43 -> N44;
N44 -> N45;
N46 -> N47;
N47 -> N50;
N48 -> N49;
N49 -> N50;
N50 -> N51;
N51 -> N52;
N52 -> N53;
N54 -> N55;
N55 -> N58;
N56 -> N57;
N57 -> N58;
N58 -> N59;
N59 -> N60;
N60 -> N61;
N62 -> N63;
N63 -> N66;
N64 -> N65;
N65 -> N66;
N66 -> N67;
N67 -> N68;
N68 -> N69;
}
"""
class LogisticRegressionTest(unittest.TestCase):
def test_logistic_regression_inference(self) -> None:
self.maxDiff = None
bmg = BMGInference()
samples = bmg.infer(queries, observations, 1000)
b0 = samples[beta(0)].mean()
b1 = samples[beta(1)].mean()
b2 = samples[beta(2)].mean()
slope_ob = -b1 / b2
int_ob = -b0 / b2
slope_ex = 0.64 # Should be 0.67
int_ex = 0.16 # Should be -0.33; reasonable guess given thin data
self.assertAlmostEqual(first=slope_ob, second=slope_ex, delta=0.05)
self.assertAlmostEqual(first=int_ob, second=int_ex, delta=0.05)
def test_logistic_regression_to_dot(self) -> None:
self.maxDiff = None
bmg = BMGInference()
observed = bmg.to_dot(queries, observations)
self.assertEqual(expected_dot.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/logistic_regression_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for error_report.py"""
import unittest
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.bmg_types import NegativeReal, Probability
from beanmachine.ppl.compiler.error_report import ErrorReport, Violation
class ErrorReportTest(unittest.TestCase):
def test_error_report(self) -> None:
"""test_error_report"""
bmg = BMGraphBuilder()
r = bmg.add_real(-2.5)
b = bmg.add_bernoulli(r)
v = Violation(r, NegativeReal, Probability, b, "probability", {})
e = ErrorReport()
e.add_error(v)
expected = """
The probability of a Bernoulli is required to be a probability but is a negative real."""
self.assertEqual(expected.strip(), str(e).strip())
| beanmachine-main | tests/ppl/compiler/error_report_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import (
Bernoulli,
Beta,
Gamma,
HalfCauchy,
Normal,
StudentT,
Uniform,
)
@bm.random_variable
def beta(n):
return Beta(2.0, 2.0)
@bm.random_variable
def flip_beta():
return Bernoulli(tensor([beta(0), beta(1)]))
@bm.random_variable
def beta_2_2():
return Beta(2.0, tensor([3.0, 4.0]))
@bm.random_variable
def flip_beta_2_2():
return Bernoulli(beta_2_2())
@bm.random_variable
def uniform_2_2():
return Uniform(0.0, tensor([1.0, 1.0]))
@bm.random_variable
def flip_uniform_2_2():
return Bernoulli(uniform_2_2())
@bm.random_variable
def flip_logits():
return Bernoulli(logits=tensor([beta(0), beta(1)]))
@bm.random_variable
def flip_const():
return Bernoulli(tensor([0.25, 0.75]))
@bm.random_variable
def flip_const_4():
return Bernoulli(tensor([0.25, 0.75, 0.5, 0.5]))
@bm.random_variable
def flip_const_2_3():
return Bernoulli(tensor([[0.25, 0.75, 0.5], [0.125, 0.875, 0.625]]))
@bm.random_variable
def normal_2_3():
mus = flip_const_2_3() # 2 x 3 tensor of 0 or 1
sigmas = tensor([2.0, 3.0, 4.0])
return Normal(mus, sigmas)
@bm.random_variable
def hc_3():
return HalfCauchy(tensor([1.0, 2.0, 3.0]))
@bm.random_variable
def studentt_2_3():
return StudentT(hc_3(), normal_2_3(), hc_3())
@bm.functional
def operators():
# Note that we do NOT devectorize the multiplication; it gets
# turned into a MatrixScale.
phi = Normal(0, 1).cdf
return phi(((beta_2_2() + tensor([[5.0, 6.0], [7.0, 8.0]])) * 10.0).exp())
@bm.functional
def multiplication():
return beta_2_2() * tensor([5.0, 6.0])
@bm.functional
def complement_with_log1p():
return (-beta_2_2()).log1p()
@bm.random_variable
def beta1234():
return Beta(tensor([1.0, 2.0]), tensor([3.0, 4.0]))
@bm.functional
def sum_inverted_log_probs():
p = tensor([5.0, 6.0]) * (-beta1234()).log1p()
return p.sum()
@bm.random_variable
def gamma():
return Gamma(1, 1)
@bm.functional
def normal_log_probs():
mu = tensor([5.0, 6.0])
x = tensor([7.0, 8.0])
return Normal(mu, gamma()).log_prob(x)
class FixVectorizedModelsTest(unittest.TestCase):
def test_fix_vectorized_models_1(self) -> None:
self.maxDiff = None
observations = {flip_beta(): tensor([0.0, 1.0])}
queries = [flip_beta(), flip_const()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite:
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=Beta];
N02[label=Sample];
N03[label=Sample];
N04[label=Tensor];
N05[label=Bernoulli];
N06[label=Sample];
N07[label="Observation tensor([0., 1.])"];
N08[label=Query];
N09[label="[0.25,0.75]"];
N10[label=Bernoulli];
N11[label=Sample];
N12[label=Query];
N00 -> N01;
N00 -> N01;
N01 -> N02;
N01 -> N03;
N02 -> N04;
N03 -> N04;
N04 -> N05;
N05 -> N06;
N06 -> N07;
N06 -> N08;
N09 -> N10;
N10 -> N11;
N11 -> N12;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=Beta];
N02[label=Sample];
N03[label=Sample];
N04[label=Bernoulli];
N05[label=Sample];
N06[label=Bernoulli];
N07[label=Sample];
N08[label="Observation False"];
N09[label="Observation True"];
N10[label=2];
N11[label=1];
N12[label=ToMatrix];
N13[label=Query];
N14[label=0.25];
N15[label=Bernoulli];
N16[label=Sample];
N17[label=0.75];
N18[label=Bernoulli];
N19[label=Sample];
N20[label=ToMatrix];
N21[label=Query];
N00 -> N01;
N00 -> N01;
N01 -> N02;
N01 -> N03;
N02 -> N04;
N03 -> N06;
N04 -> N05;
N05 -> N08;
N05 -> N12;
N06 -> N07;
N07 -> N09;
N07 -> N12;
N10 -> N12;
N10 -> N20;
N11 -> N12;
N11 -> N20;
N12 -> N13;
N14 -> N15;
N15 -> N16;
N16 -> N20;
N17 -> N18;
N18 -> N19;
N19 -> N20;
N20 -> N21;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_2(self) -> None:
self.maxDiff = None
observations = {flip_const_4(): tensor([0.0, 1.0, 0.0, 1.0])}
queries = [flip_const_4()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite:
expected = """
digraph "graph" {
N0[label="[0.25,0.75,0.5,0.5]"];
N1[label=Bernoulli];
N2[label=Sample];
N3[label="Observation tensor([0., 1., 0., 1.])"];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N2 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After:
# Note that due to the order in which we do the rewriting we
# end up with a not-deduplicated Bernoulli(0.5) node here, which
# is slightly unfortunate but probably not worth fixing right now.
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=0.25];
N01[label=Bernoulli];
N02[label=Sample];
N03[label=0.75];
N04[label=Bernoulli];
N05[label=Sample];
N06[label=0.5];
N07[label=Bernoulli];
N08[label=Sample];
N09[label=Bernoulli];
N10[label=Sample];
N11[label="Observation False"];
N12[label="Observation True"];
N13[label="Observation False"];
N14[label="Observation True"];
N15[label=4];
N16[label=1];
N17[label=ToMatrix];
N18[label=Query];
N00 -> N01;
N01 -> N02;
N02 -> N11;
N02 -> N17;
N03 -> N04;
N04 -> N05;
N05 -> N12;
N05 -> N17;
N06 -> N07;
N06 -> N09;
N07 -> N08;
N08 -> N13;
N08 -> N17;
N09 -> N10;
N10 -> N14;
N10 -> N17;
N15 -> N17;
N16 -> N17;
N17 -> N18;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_3(self) -> None:
self.maxDiff = None
observations = {flip_const_2_3(): tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])}
queries = [flip_const_2_3()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite:
expected = """
digraph "graph" {
N0[label="[[0.25,0.75,0.5],\\\\n[0.125,0.875,0.625]]"];
N1[label=Bernoulli];
N2[label=Sample];
N3[label="Observation tensor([[0., 0., 0.],\\n [1., 1., 1.]])"];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N2 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=0.25];
N01[label=Bernoulli];
N02[label=Sample];
N03[label=0.75];
N04[label=Bernoulli];
N05[label=Sample];
N06[label=0.5];
N07[label=Bernoulli];
N08[label=Sample];
N09[label=0.125];
N10[label=Bernoulli];
N11[label=Sample];
N12[label=0.875];
N13[label=Bernoulli];
N14[label=Sample];
N15[label=0.625];
N16[label=Bernoulli];
N17[label=Sample];
N18[label="Observation False"];
N19[label="Observation False"];
N20[label="Observation False"];
N21[label="Observation True"];
N22[label="Observation True"];
N23[label="Observation True"];
N24[label=3];
N25[label=2];
N26[label=ToMatrix];
N27[label=Query];
N00 -> N01;
N01 -> N02;
N02 -> N18;
N02 -> N26;
N03 -> N04;
N04 -> N05;
N05 -> N19;
N05 -> N26;
N06 -> N07;
N07 -> N08;
N08 -> N20;
N08 -> N26;
N09 -> N10;
N10 -> N11;
N11 -> N21;
N11 -> N26;
N12 -> N13;
N13 -> N14;
N14 -> N22;
N14 -> N26;
N15 -> N16;
N16 -> N17;
N17 -> N23;
N17 -> N26;
N24 -> N26;
N25 -> N26;
N26 -> N27;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_4(self) -> None:
# Demonstrate we can also do devectorizations on logits-style Bernoullis.
# (A logits Bernoulli with a beta prior is a likely mistake in a real model,
# but it is a convenient test case.)
self.maxDiff = None
observations = {}
queries = [flip_logits()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite:
expected = """
digraph "graph" {
N0[label=2.0];
N1[label=Beta];
N2[label=Sample];
N3[label=Sample];
N4[label=Tensor];
N5[label="Bernoulli(logits)"];
N6[label=Sample];
N7[label=Query];
N0 -> N1;
N0 -> N1;
N1 -> N2;
N1 -> N3;
N2 -> N4;
N3 -> N4;
N4 -> N5;
N5 -> N6;
N6 -> N7;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=Beta];
N02[label=Sample];
N03[label=Sample];
N04[label=ToReal];
N05[label="Bernoulli(logits)"];
N06[label=Sample];
N07[label=ToReal];
N08[label="Bernoulli(logits)"];
N09[label=Sample];
N10[label=2];
N11[label=1];
N12[label=ToMatrix];
N13[label=Query];
N00 -> N01;
N00 -> N01;
N01 -> N02;
N01 -> N03;
N02 -> N04;
N03 -> N07;
N04 -> N05;
N05 -> N06;
N06 -> N12;
N07 -> N08;
N08 -> N09;
N09 -> N12;
N10 -> N12;
N11 -> N12;
N12 -> N13;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_5(self) -> None:
self.maxDiff = None
observations = {}
queries = [studentt_2_3()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite. Note that we have a size[3] stochastic input and
# a size[2, 3] stochastic input to the StudentT, and we broadcast the three
# HalfCauchy samples correctly
expected = """
digraph "graph" {
N00[label="[1.0,2.0,3.0]"];
N01[label=HalfCauchy];
N02[label=Sample];
N03[label="[[0.25,0.75,0.5],\\\\n[0.125,0.875,0.625]]"];
N04[label=Bernoulli];
N05[label=Sample];
N06[label="[2.0,3.0,4.0]"];
N07[label=Normal];
N08[label=Sample];
N09[label=StudentT];
N10[label=Sample];
N11[label=Query];
N00 -> N01;
N01 -> N02;
N02 -> N09;
N02 -> N09;
N03 -> N04;
N04 -> N05;
N05 -> N07;
N06 -> N07;
N07 -> N08;
N08 -> N09;
N09 -> N10;
N10 -> N11;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=1.0];
N01[label=HalfCauchy];
N02[label=Sample];
N03[label=2.0];
N04[label=HalfCauchy];
N05[label=Sample];
N06[label=3.0];
N07[label=HalfCauchy];
N08[label=Sample];
N09[label=0.25];
N10[label=Bernoulli];
N11[label=Sample];
N12[label=0.75];
N13[label=Bernoulli];
N14[label=Sample];
N15[label=0.5];
N16[label=Bernoulli];
N17[label=Sample];
N18[label=0.125];
N19[label=Bernoulli];
N20[label=Sample];
N21[label=0.875];
N22[label=Bernoulli];
N23[label=Sample];
N24[label=0.625];
N25[label=Bernoulli];
N26[label=Sample];
N27[label=ToReal];
N28[label=Normal];
N29[label=Sample];
N30[label=ToReal];
N31[label=Normal];
N32[label=Sample];
N33[label=ToReal];
N34[label=4.0];
N35[label=Normal];
N36[label=Sample];
N37[label=ToReal];
N38[label=Normal];
N39[label=Sample];
N40[label=ToReal];
N41[label=Normal];
N42[label=Sample];
N43[label=ToReal];
N44[label=Normal];
N45[label=Sample];
N46[label=StudentT];
N47[label=Sample];
N48[label=StudentT];
N49[label=Sample];
N50[label=StudentT];
N51[label=Sample];
N52[label=StudentT];
N53[label=Sample];
N54[label=StudentT];
N55[label=Sample];
N56[label=StudentT];
N57[label=Sample];
N58[label=3];
N59[label=2];
N60[label=ToMatrix];
N61[label=Query];
N00 -> N01;
N01 -> N02;
N02 -> N46;
N02 -> N46;
N02 -> N52;
N02 -> N52;
N03 -> N04;
N03 -> N28;
N03 -> N38;
N04 -> N05;
N05 -> N48;
N05 -> N48;
N05 -> N54;
N05 -> N54;
N06 -> N07;
N06 -> N31;
N06 -> N41;
N07 -> N08;
N08 -> N50;
N08 -> N50;
N08 -> N56;
N08 -> N56;
N09 -> N10;
N10 -> N11;
N11 -> N27;
N12 -> N13;
N13 -> N14;
N14 -> N30;
N15 -> N16;
N16 -> N17;
N17 -> N33;
N18 -> N19;
N19 -> N20;
N20 -> N37;
N21 -> N22;
N22 -> N23;
N23 -> N40;
N24 -> N25;
N25 -> N26;
N26 -> N43;
N27 -> N28;
N28 -> N29;
N29 -> N46;
N30 -> N31;
N31 -> N32;
N32 -> N48;
N33 -> N35;
N34 -> N35;
N34 -> N44;
N35 -> N36;
N36 -> N50;
N37 -> N38;
N38 -> N39;
N39 -> N52;
N40 -> N41;
N41 -> N42;
N42 -> N54;
N43 -> N44;
N44 -> N45;
N45 -> N56;
N46 -> N47;
N47 -> N60;
N48 -> N49;
N49 -> N60;
N50 -> N51;
N51 -> N60;
N52 -> N53;
N53 -> N60;
N54 -> N55;
N55 -> N60;
N56 -> N57;
N57 -> N60;
N58 -> N60;
N59 -> N60;
N60 -> N61;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_6(self) -> None:
self.maxDiff = None
observations = {}
queries = [flip_beta_2_2(), flip_uniform_2_2()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite: notice that here torch automatically
# broadcast the 2.0 to [2.0, 2.0] for us when the node was accumulated,
# and similarly for 0.0.
expected = """
digraph "graph" {
N00[label="[2.0,2.0]"];
N01[label="[3.0,4.0]"];
N02[label=Beta];
N03[label=Sample];
N04[label=Bernoulli];
N05[label=Sample];
N06[label=Query];
N07[label="[0.0,0.0]"];
N08[label="[1.0,1.0]"];
N09[label=Uniform];
N10[label=Sample];
N11[label=Bernoulli];
N12[label=Sample];
N13[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N03 -> N04;
N04 -> N05;
N05 -> N06;
N07 -> N09;
N08 -> N09;
N09 -> N10;
N10 -> N11;
N11 -> N12;
N12 -> N13;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After: notice that we correctly generate two samples from a Flat distribution
# here.
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=3.0];
N02[label=Beta];
N03[label=Sample];
N04[label=4.0];
N05[label=Beta];
N06[label=Sample];
N07[label=Bernoulli];
N08[label=Sample];
N09[label=Bernoulli];
N10[label=Sample];
N11[label=2];
N12[label=1];
N13[label=ToMatrix];
N14[label=Query];
N15[label=Flat];
N16[label=Sample];
N17[label=Sample];
N18[label=Bernoulli];
N19[label=Sample];
N20[label=Bernoulli];
N21[label=Sample];
N22[label=ToMatrix];
N23[label=Query];
N00 -> N02;
N00 -> N05;
N01 -> N02;
N02 -> N03;
N03 -> N07;
N04 -> N05;
N05 -> N06;
N06 -> N09;
N07 -> N08;
N08 -> N13;
N09 -> N10;
N10 -> N13;
N11 -> N13;
N11 -> N22;
N12 -> N13;
N12 -> N22;
N13 -> N14;
N15 -> N16;
N15 -> N17;
N16 -> N18;
N17 -> N20;
N18 -> N19;
N19 -> N22;
N20 -> N21;
N21 -> N22;
N22 -> N23;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_7(self) -> None:
self.maxDiff = None
observations = {}
queries = [operators()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite:
expected = """
digraph "graph" {
N00[label="[2.0,2.0]"];
N01[label="[3.0,4.0]"];
N02[label=Beta];
N03[label=Sample];
N04[label="[[5.0,6.0],\\\\n[7.0,8.0]]"];
N05[label="+"];
N06[label=10.0];
N07[label="*"];
N08[label=Exp];
N09[label=Phi];
N10[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N03 -> N05;
N04 -> N05;
N05 -> N07;
N06 -> N07;
N07 -> N08;
N08 -> N09;
N09 -> N10;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# Verify that it works in BMG.
g, _ = BMGInference().to_graph(queries, observations)
observed = g.to_dot()
expected = """
digraph "graph" {
N0[label="2"];
N1[label="3"];
N2[label="Beta"];
N3[label="~"];
N4[label="4"];
N5[label="Beta"];
N6[label="~"];
N7[label="10"];
N8[label="2"];
N9[label="1"];
N10[label="ToMatrix"];
N11[label="ToPosReal"];
N12[label="Broadcast"];
N13[label="matrix"];
N14[label="MatrixAdd"];
N15[label="MatrixScale"];
N16[label="MatrixExp"];
N17[label="ToReal"];
N18[label="MatrixPhi"];
N0 -> N2;
N0 -> N5;
N1 -> N2;
N2 -> N3;
N3 -> N10;
N4 -> N5;
N5 -> N6;
N6 -> N10;
N7 -> N15;
N8 -> N10;
N8 -> N12;
N8 -> N12;
N9 -> N10;
N10 -> N11;
N11 -> N12;
N12 -> N14;
N13 -> N14;
N14 -> N15;
N15 -> N16;
N16 -> N17;
N17 -> N18;
Q0[label="Query"];
N18 -> Q0;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_8(self) -> None:
self.maxDiff = None
observations = {}
queries = [multiplication()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite:
expected = """
digraph "graph" {
N0[label="[2.0,2.0]"];
N1[label="[3.0,4.0]"];
N2[label=Beta];
N3[label=Sample];
N4[label="[5.0,6.0]"];
N5[label="*"];
N6[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=3.0];
N02[label=Beta];
N03[label=Sample];
N04[label=4.0];
N05[label=Beta];
N06[label=Sample];
N07[label=2];
N08[label=1];
N09[label=ToMatrix];
N10[label=ToPosRealMatrix];
N11[label="[5.0,6.0]"];
N12[label=ElementwiseMult];
N13[label=Query];
N00 -> N02;
N00 -> N05;
N01 -> N02;
N02 -> N03;
N03 -> N09;
N04 -> N05;
N05 -> N06;
N06 -> N09;
N07 -> N09;
N08 -> N09;
N09 -> N10;
N10 -> N12;
N11 -> N12;
N12 -> N13;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_9(self) -> None:
self.maxDiff = None
observations = {}
queries = [complement_with_log1p()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite:
expected = """
digraph "graph" {
N0[label="[2.0,2.0]"];
N1[label="[3.0,4.0]"];
N2[label=Beta];
N3[label=Sample];
N4[label="-"];
N5[label=Log1p];
N6[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=3.0];
N02[label=Beta];
N03[label=Sample];
N04[label=4.0];
N05[label=Beta];
N06[label=Sample];
N07[label=2];
N08[label=1];
N09[label=complement];
N10[label=Log];
N11[label=complement];
N12[label=Log];
N13[label=ToMatrix];
N14[label=Query];
N00 -> N02;
N00 -> N05;
N01 -> N02;
N02 -> N03;
N03 -> N09;
N04 -> N05;
N05 -> N06;
N06 -> N11;
N07 -> N13;
N08 -> N13;
N09 -> N10;
N10 -> N13;
N11 -> N12;
N12 -> N13;
N13 -> N14;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_10(self) -> None:
self.maxDiff = None
queries = [sum_inverted_log_probs()]
observations = {}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label=1.0];
N01[label=3.0];
N02[label=Beta];
N03[label=Sample];
N04[label=2.0];
N05[label=4.0];
N06[label=Beta];
N07[label=Sample];
N08[label="[5.0,6.0]"];
N09[label=2];
N10[label=1];
N11[label=complement];
N12[label=Log];
N13[label=complement];
N14[label=Log];
N15[label=ToMatrix];
N16[label=ToRealMatrix];
N17[label=ElementwiseMult];
N18[label=MatrixSum];
N19[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N03 -> N11;
N04 -> N06;
N05 -> N06;
N06 -> N07;
N07 -> N13;
N08 -> N17;
N09 -> N15;
N10 -> N15;
N11 -> N12;
N12 -> N15;
N13 -> N14;
N14 -> N15;
N15 -> N16;
N16 -> N17;
N17 -> N18;
N18 -> N19;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_11(self) -> None:
self.maxDiff = None
queries = [normal_log_probs()]
observations = {}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label=1.0];
N01[label=Gamma];
N02[label=Sample];
N03[label=2];
N04[label=1];
N05[label=5.0];
N06[label=Normal];
N07[label=7.0];
N08[label=LogProb];
N09[label=6.0];
N10[label=Normal];
N11[label=8.0];
N12[label=LogProb];
N13[label=ToMatrix];
N14[label=Query];
N00 -> N01;
N00 -> N01;
N01 -> N02;
N02 -> N06;
N02 -> N10;
N03 -> N13;
N04 -> N13;
N05 -> N06;
N06 -> N08;
N07 -> N08;
N08 -> N13;
N09 -> N10;
N10 -> N12;
N11 -> N12;
N12 -> N13;
N13 -> N14;
}
"""
self.assertEqual(observed.strip(), expected.strip())
| beanmachine-main | tests/ppl/compiler/fix_vectorized_models_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for bm_graph_builder.py"""
import unittest
from typing import Any
import torch
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.gen_bmg_cpp import to_bmg_cpp
from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph
from beanmachine.ppl.compiler.gen_bmg_python import to_bmg_python
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.compiler.runtime import BMGRuntime
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from torch import Tensor
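# Strip leading and trailing whitespace from every line of a multi-line string
# so that expected graph dumps can be compared while ignoring indentation.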
def tidy(s: str) -> str:
return "\n".join(c.strip() for c in s.strip().split("\n")).strip()
def tensor_equality(x: Tensor, y: Tensor) -> bool:
# Tensor equality is weird. Suppose x and y are both
# tensor([1.0, 2.0]). Then x.eq(y) is tensor([True, True]),
# and x.eq(y).all() is tensor(True).
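    # For example (illustrative sketch, not executed by this test):
    #   x = tensor([1.0, 2.0]); y = tensor([1.0, 2.0])
    #   x.eq(y)              # tensor([True, True])
    #   x.eq(y).all()        # tensor(True)
    #   bool(x.eq(y).all())  # True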
return bool(x.eq(y).all())
class BMGraphBuilderTest(unittest.TestCase):
def assertEqual(self, x: Any, y: Any) -> bool:
if isinstance(x, Tensor) and isinstance(y, Tensor):
return tensor_equality(x, y)
return super().assertEqual(x, y)
def test_graph_builder_1(self) -> None:
# Just a trivial model to test whether we can take a properly-typed
# accumulated graph and turn it into BMG, DOT, or a program that
# produces a BMG.
#
# @random_variable def flip(): return Bernoulli(0.5)
# @functional def mult(): return (-flip() + 2) * 2
bmg = BMGraphBuilder()
half = bmg.add_probability(0.5)
two = bmg.add_real(2)
flip = bmg.add_bernoulli(half)
samp = bmg.add_sample(flip)
real = bmg.add_to_real(samp)
neg = bmg.add_negate(real)
add = bmg.add_addition(two, neg)
mult = bmg.add_multiplication(two, add)
bmg.add_observation(samp, True)
bmg.add_query(mult, RVIdentifier(wrapper=lambda a, b: a, arguments=(1, 1)))
observed = to_dot(bmg, label_edges=False)
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label="Observation True"];
N4[label=2];
N5[label=ToReal];
N6[label="-"];
N7[label="+"];
N8[label="*"];
N9[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N2 -> N5;
N4 -> N7;
N4 -> N8;
N5 -> N6;
N6 -> N7;
N7 -> N8;
N8 -> N9;
}"""
self.maxDiff = None
self.assertEqual(expected.strip(), observed.strip())
g = to_bmg_graph(bmg).graph
observed = g.to_string()
expected = """
0: CONSTANT(probability 0.5) (out nodes: 1)
1: BERNOULLI(0) (out nodes: 2)
2: SAMPLE(1) (out nodes: 4) observed to be boolean 1
3: CONSTANT(real 2) (out nodes: 6, 7)
4: TO_REAL(2) (out nodes: 5)
5: NEGATE(4) (out nodes: 6)
6: ADD(3, 5) (out nodes: 7)
7: MULTIPLY(3, 6) (out nodes: ) queried
"""
self.assertEqual(tidy(expected), tidy(observed))
observed = to_bmg_python(bmg).code
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_probability(0.5)
n1 = g.add_distribution(
graph.DistributionType.BERNOULLI,
graph.AtomicType.BOOLEAN,
[n0],
)
n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
g.observe(n2, True)
n3 = g.add_constant_real(2.0)
n4 = g.add_operator(graph.OperatorType.TO_REAL, [n2])
n5 = g.add_operator(graph.OperatorType.NEGATE, [n4])
n6 = g.add_operator(graph.OperatorType.ADD, [n3, n5])
n7 = g.add_operator(graph.OperatorType.MULTIPLY, [n3, n6])
q0 = g.query(n7)
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_cpp(bmg).code
expected = """
graph::Graph g;
uint n0 = g.add_constant_probability(0.5);
uint n1 = g.add_distribution(
graph::DistributionType::BERNOULLI,
graph::AtomicType::BOOLEAN,
std::vector<uint>({n0}));
uint n2 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
g.observe(n2, true);
uint n3 = g.add_constant_real(2.0);
uint n4 = g.add_operator(
graph::OperatorType::TO_REAL, std::vector<uint>({n2}));
uint n5 = g.add_operator(
graph::OperatorType::NEGATE, std::vector<uint>({n4}));
uint n6 = g.add_operator(
graph::OperatorType::ADD, std::vector<uint>({n3, n5}));
uint n7 = g.add_operator(
graph::OperatorType::MULTIPLY, std::vector<uint>({n3, n6}));
uint q0 = g.query(n7);
"""
self.assertEqual(expected.strip(), observed.strip())
def test_graph_builder_2(self) -> None:
bmg = BMGraphBuilder()
one = bmg.add_pos_real(1)
two = bmg.add_pos_real(2)
# These should all be folded:
four = bmg.add_power(two, two)
fourth = bmg.add_division(one, four)
flip = bmg.add_bernoulli(fourth)
samp = bmg.add_sample(flip)
inv = bmg.add_complement(samp) # NOT operation
real = bmg.add_to_positive_real(inv)
div = bmg.add_division(real, two)
p = bmg.add_power(div, two)
lg = bmg.add_log(p)
bmg.add_query(lg, RVIdentifier(wrapper=lambda a, b: a, arguments=(1, 1)))
# Note that the orphan nodes "1" and "4" are not stripped out
# by default. If you want them gone, the "after_transform" flag does
# a type check and also removes everything that is not an ancestor
# of a query or observation.
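        # For example (sketch; this mirrors the after_transform flag used with
        # BMGInference().to_dot elsewhere in these tests):
        #   to_dot(bmg, label_edges=False, after_transform=True)
        # would omit the orphaned constants N00 and N01 shown below.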
observed = to_dot(bmg, label_edges=False)
expected = """
digraph "graph" {
N00[label=1];
N01[label=4];
N02[label=0.25];
N03[label=Bernoulli];
N04[label=Sample];
N05[label=complement];
N06[label=ToPosReal];
N07[label=2];
N08[label="/"];
N09[label="**"];
N10[label=Log];
N11[label=Query];
N02 -> N03;
N03 -> N04;
N04 -> N05;
N05 -> N06;
N06 -> N08;
N07 -> N08;
N07 -> N09;
N08 -> N09;
N09 -> N10;
N10 -> N11;
}
"""
self.maxDiff = None
self.assertEqual(expected.strip(), observed.strip())
g = to_bmg_graph(bmg).graph
observed = g.to_string()
# Here however the orphaned nodes are never added to the graph.
expected = """
0: CONSTANT(probability 0.25) (out nodes: 1)
1: BERNOULLI(0) (out nodes: 2)
2: SAMPLE(1) (out nodes: 3)
3: COMPLEMENT(2) (out nodes: 4)
4: TO_POS_REAL(3) (out nodes: 6)
5: CONSTANT(positive real 0.5) (out nodes: 6)
6: MULTIPLY(4, 5) (out nodes: 8)
7: CONSTANT(positive real 2) (out nodes: 8)
8: POW(6, 7) (out nodes: 9)
9: LOG(8) (out nodes: ) queried
"""
self.assertEqual(tidy(expected), tidy(observed))
def test_to_positive_real(self) -> None:
"""Test to_positive_real"""
bmg = BMGraphBuilder()
two = bmg.add_pos_real(2.0)
# to_positive_real on a positive real constant is an identity
self.assertEqual(bmg.add_to_positive_real(two), two)
beta22 = bmg.add_beta(two, two)
to_pr = bmg.add_to_positive_real(beta22)
# to_positive_real nodes are deduplicated
self.assertEqual(bmg.add_to_positive_real(beta22), to_pr)
def test_to_probability(self) -> None:
"""Test to_probability"""
bmg = BMGraphBuilder()
h = bmg.add_probability(0.5)
# to_probability on a prob constant is an identity
self.assertEqual(bmg.add_to_probability(h), h)
# We have (hc / (0.5 + hc)) which is always between
# 0 and 1, but the quotient of two positive reals
# is a positive real. Force it to be a probability.
hc = bmg.add_halfcauchy(h)
s = bmg.add_addition(hc, h)
q = bmg.add_division(hc, s)
to_p = bmg.add_to_probability(q)
# to_probability nodes are deduplicated
self.assertEqual(bmg.add_to_probability(q), to_p)
def test_if_then_else(self) -> None:
self.maxDiff = None
bmg = BMGraphBuilder()
p = bmg.add_constant(0.5)
z = bmg.add_constant(0.0)
o = bmg.add_constant(1.0)
b = bmg.add_bernoulli(p)
s = bmg.add_sample(b)
bmg.add_if_then_else(s, o, z)
observed = to_dot(bmg)
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=1.0];
N4[label=0.0];
N5[label=if];
N0 -> N1[label=probability];
N1 -> N2[label=operand];
N2 -> N5[label=condition];
N3 -> N5[label=consequence];
N4 -> N5[label=alternative];
}"""
self.assertEqual(expected.strip(), observed.strip())
def test_allowed_functions(self) -> None:
bmg = BMGRuntime()
p = bmg._bmg.add_constant(0.5)
b = bmg._bmg.add_bernoulli(p)
s = bmg._bmg.add_sample(b)
d = bmg.handle_function(dict, [[(1, s)]])
self.assertEqual(d, {1: s})
def test_add_tensor(self) -> None:
bmg = BMGraphBuilder()
p = bmg.add_constant(0.5)
b = bmg.add_bernoulli(p)
s = bmg.add_sample(b)
# Tensors are deduplicated
t1 = bmg.add_tensor(torch.Size([3]), s, s, p)
t2 = bmg.add_tensor(torch.Size([3]), *[s, s, p])
self.assertTrue(t1 is t2)
observed = to_dot(bmg)
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Tensor];
N0 -> N1[label=probability];
N0 -> N3[label=2];
N1 -> N2[label=operand];
N2 -> N3[label=0];
N2 -> N3[label=1];
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_remove_leaf_from_builder(self) -> None:
bmg = BMGraphBuilder()
p = bmg.add_constant(0.5)
b = bmg.add_bernoulli(p)
s = bmg.add_sample(b)
o = bmg.add_observation(s, True)
observed = to_dot(bmg)
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label="Observation True"];
N0 -> N1[label=probability];
N1 -> N2[label=operand];
N2 -> N3[label=operand];
}
"""
self.assertEqual(observed.strip(), expected.strip())
with self.assertRaises(ValueError):
# Not a leaf
bmg.remove_leaf(s)
bmg.remove_leaf(o)
observed = to_dot(bmg)
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N0 -> N1[label=probability];
N1 -> N2[label=operand];
}
"""
self.assertEqual(observed.strip(), expected.strip())
# Is a leaf now.
bmg.remove_leaf(s)
observed = to_dot(bmg)
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N0 -> N1[label=probability];
}
"""
self.assertEqual(observed.strip(), expected.strip())
| beanmachine-main | tests/ppl/compiler/bm_graph_builder_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# LKJ Cholesky compiler tests
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch.distributions import HalfNormal, LKJCholesky
@bm.random_variable
def lkj_chol_1():
return LKJCholesky(3, 2.0)
@bm.random_variable
def lkj_chol_2():
# Distribution created in random variable, named argument
return LKJCholesky(concentration=2.0, dim=3)
@bm.random_variable
def half_normal():
return HalfNormal(1.0)
@bm.random_variable
def lkj_chol_3():
# Distribution parameterized by another rv
return LKJCholesky(3, half_normal())
@bm.random_variable
def bad_lkj_chol_1():
# LKJ Cholesky must have dimension at least 2
return LKJCholesky(1, half_normal())
@bm.random_variable
def bad_lkj_chol_2():
# LKJ Cholesky must have natural dimension
return LKJCholesky(3.5, half_normal())
@bm.random_variable
def bad_lkj_chol_3():
# LKJ Cholesky must have a positive concentration value
return LKJCholesky(3, -2.0)
class LKJCholeskyTest(unittest.TestCase):
expected_simple_case = """
digraph "graph" {
N0[label=3];
N1[label=2.0];
N2[label=LKJCholesky];
N3[label=Sample];
N4[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
}
""".strip()
expected_random_parameter_case = """
digraph "graph" {
N0[label=1.0];
N1[label=HalfNormal];
N2[label=Sample];
N3[label=3];
N4[label=LKJCholesky];
N5[label=Sample];
N6[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N4;
N3 -> N4;
N4 -> N5;
N5 -> N6;
}
""".strip()
def test_lkj_chol_1(self) -> None:
observed = BMGInference().to_dot([lkj_chol_1()], {})
self.assertEqual(self.expected_simple_case, observed.strip())
def test_lkj_chol_2(self) -> None:
queries = [lkj_chol_2()]
observed = BMGInference().to_dot(queries, {})
self.assertEqual(self.expected_simple_case, observed.strip())
def test_lkj_chol_3(self) -> None:
queries = [lkj_chol_3()]
observed = BMGInference().to_dot(queries, {})
self.assertEqual(self.expected_random_parameter_case, observed.strip())
def test_bad_lkj_chol_1(self) -> None:
queries = [bad_lkj_chol_1()]
self.assertRaises(ValueError, lambda: BMGInference().infer(queries, {}, 1))
def test_bad_lkj_chol_2(self) -> None:
queries = [bad_lkj_chol_2()]
self.assertRaises(ValueError, lambda: BMGInference().infer(queries, {}, 1))
    def test_bad_lkj_chol_3(self) -> None:
queries = [bad_lkj_chol_3()]
self.assertRaises(ValueError, lambda: BMGInference().infer(queries, {}, 1))
| beanmachine-main | tests/ppl/compiler/lkj_cholesky_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for bm_to_bmg.py"""
import math
import unittest
import astor
import beanmachine.ppl as bm
from beanmachine.ppl.compiler.bm_to_bmg import (
_bm_function_to_bmg_ast,
_bm_function_to_bmg_function,
)
from beanmachine.ppl.compiler.bmg_nodes import ExpNode
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.compiler.runtime import BMGRuntime
from torch import tensor
from torch.distributions import Bernoulli, Beta, Normal
def f(x):
return math.exp(x)
class C:
def m(self):
return
counter = 0
# Random variable that takes an argument
@bm.random_variable
def norm(n):
global counter
counter = counter + 1
return Normal(loc=0.0, scale=1.0)
# Random variable that takes no argument
@bm.random_variable
def coin():
return Beta(2.0, 2.0)
# Call to random variable inside random variable
@bm.random_variable
def flip():
return Bernoulli(coin())
# Functional that takes no argument
@bm.functional
def exp_coin():
return coin().exp()
# Functional that takes an ordinary value argument
@bm.functional
def exp_norm(n):
return norm(n).exp()
# Functional that takes an graph node argument
@bm.functional
def exp_coin_2(c):
return c.exp()
# Ordinary function
def add_one(x):
return 1 + x
# Functional that calls normal, functional, random variable functions
@bm.functional
def exp_coin_3():
return add_one(exp_coin_2(coin()))
@bm.random_variable
def coin_with_class():
C().m()
f = True
while f:
f = not f
return Beta(2.0, 2.0)
@bm.functional
def bad_functional_1():
# It's not legal to call a random variable function with
# a stochastic value that has infinite support.
return norm(coin())
@bm.random_variable
def flips(n):
return Bernoulli(0.5)
@bm.random_variable
def norm_ten(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9):
return Normal(loc=0.0, scale=1.0)
@bm.functional
def bad_functional_2():
    # There are 1024 possibilities for this call -- each of the ten flips
    # has support {0, 1}, so 2 ** 10 = 1024 -- and we give an error when
    # the control flow is this complex.
return norm_ten(
flips(0),
flips(1),
flips(2),
flips(3),
flips(4),
flips(5),
flips(6),
flips(7),
flips(8),
flips(9),
)
@bm.functional
def bad_functional_3():
# Calling rv functions with named arguments is not allowed.
return flips(n=1)
@bm.functional
def bad_functional_4():
# Calling functionals with named arguments is not allowed.
return exp_coin_2(c=1)
@bm.random_variable
def beta(n):
return Beta(2.0, 2.0)
@bm.functional
def beta_tensor_1a():
# What happens if we have two uses of the same RV indexed
# with a tensor?
return beta(tensor(1)).log()
@bm.functional
def beta_tensor_1b():
return beta(tensor(1)).exp()
observable_side_effect = 0
def cause_side_effect():
global observable_side_effect
observable_side_effect = 1
return True
@bm.random_variable
def assertions_are_removed():
assert cause_side_effect()
return Bernoulli(0.5)
@bm.random_variable
def flip_with_comprehension():
_ = [0 for x in []]
return Bernoulli(0.5)
@bm.random_variable
def flip_with_nested_function():
def x():
return 0.5
x()
return Bernoulli(0.5)
# Verify that an aliased decorator is allowed:
myrv = bm.random_variable
@myrv
def aliased_rv():
return Bernoulli(0.5)
# Verify that a random variable constructed without an explicit decorator is allowed:
def some_function():
return Bernoulli(0.5)
undecorated_rv = myrv(some_function)
# TODO: What if some_function is a lambda instead of a function definition?
# TODO: What if the function has outer variables?
class JITTest(unittest.TestCase):
def test_function_transformation_1(self) -> None:
"""Unit tests for JIT functions"""
self.maxDiff = None
# Verify code generation of lifted, nested form of
# functions f(x), norm(), above.
self.assertTrue(norm.is_random_variable)
# TODO: Stop using _bm_function_to_bmg_ast for testing.
bmgast = _bm_function_to_bmg_ast(f, "f_helper")
observed = astor.to_source(bmgast)
expected = """
def f_helper(bmg):
import operator
def f(x):
a2 = bmg.handle_dot_get(math, 'exp')
r3 = [x]
r4 = {}
r1 = bmg.handle_function(a2, r3, r4)
return r1
return f"""
self.assertEqual(observed.strip(), expected.strip())
bmgast = _bm_function_to_bmg_ast(norm().function, "norm_helper")
observed = astor.to_source(bmgast)
expected = """
def norm_helper(bmg):
import operator
def norm(n):
global counter
a1 = 1
counter = bmg.handle_function(operator.add, [counter, a1])
r4 = []
a9 = 0.0
a8 = dict(loc=a9)
a11 = 1.0
a10 = dict(scale=a11)
r7 = dict(**a8, **a10)
r2 = bmg.handle_function(Normal, r4, r7)
return r2
a3 = bmg.handle_dot_get(bm, 'random_variable')
r5 = [norm]
r6 = {}
norm = bmg.handle_function(a3, r5, r6)
return norm
"""
self.assertEqual(observed.strip(), expected.strip())
# * Obtain the lifted version of f.
# * Ask the graph builder to transform the rv associated
# with norm(0) to a sample node.
# * Invoke the lifted f and verify that we accumulate an
# exp(sample(normal(0, 1))) node into the graph.
bmg = BMGRuntime()
lifted_f = _bm_function_to_bmg_function(f, bmg)
norm_sample = bmg._rv_to_node(norm(0))
result = lifted_f(norm_sample)
self.assertTrue(isinstance(result, ExpNode))
dot = to_dot(bmg._bmg)
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Exp];
N0 -> N2[label=mu];
N1 -> N2[label=sigma];
N2 -> N3[label=operand];
N3 -> N4[label=operand];
}
"""
self.assertEqual(dot.strip(), expected.strip())
# Verify that we've executed the body of the lifted
# norm(n) exactly once.
global counter
self.assertEqual(counter, 1)
# Turning an rv into a node should be idempotent;
# the second time, we do not increment the counter.
bmg._rv_to_node(norm(0))
self.assertEqual(counter, 1)
bmg._rv_to_node(norm(1))
self.assertEqual(counter, 2)
bmg._rv_to_node(norm(1))
self.assertEqual(counter, 2)
def test_function_transformation_2(self) -> None:
"""Unit tests for JIT functions"""
self.maxDiff = None
# We have flip() which calls Bernoulli(coin()). What should happen
# here is:
# * _rv_to_node jit-compiles flip() and executes the lifted version.
# * while executing the lifted flip() we encounter a call to
# coin(). We detect that coin is a random variable function,
# and call it.
# * We now have the RVIdentifier for coin() in hand, which we
# then jit-compile in turn, and execute the lifted version.
# * That completes the construction of the graph.
bmg = BMGRuntime()
bmg._rv_to_node(flip())
dot = to_dot(bmg._bmg)
expected = """
digraph "graph" {
N0[label=2.0];
N1[label=Beta];
N2[label=Sample];
N3[label=Bernoulli];
N4[label=Sample];
N0 -> N1[label=alpha];
N0 -> N1[label=beta];
N1 -> N2[label=operand];
N2 -> N3[label=probability];
N3 -> N4[label=operand];
}
"""
self.assertEqual(dot.strip(), expected.strip())
def test_function_transformation_3(self) -> None:
"""Unit tests for JIT functions"""
self.maxDiff = None
rt = BMGRuntime()
queries = [coin(), exp_coin()]
observations = {flip(): tensor(1.0)}
bmg = rt.accumulate_graph(queries, observations)
dot = to_dot(bmg)
expected = """
digraph "graph" {
N0[label=2.0];
N1[label=Beta];
N2[label=Sample];
N3[label=Bernoulli];
N4[label=Sample];
N5[label="Observation tensor(1.)"];
N6[label=Query];
N7[label=Exp];
N8[label=Query];
N0 -> N1[label=alpha];
N0 -> N1[label=beta];
N1 -> N2[label=operand];
N2 -> N3[label=probability];
N2 -> N6[label=operator];
N2 -> N7[label=operand];
N3 -> N4[label=operand];
N4 -> N5[label=operand];
N7 -> N8[label=operator];
}
"""
self.assertEqual(dot.strip(), expected.strip())
def test_function_transformation_4(self) -> None:
"""Unit tests for JIT functions"""
self.maxDiff = None
rt = BMGRuntime()
queries = [exp_norm(0)]
observations = {}
bmg = rt.accumulate_graph(queries, observations)
dot = to_dot(bmg)
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Exp];
N5[label=Query];
N0 -> N2[label=mu];
N1 -> N2[label=sigma];
N2 -> N3[label=operand];
N3 -> N4[label=operand];
N4 -> N5[label=operator];
}
"""
self.assertEqual(dot.strip(), expected.strip())
def test_function_transformation_5(self) -> None:
"""Unit tests for JIT functions"""
self.maxDiff = None
rt = BMGRuntime()
queries = [exp_coin_3()]
observations = {}
bmg = rt.accumulate_graph(queries, observations)
dot = to_dot(bmg)
# Note that though functional exp_coin_3 calls functional exp_coin_2,
# we only get one query node emitted into the graph because the
# caller only asked for one query node.
expected = """
digraph "graph" {
N0[label=1];
N1[label=2.0];
N2[label=Beta];
N3[label=Sample];
N4[label=Exp];
N5[label="+"];
N6[label=Query];
N0 -> N5[label=left];
N1 -> N2[label=alpha];
N1 -> N2[label=beta];
N2 -> N3[label=operand];
N3 -> N4[label=operand];
N4 -> N5[label=right];
N5 -> N6[label=operator];
}
"""
self.assertEqual(expected.strip(), dot.strip())
def test_function_transformation_6(self) -> None:
"""Unit tests for JIT functions"""
# This test regresses some crashing bugs. The compiler crashed if an
# RV function contained:
#
# * a class constructor
# * a call to a class method
# * a while loop
self.maxDiff = None
rt = BMGRuntime()
queries = [coin_with_class()]
observations = {}
bmg = rt.accumulate_graph(queries, observations)
dot = to_dot(bmg)
expected = """
digraph "graph" {
N0[label=2.0];
N1[label=Beta];
N2[label=Sample];
N3[label=Query];
N0 -> N1[label=alpha];
N0 -> N1[label=beta];
N1 -> N2[label=operand];
N2 -> N3[label=operator];
}
"""
self.assertEqual(dot.strip(), expected.strip())
# TODO: Also test lambdas and nested functions.
# TODO: What should we do about closures?
def test_bad_control_flow_1(self) -> None:
"""Unit tests for JIT functions"""
self.maxDiff = None
bmg = BMGRuntime()
queries = [bad_functional_1()]
observations = {}
# TODO: Better exception class
with self.assertRaises(ValueError) as ex:
bmg.accumulate_graph(queries, observations)
self.assertEqual(
str(ex.exception), "Stochastic control flow must have finite support."
)
def test_bad_control_flow_2(self) -> None:
"""Unit tests for JIT functions"""
self.maxDiff = None
bmg = BMGRuntime()
queries = [bad_functional_2()]
observations = {}
# TODO: Better exception class
with self.assertRaises(ValueError) as ex:
bmg.accumulate_graph(queries, observations)
self.assertEqual(str(ex.exception), "Stochastic control flow is too complex.")
def test_bad_control_flow_3(self) -> None:
"""Unit tests for JIT functions"""
self.maxDiff = None
bmg = BMGRuntime()
queries = [bad_functional_3()]
observations = {}
# TODO: Better exception class
with self.assertRaises(ValueError) as ex:
bmg.accumulate_graph(queries, observations)
self.assertEqual(
str(ex.exception),
"Random variable function calls must not have named arguments.",
)
def test_bad_control_flow_4(self) -> None:
"""Unit tests for JIT functions"""
self.maxDiff = None
bmg = BMGRuntime()
queries = [bad_functional_4()]
observations = {}
# TODO: Better exception class
with self.assertRaises(ValueError) as ex:
bmg.accumulate_graph(queries, observations)
self.assertEqual(
str(ex.exception),
"Functional calls must not have named arguments.",
)
def test_rv_identity(self) -> None:
self.maxDiff = None
# This test demonstrates an invariant which we must maintain as we modify
# the implementation details of the jitter: two calls to the same RV with
# the same arguments must produce the same sample node. Here the two calls
# to beta(tensor(1)) must both produce the same sample node, not two samples.
#
# TODO:
#
# Right now this invariant is maintained by the @memoize modifier that is
# automatically generated on a lifted rv function, but that mechanism
# is redundant to the rv_map inside the graph builder, so we will eventually
# remove it. When we do so, we'll need to ensure that one of the following
# happens:
#
# * We add a hash function to RVIdentifier that treats identical-content tensors
# as the same argument
# * We build a special-purpose map for tracking RVID -> Sample node mappings.
# * We restrict arguments to rv functions to be hashable (and canonicalize tensor
# arguments to single values.)
# * Or some other similar mechanism for maintaining this invariant.
rt = BMGRuntime()
queries = [beta_tensor_1a(), beta_tensor_1b()]
observations = {}
bmg = rt.accumulate_graph(queries, observations)
observed = to_dot(bmg)
expected = """
digraph "graph" {
N0[label=2.0];
N1[label=Beta];
N2[label=Sample];
N3[label=Log];
N4[label=Query];
N5[label=Exp];
N6[label=Query];
N0 -> N1[label=alpha];
N0 -> N1[label=beta];
N1 -> N2[label=operand];
N2 -> N3[label=operand];
N2 -> N5[label=operand];
N3 -> N4[label=operator];
N5 -> N6[label=operator];
}"""
self.assertEqual(expected.strip(), observed.strip())
def test_assertions_are_removed(self) -> None:
# The lifted form of a function removes all assertion statements.
# We can demonstrate this by making an assertion that causes an
# observable effect, and then showing that the effect does not
# happen when the lifted form is executed.
global observable_side_effect
self.maxDiff = None
self.assertEqual(observable_side_effect, 0)
# In non-lifted code, the assertion causes a side effect.
assert cause_side_effect()
self.assertEqual(observable_side_effect, 1)
observable_side_effect = 0
bmg = BMGRuntime()
bmg.accumulate_graph([assertions_are_removed()], {})
# The side effect is not caused.
self.assertEqual(observable_side_effect, 0)
def test_nested_functions_and_comprehensions(self) -> None:
self.maxDiff = None
# We had a bug where a nested function or comprehension inside a
# random_variable would crash while accumulating the graph;
# this test regresses that bug by simply verifying that we do
# not crash in those scenarios now.
bmg = BMGRuntime()
bmg.accumulate_graph([flip_with_nested_function()], {})
bmg = BMGRuntime()
bmg.accumulate_graph([flip_with_comprehension()], {})
def test_aliased_rv(self) -> None:
self.maxDiff = None
rt = BMGRuntime()
queries = [aliased_rv()]
observations = {}
bmg = rt.accumulate_graph(queries, observations)
observed = to_dot(bmg)
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Query];
N0 -> N1[label=probability];
N1 -> N2[label=operand];
N2 -> N3[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_undecorated_rv(self) -> None:
self.maxDiff = None
rt = BMGRuntime()
queries = [undecorated_rv()]
observations = {}
bmg = rt.accumulate_graph(queries, observations)
observed = to_dot(bmg)
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Query];
N0 -> N1[label=probability];
N1 -> N2[label=operand];
N2 -> N3[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_nested_rv(self) -> None:
self.maxDiff = None
        # A random variable that is nested inside another function and closes
        # over an outer local variable works:
prob = 0.5
@bm.random_variable
def nested_flip():
return Bernoulli(prob)
observed = to_dot(BMGRuntime().accumulate_graph([nested_flip()], {}))
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Query];
N0 -> N1[label=probability];
N1 -> N2[label=operand];
N2 -> N3[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
# Check that we can close over an outer variable with a lambda also.
mean = 0.0
sigma = 1.0
shift = 2.0
# Lambda random variable:
lambda_norm = bm.random_variable(lambda: Normal(mean, sigma))
# Lambda that is not a functional, not declared inside a functional, but called
# from inside a functional:
lambda_mult = lambda x, y: x * y # noqa
# Lambda functional:
lambda_sum = bm.functional(lambda: lambda_mult(lambda_norm() + shift, 4.0))
observed = to_dot(BMGRuntime().accumulate_graph([lambda_sum()], {}))
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=2.0];
N5[label="+"];
N6[label=4.0];
N7[label="*"];
N8[label=Query];
N0 -> N2[label=mu];
N1 -> N2[label=sigma];
N2 -> N3[label=operand];
N3 -> N5[label=left];
N4 -> N5[label=right];
N5 -> N7[label=left];
N6 -> N7[label=right];
N7 -> N8[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
# What if we have a nested function inside a random variable?
@bm.random_variable
def norm1():
return Normal(0.0, 1.0)
@bm.random_variable
def norm2():
def mult(x, y):
return x * y
return Normal(mult(norm1(), 2.0), 3.0)
observed = to_dot(BMGRuntime().accumulate_graph([norm2()], {}))
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=2.0];
N5[label="*"];
N6[label=3.0];
N7[label=Normal];
N8[label=Sample];
N9[label=Query];
N0 -> N2[label=mu];
N1 -> N2[label=sigma];
N2 -> N3[label=operand];
N3 -> N5[label=left];
N4 -> N5[label=right];
N5 -> N7[label=mu];
N6 -> N7[label=sigma];
N7 -> N8[label=operand];
N8 -> N9[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
# What if we have a random variable nested inside another?
@bm.random_variable
def norm3():
@bm.random_variable
def norm4():
return Normal(0.0, 1.0)
return Normal(norm4() * 5.0, 6.0)
observed = to_dot(BMGRuntime().accumulate_graph([norm3()], {}))
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=5.0];
N5[label="*"];
N6[label=6.0];
N7[label=Normal];
N8[label=Sample];
N9[label=Query];
N0 -> N2[label=mu];
N1 -> N2[label=sigma];
N2 -> N3[label=operand];
N3 -> N5[label=left];
N4 -> N5[label=right];
N5 -> N7[label=mu];
N6 -> N7[label=sigma];
N7 -> N8[label=operand];
N8 -> N9[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/jit_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Compilation test of Todd's Bayesian Multiple Testing model"""
import unittest
import beanmachine.ppl as bm
import torch.distributions as dist
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
@bm.random_variable
def theta():
return dist.Beta(2, 5)
@bm.random_variable
def sigma():
return dist.HalfCauchy(5)
@bm.random_variable
def tau():
return dist.HalfCauchy(5)
@bm.random_variable
def z(i):
return dist.Bernoulli(theta())
@bm.random_variable
def mu(i):
return dist.Normal(0, tau())
@bm.random_variable
def x(i):
return dist.Normal(mu(i) * z(i), sigma())
class BMTModelTest(unittest.TestCase):
def test_bmt_to_dot(self) -> None:
self.maxDiff = None
x_obs = [3.0, -0.75, 2.0, -0.3]
n_obs = len(x_obs)
queries = (
[theta(), sigma(), tau()]
+ [z(i) for i in range(n_obs)]
+ [mu(i) for i in range(n_obs)]
)
observations = {x(i): tensor(v) for i, v in enumerate(x_obs)}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label=5.0];
N01[label=HalfCauchy];
N02[label=Sample];
N03[label=0.0];
N04[label=Normal];
N05[label=Sample];
N06[label=2.0];
N07[label=Beta];
N08[label=Sample];
N09[label=Bernoulli];
N10[label=Sample];
N11[label=Sample];
N12[label=if];
N13[label=Normal];
N14[label=Sample];
N15[label="Observation 3.0"];
N16[label=Sample];
N17[label=Sample];
N18[label=if];
N19[label=Normal];
N20[label=Sample];
N21[label="Observation -0.75"];
N22[label=Sample];
N23[label=Sample];
N24[label=if];
N25[label=Normal];
N26[label=Sample];
N27[label="Observation 2.0"];
N28[label=Sample];
N29[label=Sample];
N30[label=if];
N31[label=Normal];
N32[label=Sample];
N33[label="Observation -0.30000001192092896"];
N34[label=Query];
N35[label=Query];
N36[label=Query];
N37[label=Query];
N38[label=Query];
N39[label=Query];
N40[label=Query];
N41[label=Query];
N42[label=Query];
N43[label=Query];
N44[label=Query];
N00 -> N01;
N00 -> N07;
N01 -> N02;
N01 -> N11;
N02 -> N04;
N02 -> N36;
N03 -> N04;
N03 -> N12;
N03 -> N18;
N03 -> N24;
N03 -> N30;
N04 -> N05;
N04 -> N16;
N04 -> N22;
N04 -> N28;
N05 -> N12;
N05 -> N41;
N06 -> N07;
N07 -> N08;
N08 -> N09;
N08 -> N34;
N09 -> N10;
N09 -> N17;
N09 -> N23;
N09 -> N29;
N10 -> N12;
N10 -> N37;
N11 -> N13;
N11 -> N19;
N11 -> N25;
N11 -> N31;
N11 -> N35;
N12 -> N13;
N13 -> N14;
N14 -> N15;
N16 -> N18;
N16 -> N42;
N17 -> N18;
N17 -> N38;
N18 -> N19;
N19 -> N20;
N20 -> N21;
N22 -> N24;
N22 -> N43;
N23 -> N24;
N23 -> N39;
N24 -> N25;
N25 -> N26;
N26 -> N27;
N28 -> N30;
N28 -> N44;
N29 -> N30;
N29 -> N40;
N30 -> N31;
N31 -> N32;
N32 -> N33;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/bmt_model_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from beanmachine.ppl.compiler.broadcaster import broadcast_fnc
from torch import Size
class BroadcastTest(unittest.TestCase):
def test_broadcast_success(self) -> None:
input_sizes = [Size([3]), Size([3, 2, 1]), Size([1, 2, 1]), Size([2, 3])]
expectations = [
[0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2],
[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5],
]
target_size = Size([3, 2, 3])
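        # Each broadcaster maps a flat index into the target Size([3, 2, 3])
        # (18 elements) back to the flat index of the input element that
        # broadcasting places there; e.g. the Size([3]) input repeats its
        # three elements, so target index j maps to input index j % 3.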
i = 0
for input_size in input_sizes:
broadcaster = broadcast_fnc(input_size, target_size)
expectation = expectations[i]
i = i + 1
for j in range(0, 18):
input_index = broadcaster(j)
expected = expectation[j]
self.assertEqual(input_index, expected)
| beanmachine-main | tests/ppl/compiler/broadcaster_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for patterns.py"""
import ast
import re
import unittest
from beanmachine.ppl.compiler.ast_patterns import (
ast_str,
binop,
compare,
expr,
module,
name_constant,
num,
)
from beanmachine.ppl.compiler.patterns import (
HeadTail,
ListAll,
ListAny,
match,
negate,
twoPlusList,
)
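# Collapse a multi-line string onto one line and squeeze runs of spaces so
# that the stringified patterns can be compared while ignoring layout.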
def tidy(s: str) -> str:
return re.sub(" +", " ", s.replace("\n", " ")).strip()
class PatternsTest(unittest.TestCase):
def test_twoPlus(self) -> None:
"""Test the twoPlusList pattern"""
self.assertTrue(match(twoPlusList, [1, 2, 3]).is_success)
self.assertTrue(match(twoPlusList, [1, 2]).is_fail)
self.assertTrue(match(twoPlusList, [1]).is_fail)
self.assertTrue(match(twoPlusList, []).is_fail)
def test_atomic(self) -> None:
"""Test atomic patterns"""
p = binop(
left=binop(left=num(n=0), right=name_constant(value=True)), right=num(n=1.5)
)
observed = str(p)
expected = """
(isinstance(test, BinOp) and
(isinstance(test.left, BinOp) and
(isinstance(test.left.left, Num) and test.left.left.n==0) and
(isinstance(test.left.right, NameConstant) and test.left.right.value==True)) and
(isinstance(test.right, Num) and test.right.n==1.5))
"""
self.maxDiff = None
self.assertEqual(tidy(observed), tidy(expected))
result = p(ast.parse("0 * True + 1.5").body[0].value)
self.assertTrue(result)
# This one fails because it is binop(0, binop(true, 1.5)), and the
# pattern is looking for binop(binop(0, true), 1.5)
result = p(ast.parse("0 + True * 1.5").body[0].value)
self.assertFalse(result)
def test_negate(self) -> None:
"""Test negate"""
p = negate(ast_str(s="abc"))
result = p(ast.parse("'abc'").body[0].value)
self.assertTrue(result.is_fail())
result = p(ast.parse("1+2").body[0].value)
self.assertTrue(result.is_success())
def test_list_patterns(self) -> None:
"""Tests for list patterns"""
p = module(body=[])
observed = str(p)
expected = """(isinstance(test, Module) and test.body==[])"""
self.maxDiff = None
self.assertEqual(tidy(observed), tidy(expected))
result = p(ast.parse(""))
self.assertTrue(result.is_success())
result = p(ast.parse("1 + 2"))
self.assertTrue(result.is_fail())
p = module(body=[expr(value=binop()), expr(value=binop())])
observed = str(p)
expected = """
(isinstance(test, Module) and
[(isinstance(test.body[0], Expr) and isinstance(test.body[0].value, BinOp)),
(isinstance(test.body[1], Expr) and isinstance(test.body[1].value, BinOp))])
"""
self.assertEqual(tidy(observed), tidy(expected))
result = p(ast.parse("1 + 2"))
self.assertTrue(result.is_fail())
result = p(ast.parse("1 + 2; 3 * 4"))
self.assertTrue(result.is_success())
p = module(ListAny(expr(compare())))
observed = str(p)
expected = """
(isinstance(test, Module) and
test.body.any(x:(isinstance(x, Expr) and isinstance(x.value, Compare))))
"""
self.assertEqual(tidy(observed), tidy(expected))
result = p(ast.parse("1 + 2; x is None"))
self.assertTrue(result.is_success())
result = p(ast.parse("1 + 2; 3 * 4"))
self.assertTrue(result.is_fail())
p = module(ListAll(expr(compare())))
observed = str(p)
expected = """
(isinstance(test, Module) and
test.body.all(x:(isinstance(x, Expr) and isinstance(x.value, Compare))))
"""
self.assertEqual(tidy(observed), tidy(expected))
result = p(ast.parse("1 + 2; x is None"))
self.assertTrue(result.is_fail())
result = p(ast.parse("x is None; y is None"))
self.assertTrue(result.is_success())
# This pattern says that the body is a list where the head
# is a binop statement and the tail is empty; that is, there
# is only one item in the list. We could match any list pattern
# against the tail.
p = module(HeadTail(expr(binop()), []))
result = p(ast.parse("1 + 2; x is None"))
self.assertTrue(result.is_fail())
result = p(ast.parse("x is None"))
self.assertTrue(result.is_fail())
result = p(ast.parse("1 + 2"))
self.assertTrue(result.is_success())
| beanmachine-main | tests/ppl/compiler/patterns_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
from beanmachine.ppl.inference import BMGInference
from torch import exp, log, logsumexp
from torch.distributions import Normal
@bm.random_variable
def norm(x):
return Normal(0.0, 1.0)
@bm.functional
def log_1():
return log(exp(norm(0)) + exp(norm(1)) + exp(norm(2)))
@bm.functional
def logsumexp_1():
return logsumexp(torch.tensor([norm(0), norm(1), norm(2)]), 0)
@bm.functional
def log_2():
return log_1()
@bm.functional
def log_3():
return logsumexp_1()
@bm.functional
def exp_1():
return exp(norm(3)) + exp(norm(4))
@bm.functional
def log_4():
return log(exp(norm(0)) + exp(norm(1)) + exp(norm(2)) + exp_1())
@bm.functional
def log_5():
return log_4()
@bm.functional
def log_6():
return log_4() + exp_1()
class FixLogSumExpTest(unittest.TestCase):
def test_fix_log_sum_exp_1(self) -> None:
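        # log_2 computes log(exp(a) + exp(b) + exp(c)); the compiler should
        # rewrite that into the same graph as the explicit logsumexp model,
        # so the two DOT outputs are expected to be identical.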
self.maxDiff = None
observations = {}
queries_observed = [log_2()]
graph_observed = BMGInference().to_dot(queries_observed, observations)
queries_expected = [log_3()]
graph_expected = BMGInference().to_dot(queries_expected, observations)
self.assertEqual(graph_observed.strip(), graph_expected.strip())
def test_fix_log_sum_exp_2(self) -> None:
self.maxDiff = None
observations = {}
queries_observed = [log_5()]
graph_observed = BMGInference().to_dot(queries_observed, observations)
graph_expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Sample];
N5[label=Sample];
N6[label=Sample];
N7[label=Sample];
N8[label=LogSumExp];
N9[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N2 -> N4;
N2 -> N5;
N2 -> N6;
N2 -> N7;
N3 -> N8;
N4 -> N8;
N5 -> N8;
N6 -> N8;
N7 -> N8;
N8 -> N9;
}
"""
self.assertEqual(graph_observed.strip(), graph_expected.strip())
def test_fix_log_sum_exp_3(self) -> None:
self.maxDiff = None
observations = {}
queries_observed = [log_6()]
graph_observed = BMGInference().to_dot(queries_observed, observations)
graph_expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=Sample];
N06[label=Sample];
N07[label=Sample];
N08[label=Exp];
N09[label=Exp];
N10[label=Exp];
N11[label=Exp];
N12[label=Exp];
N13[label="+"];
N14[label="+"];
N15[label=Log];
N16[label=ToReal];
N17[label="+"];
N18[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N02 -> N05;
N02 -> N06;
N02 -> N07;
N03 -> N08;
N04 -> N09;
N05 -> N10;
N06 -> N11;
N07 -> N12;
N08 -> N14;
N09 -> N14;
N10 -> N14;
N11 -> N13;
N12 -> N13;
N13 -> N14;
N13 -> N16;
N14 -> N15;
N15 -> N17;
N16 -> N17;
N17 -> N18;
}
"""
self.assertEqual(graph_observed.strip(), graph_expected.strip())
| beanmachine-main | tests/ppl/compiler/fix_logsumexp_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
from beanmachine.ppl.inference import BMGInference
from torch.distributions import Beta
@bm.random_variable
def beta(n):
return Beta(2, 2)
@bm.functional
def logaddexp_1():
return torch.logaddexp(beta(0), beta(1))
class FixLogAddExpTest(unittest.TestCase):
def test_logaddexp_1(self) -> None:
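        # Before the rewriting pass (after_transform=False) the accumulated
        # graph still contains the LogAddExp node produced by torch.logaddexp.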
queries = [logaddexp_1()]
graph_observed = BMGInference().to_dot(queries, {}, after_transform=False)
graph_expected = """
digraph "graph" {
N0[label=2.0];
N1[label=Beta];
N2[label=Sample];
N3[label=Sample];
N4[label=LogAddExp];
N5[label=Query];
N0 -> N1;
N0 -> N1;
N1 -> N2;
N1 -> N3;
N2 -> N4;
N3 -> N4;
N4 -> N5;
}
"""
self.assertEqual(graph_observed.strip(), graph_expected.strip())
def test_logaddexp_2(self) -> None:
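        # After the rewriting pass the two Beta samples are converted to real
        # (the ToReal nodes below) and the LogAddExp becomes a two-operand
        # LogSumExp.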
queries = [logaddexp_1()]
graph_observed = BMGInference().to_dot(queries, {})
graph_expected = """
digraph "graph" {
N0[label=2.0];
N1[label=Beta];
N2[label=Sample];
N3[label=Sample];
N4[label=ToReal];
N5[label=ToReal];
N6[label=LogSumExp];
N7[label=Query];
N0 -> N1;
N0 -> N1;
N1 -> N2;
N1 -> N3;
N2 -> N4;
N3 -> N5;
N4 -> N6;
N5 -> N6;
N6 -> N7;
}
"""
self.assertEqual(graph_observed.strip(), graph_expected.strip())
| beanmachine-main | tests/ppl/compiler/fix_logaddexp_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test for tutorial on Robust Linear Regression"""
# This file is a manual replica of the Bento tutorial with the same name
### TODO: This tutorial has a couple of different calls to inference, and currently only the
### first call is being considered. It would be good to go through the other parts as well
import logging
import unittest
# TODO: Check imports for consistency
import beanmachine.ppl as bm
import torch # from torch import manual_seed, tensor
import torch.distributions as dist # from torch.distributions import Bernoulli, Normal, Uniform
from beanmachine.ppl.inference.bmg_inference import BMGInference
from sklearn import model_selection
from torch import tensor
# This makes the results deterministic and reproducible.
logging.getLogger("beanmachine").setLevel(50)
torch.manual_seed(12)
# Model
@bm.random_variable
def beta():
"""
Regression Coefficient
"""
return dist.Normal(0, 1000)
@bm.random_variable
def alpha():
"""
Regression Bias/Offset
"""
return dist.Normal(0, 1000)
@bm.random_variable
def sigma_regressor():
"""
Deviation parameter for Student's T
Controls the magnitude of the errors.
"""
return dist.HalfNormal(1000)
@bm.random_variable
def df_nu():
"""
Degrees of Freedom of a Student's T
Check https://en.wikipedia.org/wiki/Student%27s_t-distribution for effect
"""
return dist.Gamma(2, 0.1)
@bm.random_variable
def y_robust(X):
"""
Heavy-Tailed Noise model for regression utilizing StudentT
Student's T : https://en.wikipedia.org/wiki/Student%27s_t-distribution
"""
return dist.StudentT(df=df_nu(), loc=beta() * X + alpha(), scale=sigma_regressor())
# Creating sample data
sigma_data = torch.tensor([20, 40])
rho = -0.95
N = 200
cov = torch.tensor(
[
[torch.pow(sigma_data[0], 2), sigma_data[0] * sigma_data[1] * rho],
[sigma_data[0] * sigma_data[1] * rho, torch.pow(sigma_data[1], 2)],
]
)
dist_clean = dist.MultivariateNormal(loc=torch.zeros(2), covariance_matrix=cov)
points = tensor([dist_clean.sample().tolist() for i in range(N)]).view(N, 2)
X = X_clean = points[:, 0]
Y = Y_clean = points[:, 1]
true_beta_1 = 2.0
true_beta_0 = 5.0
true_epsilon = 1.0
points_noisy = points
points_noisy[0, :] = torch.tensor([-20, -80])
points_noisy[1, :] = torch.tensor([20, 100])
points_noisy[2, :] = torch.tensor([40, 40])
X_corr = points_noisy[:, 0]
Y_corr = points_noisy[:, 1]
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y)
# Inference parameters
num_samples = (
    2  # Originally 2000; sample size reduced since it does not affect compilation issues
)
num_chains = 4
observations = {y_robust(X_train): Y_train}
queries = [beta(), alpha(), sigma_regressor(), df_nu()]
### The following is old code
class tutorialRobustLinearRegresionTest(unittest.TestCase):
def test_tutorial_Robust_Linear_Regression(self) -> None:
"""Check BM and BMG inference both terminate"""
self.maxDiff = None
# Inference with BM
# Note: No explicit seed here (in original tutorial model). Should we add one?
amh = bm.SingleSiteAncestralMetropolisHastings() # Added local binding
_ = amh.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=num_chains,
)
self.assertTrue(True, msg="We just want to check this point is reached")
def test_tutorial_Robust_Linear_Regression_to_dot_cpp_python(
self,
) -> None:
self.maxDiff = None
        ## Intermediate forms are too large with devectorization
# observed = BMGInference().to_dot(queries, observations)
# expected = """
# """
# self.assertEqual(expected.strip(), observed.strip())
#
# observed = BMGInference().to_cpp(queries, observations)
# expected = """"""
# self.assertEqual(expected.strip(), observed.strip())
#
# observed = BMGInference().to_python(queries, observations)
# expected = """"""
# self.assertEqual(expected.strip(), observed.strip())
_ = BMGInference().infer(
queries=queries, observations=observations, num_samples=num_samples
)
| beanmachine-main | tests/ppl/compiler/tutorial_Robust_Linear_Regression_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# BM -> BMG compiler arithmetic tests
import math
import operator
import unittest
import beanmachine.ppl as bm
import numpy as np
import torch
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch.distributions import Bernoulli, Beta, Binomial, HalfCauchy, Normal
@bm.random_variable
def bern():
return Bernoulli(0.5)
@bm.random_variable
def beta():
return Beta(2.0, 2.0)
@bm.random_variable
def norm():
return Normal(0.0, 1.0)
@bm.random_variable
def hc():
return HalfCauchy(1.0)
@bm.functional
def expm1_prob():
return beta().expm1()
@bm.functional
def expm1_real():
return torch.expm1(norm())
@bm.functional
def expm1_negreal():
return torch.Tensor.expm1(-hc())
@bm.functional
def logistic_prob():
return beta().sigmoid()
@bm.functional
def logistic_real():
return torch.sigmoid(norm())
@bm.functional
def logistic_negreal():
return torch.Tensor.sigmoid(-hc())
@bm.random_variable
def ordinary_arithmetic(n):
return Bernoulli(
probs=torch.tensor(0.5) + torch.log(torch.exp(n * torch.tensor(0.1)))
)
@bm.random_variable
def stochastic_arithmetic():
s = 0.0
# Verify that mutating += works on lists normally:
items = [0]
items += [1]
# Verify that +=, *=, -= all work on graph nodes:
for n in items:
p = torch.log(torch.tensor(0.01))
p *= ordinary_arithmetic(n)
s += p
m = 1
m -= torch.exp(input=torch.log(torch.tensor(0.99)) + s)
return Bernoulli(m)
@bm.functional
def mutating_assignments():
# Torch supports mutating tensors in-place, which allows for
# aliasing. THE COMPILER DOES NOT CORRECTLY DETECT ALIASING
# WHEN A STOCHASTIC QUANTITY IS INVOLVED!
x = torch.tensor(1.0)
y = x # y is an alias for x
y += 2.0 # y is now 3, and so is x
y = y + 4.0 # y is now 7, but x is still 3
# So far we're all fine; every mutated tensor has been non-stochastic.
b = beta() * x + y # b is beta_sample * 3 + 7
# Now let's see how things go wrong. We'll alias stochastic quantity b:
c = b
c *= 5.0
# In Python Bean Machine, c and b are now both (beta() * 3 + 7) * 5
# but the compiler does not detect that c and b are aliases, and does
# not represent tensor mutations in graph nodes. The compiler thinks
# that c is (beta() * 3 + 7) * 5 but b is still (beta() * 3 + 7):
return b
@bm.random_variable
def neg_of_neg():
return Normal(-torch.neg(norm()), 1.0)
@bm.functional
def subtractions():
# Show that we can handle a bunch of different ways to subtract things
# Show that unary plus operations are discarded.
n = +norm()
b = +beta()
h = +hc()
return +torch.sub(+n.sub(+b), +b - h)
@bm.random_variable
def bino():
return Binomial(total_count=3, probs=0.5)
@bm.functional
def unsupported_add():
# What happens if we use a stochastic quantity in an operation with
# a non-tensor, non-number?
return bino() + "foo"
@bm.functional
def log_1():
# Ordinary constant, math.log. Note that a functional is
# required to return a tensor. Verify that ordinary
# arithmetic still works in a model.
return torch.tensor(math.log(1.0))
@bm.functional
def log_2():
# Tensor constant, math.log; this is legal.
# A multi-valued tensor would be an error.
return torch.tensor(math.log(torch.tensor(2.0)))
@bm.functional
def log_3():
# Tensor constant, Tensor.log.
# An ordinary constant would be an error.
return torch.Tensor.log(torch.tensor(3.0))
@bm.functional
def log_4():
# Tensor constant, instance log
return torch.tensor([4.0, 4.0]).log()
@bm.functional
def log_5():
# Stochastic value, math.log
return torch.tensor(math.log(beta() + 5.0))
@bm.functional
def log_6():
# Stochastic value, Tensor.log
return torch.Tensor.log(beta() + 6.0)
@bm.functional
def log_7():
# Stochastic value, instance log
return (beta() + 7.0).log()
@bm.functional
def log10_1():
# Tensor constant, torch.log10.
return torch.log10(torch.tensor(10.0))
@bm.functional
def log10_2():
# Stochastic tensor, torch.log10.
return torch.log10(beta() + 2.0)
@bm.functional
def log10_3():
# Tensor constant, Tensor.log10.
return torch.Tensor.log10(torch.tensor(1000.0))
@bm.functional
def log10_4():
# Tensor constant, instance log10
return torch.tensor(10000.0).log10()
@bm.functional
def log10_5():
# Stochastic value, Tensor.log10
return torch.Tensor.log10(beta() + 5.0)
@bm.functional
def log10_6():
# Stochastic value, instance log10
return (beta() + 6.0).log10()
@bm.functional
def log1p_1():
# Tensor constant, torch.log1p.
return torch.log1p(torch.tensor(1.0))
@bm.functional
def log1p_2():
# Stochastic tensor, torch.log1p.
return torch.log1p(beta() + 2.0)
@bm.functional
def log1p_3():
# Tensor constant, torch.special.log1p.
return torch.special.log1p(torch.tensor(3.0))
@bm.functional
def log1p_4():
# Stochastic tensor, torch.special.log1p.
return torch.special.log1p(beta() + 4.0)
@bm.functional
def log1p_5():
# Tensor constant, Tensor.log1p.
return torch.Tensor.log1p(torch.tensor(5.0))
@bm.functional
def log1p_6():
# Tensor constant, instance log1p
return torch.tensor(6.0).log1p()
@bm.functional
def log1p_7():
# Stochastic value, Tensor.log1p
return torch.Tensor.log1p(beta() + 7.0)
@bm.functional
def log1p_8():
# Stochastic value, instance log1p
return (beta() + 8.0).log1p()
@bm.functional
def log2_1():
# Tensor constant, torch.log2.
return torch.log2(torch.tensor(2.0))
@bm.functional
def log2_2():
# Stochastic tensor, torch.log2.
return torch.log2(beta() + 2.0)
@bm.functional
def log2_3():
# Tensor constant, Tensor.log2.
return torch.Tensor.log2(torch.tensor(8.0))
@bm.functional
def log2_4():
# Tensor constant, instance log2
return torch.tensor(16.0).log2()
@bm.functional
def log2_5():
# Stochastic value, Tensor.log2
return torch.Tensor.log2(beta() + 5.0)
@bm.functional
def log2_6():
# Stochastic value, instance log2
return (beta() + 6.0).log2()
@bm.functional
def sqrt_1():
# Tensor constant, torch.sqrt.
return torch.sqrt(torch.tensor(1.0))
@bm.functional
def sqrt_2():
# Stochastic tensor, torch.sqrt.
return torch.sqrt(beta() + 2.0)
@bm.functional
def sqrt_3():
# Tensor constant, Tensor.sqrt.
return torch.Tensor.sqrt(torch.tensor(9.0))
@bm.functional
def sqrt_4():
# Tensor constant, instance sqrt
return torch.tensor(16.0).sqrt()
@bm.functional
def sqrt_5():
# Stochastic value, Tensor.sqrt
return torch.Tensor.sqrt(beta() + 5.0)
@bm.functional
def sqrt_6():
# Stochastic value, instance sqrt
return (beta() + 6.0).sqrt()
@bm.functional
def exp_1():
# Ordinary constant, math.exp. Note that a functional is
# required to return a tensor. Verify that ordinary
# arithmetic still works in a model.
return torch.tensor(math.exp(1.0))
@bm.functional
def exp_2():
# Tensor constant, math.exp; this is legal.
# A multi-valued tensor would be an error.
return torch.tensor(math.exp(torch.tensor(2.0)))
@bm.functional
def exp_3():
# Tensor constant, Tensor.exp.
# An ordinary constant would be an error.
return torch.Tensor.exp(torch.tensor(3.0))
@bm.functional
def exp_4():
# Tensor constant, instance exp
return torch.tensor([4.0, 4.0]).exp()
@bm.functional
def exp_5():
# Stochastic value, math.exp
return torch.tensor(math.exp(beta() + 5.0))
@bm.functional
def exp_6():
# Stochastic value, Tensor.exp
return torch.Tensor.exp(beta() + 6.0)
@bm.functional
def exp_7():
# Stochastic value, instance exp
return (beta() + 7.0).exp()
@bm.functional
def exp2_1():
# Tensor constant, torch.exp2.
return torch.exp2(torch.tensor(1.0))
@bm.functional
def exp2_2():
# Stochastic tensor, torch.exp2.
return torch.exp2(beta() + 2.0)
@bm.functional
def exp2_3():
# Tensor constant, torch.special.exp2.
return torch.special.exp2(torch.tensor(3.0))
@bm.functional
def exp2_4():
# Stochastic tensor, torch.special.exp2.
return torch.special.exp2(beta() + 4.0)
@bm.functional
def exp2_5():
# Tensor constant, Tensor.exp2.
return torch.Tensor.exp2(torch.tensor(5.0))
@bm.functional
def exp2_6():
# Tensor constant, instance exp2
return torch.tensor(6.0).exp2()
@bm.functional
def exp2_7():
# Stochastic value, Tensor.exp2
return torch.Tensor.exp2(beta() + 7.0)
@bm.functional
def exp2_8():
# Stochastic value, instance exp2
return (beta() + 8.0).exp2()
@bm.functional
def pow_1():
# Ordinary constant, power operator. Note that a functional is
# required to return a tensor. Verify that ordinary
# arithmetic still works in a model.
return torch.tensor(1.0**10.0)
@bm.functional
def pow_2():
# Tensor constant, power operator.
return torch.tensor(2.0) ** 2.0
@bm.functional
def pow_3():
# Tensor constant, Tensor.pow, named argument.
return torch.Tensor.pow(torch.tensor(3.0), exponent=torch.tensor(3.0))
@bm.functional
def pow_4():
# Tensor constant, instance pow, named argument
return torch.tensor(4.0).pow(exponent=torch.tensor(4.0))
@bm.functional
def pow_5():
# Stochastic value, power operator
return beta() ** 5.0
@bm.functional
def pow_6():
# Stochastic value, Tensor.pow
return torch.Tensor.pow(torch.tensor(6.0), exponent=beta())
@bm.functional
def pow_7():
    # Stochastic value, instance pow
return torch.tensor(7.0).pow(exponent=beta())
@bm.functional
def pow_8():
# Constant values, operator.pow
return operator.pow(torch.tensor(8.0), torch.tensor(2.0))
@bm.functional
def pow_9():
# Stochastic values, operator.pow
return operator.pow(beta(), torch.tensor(9.0))
@bm.functional
def to_real_1():
# Calling float() causes a TO_REAL node to be emitted into the graph.
# TODO: Is this actually a good idea? We already automatically insert
# TO_REAL when necessary to make the type system happy. float() could
# just be an identity on graph nodes instead of adding TO_REAL.
#
# Once again, a functional is required to return a tensor.
return torch.tensor([float(bern()), 1.0])
@bm.functional
def to_real_2():
# Similarly for the tensor float() instance method.
return bern().float()
@bm.functional
def not_1():
# Ordinary constant, not operator. Note that a functional is
# required to return a tensor. Verify that ordinary
# arithmetic still works in a model.
return torch.tensor(not 1.0)
@bm.functional
def not_2():
# Tensor constant; not operator. This is legal.
# A multi-valued tensor would be an error.
return torch.tensor(not torch.tensor(2.0))
@bm.functional
def not_3():
# Tensor constant, Tensor.logical_not.
# An ordinary constant would be an error.
return torch.Tensor.logical_not(torch.tensor(3.0))
@bm.functional
def not_4():
# Tensor constant, instance logical_not
return torch.tensor(4.0).logical_not()
@bm.functional
def not_5():
# Stochastic value, not operator
return torch.tensor(not (beta() + 5.0))
@bm.functional
def not_6():
# Stochastic value, Tensor.logical_not
return torch.Tensor.logical_not(beta() + 6.0)
@bm.functional
def not_7():
# Stochastic value, instance logical_not
return (beta() + 7.0).logical_not()
@bm.functional
def not_8():
# Constant value, operator.not_
return torch.tensor(operator.not_(torch.tensor(8.0)))
@bm.functional
def not_9():
# Stochastic value, operator.not_
return torch.tensor(operator.not_(beta() + 9.0))
@bm.functional
def neg_1():
# Ordinary constant, - operator. Note that a functional is
# required to return a tensor. Verify that ordinary
# arithmetic still works in a model.
return torch.tensor(-1.0)
@bm.functional
def neg_2():
# Tensor constant; - operator.
return -torch.tensor(2.0)
@bm.functional
def neg_3():
# Tensor constant, Tensor.neg.
return torch.Tensor.neg(torch.tensor(3.0))
@bm.functional
def neg_4():
# Tensor constant, instance neg
return torch.tensor(4.0).neg()
@bm.functional
def neg_5():
# Stochastic value, - operator
return -(beta() + 5.0)
@bm.functional
def neg_6():
# Stochastic value, Tensor.neg.
# TODO: "negative" is a synonym; make it work too.
return torch.Tensor.neg(beta() + 6.0)
@bm.functional
def neg_7():
# Stochastic value, instance neg
# TODO: "negative" is a synonym; make it work too.
return (beta() + 7.0).neg()
@bm.functional
def neg_8():
# Constant value, operator.neg
return operator.neg(torch.tensor(8.0))
@bm.functional
def neg_9():
# Stochastic value, operator.neg
return operator.neg(beta() + 9.0)
@bm.functional
def add_1():
# Ordinary arithmetic, + operator
return torch.tensor(1.0 + 1.0)
@bm.functional
def add_2():
# Tensor arithmetic, + operator
return torch.tensor(2.0) + torch.tensor(2.0)
@bm.functional
def add_3():
# Tensor constants, Tensor.add.
# TODO: Tensor.add takes an optional third argument with the semantics
# add(a, b, c) --> a + b * c. Test that as well.
return torch.Tensor.add(torch.tensor(3.0), torch.tensor(3.0))
@bm.functional
def add_4():
# Tensor constant, instance add
# TODO: Tensor.add takes an optional third argument with the semantics
# a.add(b, c) --> a + b * c. Test that as well.
return torch.tensor(4.0).add(torch.tensor(4.0))
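# A minimal sketch of the three-argument form mentioned in the TODOs above.
# This assumes a torch version in which Tensor.add accepts the keyword-only
# `alpha` argument, so a.add(b, alpha=c) --> a + b * c. It is not queried by
# any test below; it only illustrates the shape such a test case could take.
@bm.functional
def add_4_alpha():
    # Hypothetical case: constant tensors, instance add with alpha.
    return torch.tensor(4.0).add(torch.tensor(4.0), alpha=2.0)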
@bm.functional
def add_5():
# Stochastic value, + operator
return beta() + 5.0
@bm.functional
def add_6():
# Stochastic value, Tensor.add.
return torch.Tensor.add(beta(), torch.tensor(6.0))
@bm.functional
def add_7():
# Stochastic value, instance add
return beta().add(torch.tensor(7.0))
@bm.functional
def add_8():
# Constant values, operator.add
return operator.add(torch.tensor(8.0), torch.tensor(8.0))
@bm.functional
def add_9():
# Stochastic values, operator.add
return operator.add(beta(), torch.tensor(9.0))
@bm.functional
def and_1():
# Ordinary arithmetic, & operator
return torch.tensor(1 & 3)
@bm.functional
def and_2():
# Tensor arithmetic, & operator
return torch.tensor(6) & torch.tensor(2)
@bm.functional
def and_3():
# Tensor constants, Tensor.bitwise_and.
return torch.Tensor.bitwise_and(torch.tensor(7), torch.tensor(3))
@bm.functional
def and_4():
# Tensor constant, instance bitwise_and
return torch.tensor(7).bitwise_and(torch.tensor(4))
@bm.functional
def and_5():
# Stochastic value, & operator
return beta() & 2
@bm.functional
def and_6():
# Stochastic value, Tensor.bitwise_and
return torch.Tensor.bitwise_and(beta(), torch.tensor(4))
@bm.functional
def and_7():
# Stochastic value, instance bitwise_and
return beta().bitwise_and(torch.tensor(8))
@bm.functional
def and_8():
# Constant values, operator.and_
return operator.and_(torch.tensor(15), torch.tensor(8))
@bm.functional
def and_9():
# Stochastic values, operator.and_
return operator.and_(beta(), torch.tensor(16))
@bm.functional
def div_1():
# Ordinary arithmetic, / operator
return torch.tensor(1.0 / 1.0)
@bm.functional
def div_2():
# Tensor arithmetic, / operator
return torch.tensor(4.0) / torch.tensor(2.0)
@bm.functional
def div_3():
# Tensor constants, Tensor.div.
# TODO: div also takes an optional rounding flag; test that.
return torch.Tensor.div(torch.tensor(6.0), torch.tensor(2.0))
@bm.functional
def div_4():
# Tensor constant, instance divide (a synonym).
return torch.tensor(8.0).divide(torch.tensor(2.0))
@bm.functional
def div_5():
# Stochastic value, / operator
return beta() / 2.0
@bm.functional
def div_6():
# Stochastic value, Tensor.true_divide (a synonym)
return torch.Tensor.true_divide(beta(), torch.tensor(4.0))
@bm.functional
def div_7():
# Stochastic value, instance div
return beta().div(torch.tensor(8.0))
@bm.functional
def div_8():
# Constant values, operator.truediv
return operator.truediv(torch.tensor(16.0), torch.tensor(2.0))
@bm.functional
def div_9():
# Stochastic values, operator.truediv
return operator.truediv(beta(), torch.tensor(16.0))
@bm.functional
def eq_1():
# Ordinary arithmetic, == operator
return torch.tensor(1.0 == 1.0)
@bm.functional
def eq_2():
# Tensor arithmetic, == operator
return torch.tensor(4.0) == torch.tensor(2.0)
@bm.functional
def eq_3():
# Tensor constants, Tensor.eq.
return torch.Tensor.eq(torch.tensor(6.0), torch.tensor(2.0))
@bm.functional
def eq_4():
# Tensor constant, instance eq
return torch.tensor(8.0).eq(torch.tensor(2.0))
@bm.functional
def eq_5():
# Stochastic value, == operator
return beta() == 4.0
@bm.functional
def eq_6():
# Stochastic value, Tensor.equal (a synonym)
return torch.Tensor.equal(beta(), torch.tensor(8.0))
@bm.functional
def eq_7():
# Stochastic value, instance equal
return beta().equal(torch.tensor(16.0))
@bm.functional
def eq_8():
# Constant values, operator.eq
return operator.eq(torch.tensor(16.0), torch.tensor(2.0))
@bm.functional
def eq_9():
# Stochastic values, operator.eq
return operator.eq(beta(), torch.tensor(32.0))
@bm.functional
def floordiv_1():
# Ordinary arithmetic, // operator
return torch.tensor(1.0 // 1.0)
@bm.functional
def floordiv_2():
# Tensor arithmetic, // operator
return torch.tensor(4.0) // torch.tensor(2.0)
@bm.functional
def floordiv_3():
# Tensor constants, Tensor.floor_divide.
return torch.Tensor.floor_divide(torch.tensor(6.0), torch.tensor(2.0))
@bm.functional
def floordiv_4():
# Tensor constant, instance floor_divide
return torch.tensor(8.0).floor_divide(torch.tensor(2.0))
@bm.functional
def floordiv_5():
# Stochastic value, // operator
return beta() // 4.0
@bm.functional
def floordiv_6():
# Stochastic value, Tensor.floor_divide
return torch.Tensor.floor_divide(beta(), torch.tensor(8.0))
@bm.functional
def floordiv_7():
# Stochastic value, instance floor_divide
return beta().floor_divide(torch.tensor(16.0))
@bm.functional
def floordiv_8():
# Constant values, operator.floordiv
return operator.floordiv(torch.tensor(16.0), torch.tensor(2.0))
@bm.functional
def floordiv_9():
# Stochastic values, operator.floordiv
return operator.floordiv(beta(), torch.tensor(32.0))
@bm.functional
def ge_1():
# Ordinary arithmetic, >= operator
return torch.tensor(1.0 >= 1.0)
@bm.functional
def ge_2():
# Tensor arithmetic, >= operator
return torch.tensor(4.0) >= torch.tensor(2.0)
@bm.functional
def ge_3():
# Tensor constants, Tensor.ge.
return torch.Tensor.ge(torch.tensor(6.0), torch.tensor(2.0))
@bm.functional
def ge_4():
# Tensor constant, instance ge
return torch.tensor(8.0).ge(torch.tensor(2.0))
@bm.functional
def ge_5():
# Stochastic value, >= operator
return beta() >= 4.0
@bm.functional
def ge_6():
# Stochastic value, Tensor.greater_equal (a synonym)
return torch.Tensor.greater_equal(beta(), torch.tensor(8.0))
@bm.functional
def ge_7():
# Stochastic value, instance greater_equal
return beta().greater_equal(torch.tensor(16.0))
@bm.functional
def ge_8():
# Constant values, operator.ge
return operator.ge(torch.tensor(16.0), torch.tensor(2.0))
@bm.functional
def ge_9():
# Stochastic values, operator.ge
return operator.ge(beta(), torch.tensor(32.0))
@bm.functional
def gt_1():
# Ordinary arithmetic, > operator
return torch.tensor(1.0 > 1.0)
@bm.functional
def gt_2():
# Tensor arithmetic, > operator
return torch.tensor(4.0) > torch.tensor(2.0)
@bm.functional
def gt_3():
# Tensor constants, Tensor.gt.
return torch.Tensor.gt(torch.tensor(6.0), torch.tensor(2.0))
@bm.functional
def gt_4():
# Tensor constant, instance gt
return torch.tensor(8.0).gt(torch.tensor(2.0))
@bm.functional
def gt_5():
# Stochastic value, > operator
return beta() > 4.0
@bm.functional
def gt_6():
# Stochastic value, Tensor.greater (a synonym)
return torch.Tensor.greater(beta(), torch.tensor(8.0))
@bm.functional
def gt_7():
# Stochastic value, instance greater
return beta().greater(torch.tensor(16.0))
@bm.functional
def gt_8():
# Constant values, operator.gt
return operator.gt(torch.tensor(16.0), torch.tensor(2.0))
@bm.functional
def gt_9():
# Stochastic values, operator.gt
return operator.gt(beta(), torch.tensor(32.0))
@bm.functional
def in_1():
# Ordinary arithmetic, in operator
return torch.tensor(1.0 in [1.0])
@bm.functional
def in_2():
# Tensor arithmetic, in operator
return torch.tensor(torch.tensor(4.0) in torch.tensor(2.0))
@bm.functional
def in_3():
# Stochastic value, in operator
return torch.tensor(beta() in torch.tensor(4.0))
@bm.functional
def in_4():
# Constant values, operator.contains
return torch.tensor(operator.contains(torch.tensor(16.0), torch.tensor(2.0)))
@bm.functional
def in_5():
# Stochastic values, operator.contains
return torch.tensor(operator.contains(torch.tensor(32.0), beta()))
@bm.functional
def is_1():
# Tensor arithmetic, is operator
return torch.tensor(torch.tensor(4.0) is torch.tensor(2.0))
@bm.functional
def is_2():
# Stochastic value, is operator
return torch.tensor(beta() is torch.tensor(4.0))
@bm.functional
def is_3():
# Constant values, operator.is_
return torch.tensor(operator.is_(torch.tensor(16.0), torch.tensor(2.0)))
@bm.functional
def is_4():
# Stochastic values, operator.is_
return torch.tensor(operator.is_(torch.tensor(32.0), beta()))
@bm.functional
def inv_1():
# Ordinary constant, ~ operator.
return torch.tensor(~1)
@bm.functional
def inv_2():
# Tensor constant; ~ operator.
return ~torch.tensor(2)
@bm.functional
def inv_3():
# Tensor constant, Tensor.bitwise_not.
return torch.Tensor.bitwise_not(torch.tensor(3))
@bm.functional
def inv_4():
# Tensor constant, instance bitwise_not
return torch.tensor(4).bitwise_not()
@bm.functional
def inv_5():
# Stochastic value, ~ operator
return ~(beta() + 5.0)
@bm.functional
def inv_6():
# Stochastic value, Tensor.bitwise_not
return torch.Tensor.bitwise_not(beta() + 6.0)
@bm.functional
def inv_7():
# Stochastic value, instance bitwise_not
return (beta() + 7.0).bitwise_not()
@bm.functional
def inv_8():
# Constant value, operator.inv
return operator.inv(torch.tensor(8))
@bm.functional
def inv_9():
# Stochastic value, operator.inv
return operator.inv(beta())
@bm.functional
def le_1():
# Ordinary arithmetic, <= operator
return torch.tensor(1.0 <= 1.0)
@bm.functional
def le_2():
# Tensor arithmetic, <= operator
return torch.tensor(4.0) <= torch.tensor(2.0)
@bm.functional
def le_3():
# Tensor constants, Tensor.le.
return torch.Tensor.le(torch.tensor(6.0), torch.tensor(2.0))
@bm.functional
def le_4():
# Tensor constant, instance le
return torch.tensor(8.0).le(torch.tensor(2.0))
@bm.functional
def le_5():
# Stochastic value, <= operator
return beta() <= 4.0
@bm.functional
def le_6():
# Stochastic value, Tensor.less_equal (a synonym)
return torch.Tensor.less_equal(beta(), torch.tensor(8.0))
@bm.functional
def le_7():
# Stochastic value, instance less_equal
return beta().less_equal(torch.tensor(16.0))
@bm.functional
def le_8():
# Constant values, operator.le
return operator.le(torch.tensor(16.0), torch.tensor(2.0))
@bm.functional
def le_9():
# Stochastic values, operator.le
return operator.le(beta(), torch.tensor(32.0))
@bm.functional
def lshift_1():
# Ordinary arithmetic, << operator
return torch.tensor(1 << 1)
@bm.functional
def lshift_2():
# Tensor arithmetic, << operator
return torch.tensor(2) << torch.tensor(2)
@bm.functional
def lshift_3():
# Tensor constants, Tensor.bitwise_left_shift.
return torch.Tensor.bitwise_left_shift(torch.tensor(6), torch.tensor(2))
@bm.functional
def lshift_4():
# Tensor constant, instance bitwise_left_shift
return torch.tensor(8).bitwise_left_shift(torch.tensor(2))
@bm.functional
def lshift_5():
# Stochastic value, << operator
return beta() << 4
@bm.functional
def lshift_6():
# Stochastic value, Tensor.bitwise_left_shift
return torch.Tensor.bitwise_left_shift(beta(), torch.tensor(8))
@bm.functional
def lshift_7():
# Stochastic value, instance bitwise_left_shift
return beta().bitwise_left_shift(torch.tensor(16))
@bm.functional
def lshift_8():
# Constant values, operator.lshift
return operator.lshift(torch.tensor(16), torch.tensor(2))
@bm.functional
def lshift_9():
# Stochastic values, operator.lshift
return operator.lshift(beta(), torch.tensor(32))
@bm.functional
def lt_1():
# Ordinary arithmetic, < operator
return torch.tensor(1.0 < 1.0)
@bm.functional
def lt_2():
# Tensor arithmetic, < operator
return torch.tensor(4.0) < torch.tensor(2.0)
@bm.functional
def lt_3():
# Tensor constants, Tensor.lt.
return torch.Tensor.lt(torch.tensor(6.0), torch.tensor(2.0))
@bm.functional
def lt_4():
# Tensor constant, instance lt
return torch.tensor(8.0).lt(torch.tensor(2.0))
@bm.functional
def lt_5():
# Stochastic value, < operator
return beta() < 4.0
@bm.functional
def lt_6():
# Stochastic value, Tensor.less (a synonym)
return torch.Tensor.less(beta(), torch.tensor(8.0))
@bm.functional
def lt_7():
# Stochastic value, instance less
return beta().less(torch.tensor(16.0))
@bm.functional
def lt_8():
# Constant values, operator.lt
return operator.lt(torch.tensor(16.0), torch.tensor(2.0))
@bm.functional
def lt_9():
# Stochastic values, operator.lt
return operator.lt(beta(), torch.tensor(32.0))
@bm.functional
def mod_1():
# Ordinary arithmetic, % operator
return torch.tensor(1 % 1)
@bm.functional
def mod_2():
# Tensor arithmetic, % operator
return torch.tensor(5.0) % torch.tensor(3.0)
@bm.functional
def mod_3():
# Tensor constants, Tensor.fmod.
return torch.Tensor.fmod(torch.tensor(11.0), torch.tensor(4.0))
@bm.functional
def mod_4():
# Tensor constant, instance remainder (a near synonym).
return torch.tensor(9.0).remainder(torch.tensor(5.0))
@bm.functional
def mod_5():
# Stochastic value, % operator
return beta() % 5.0
@bm.functional
def mod_6():
# Stochastic value, Tensor.fmod
return torch.Tensor.fmod(beta(), torch.tensor(6.0))
@bm.functional
def mod_7():
# Stochastic value, instance fmod
return beta().fmod(torch.tensor(7.0))
@bm.functional
def mod_8():
# Constant values, operator.mod
return operator.mod(torch.tensor(17.0), torch.tensor(9.0))
@bm.functional
def mod_9():
# Stochastic values, operator.mod
return operator.mod(beta(), torch.tensor(9.0))
@bm.functional
def mul_1():
# Ordinary arithmetic, * operator
return torch.tensor(1.0 * 1.0)
@bm.functional
def mul_2():
# Tensor arithmetic, * operator
return torch.tensor(2.0) * torch.tensor(2.0)
@bm.functional
def mul_3():
# Tensor constants, Tensor.mul.
return torch.Tensor.mul(torch.tensor(3.0), torch.tensor(3.0))
@bm.functional
def mul_4():
# Tensor constant, instance multiply (a synonym).
return torch.tensor(4.0).multiply(torch.tensor(4.0))
@bm.functional
def mul_5():
# Stochastic value, * operator
return beta() * 5.0
@bm.functional
def mul_6():
# Stochastic value, Tensor.multiply.
return torch.Tensor.multiply(beta(), torch.tensor(6.0))
@bm.functional
def mul_7():
# Stochastic value, instance mul
return beta().mul(torch.tensor(7.0))
@bm.functional
def mul_8():
# Constant values, operator.mul
return operator.mul(torch.tensor(8.0), torch.tensor(8.0))
@bm.functional
def mul_9():
# Stochastic values, operator.mul
return operator.mul(beta(), torch.tensor(9.0))
@bm.functional
def ne_1():
# Ordinary arithmetic, != operator
return torch.tensor(1.0 != 1.0)
@bm.functional
def ne_2():
# Tensor arithmetic, != operator
return torch.tensor(4.0) != torch.tensor(2.0)
@bm.functional
def ne_3():
# Tensor constants, Tensor.ne.
return torch.Tensor.ne(torch.tensor(6.0), torch.tensor(2.0))
@bm.functional
def ne_4():
# Tensor constant, instance ne
return torch.tensor(8.0).ne(torch.tensor(2.0))
@bm.functional
def ne_5():
# Stochastic value, != operator
return beta() != 4.0
@bm.functional
def ne_6():
# Stochastic value, Tensor.not_equal (a synonym)
return torch.Tensor.not_equal(beta(), torch.tensor(8.0))
@bm.functional
def ne_7():
# Stochastic value, instance not_equal
return beta().not_equal(torch.tensor(16.0))
@bm.functional
def ne_8():
# Constant values, operator.ne
return operator.ne(torch.tensor(16.0), torch.tensor(2.0))
@bm.functional
def ne_9():
# Stochastic values, operator.ne
return operator.ne(beta(), torch.tensor(32.0))
@bm.functional
def not_in_1():
# Ordinary arithmetic, not in operator
return torch.tensor(1.0 not in [1.0])
@bm.functional
def not_in_2():
# Tensor arithmetic, not in operator
return torch.tensor(torch.tensor(4.0) not in torch.tensor(2.0))
@bm.functional
def not_in_3():
# Stochastic value, not in operator
return torch.tensor(beta() not in torch.tensor(4.0))
@bm.functional
def is_not_1():
# Tensor arithmetic, is not operator
return torch.tensor(torch.tensor(4.0) is not torch.tensor(2.0))
@bm.functional
def is_not_2():
# Stochastic value, is not operator
return torch.tensor(beta() is not torch.tensor(4.0))
@bm.functional
def is_not_3():
# Constant values, operator.is_not
return torch.tensor(operator.is_not(torch.tensor(16.0), torch.tensor(2.0)))
@bm.functional
def is_not_4():
# Stochastic values, operator.is_not
return torch.tensor(operator.is_not(torch.tensor(32.0), beta()))
@bm.functional
def or_1():
# Ordinary arithmetic, | operator
return torch.tensor(1 | 3)
@bm.functional
def or_2():
# Tensor arithmetic, | operator
return torch.tensor(6) | torch.tensor(2)
@bm.functional
def or_3():
# Tensor constants, Tensor.bitwise_or.
return torch.Tensor.bitwise_or(torch.tensor(7), torch.tensor(3))
@bm.functional
def or_4():
# Tensor constant, instance bitwise_or
return torch.tensor(7).bitwise_or(torch.tensor(4))
@bm.functional
def or_5():
# Stochastic value, | operator
return beta() | 2
@bm.functional
def or_6():
# Stochastic value, Tensor.bitwise_or
return torch.Tensor.bitwise_or(beta(), torch.tensor(4))
@bm.functional
def or_7():
# Stochastic value, instance bitwise_or
return beta().bitwise_or(torch.tensor(8))
@bm.functional
def or_8():
# Constant values, operator.or_
return operator.or_(torch.tensor(15), torch.tensor(8))
@bm.functional
def or_9():
# Stochastic values, operator.or_
return operator.or_(beta(), torch.tensor(16))
@bm.functional
def pos_1():
# Ordinary constant, + operator.
return torch.tensor(+1.0)
@bm.functional
def pos_2():
# Tensor constant; + operator.
return +torch.tensor(2.0)
@bm.functional
def pos_5():
# Stochastic value, + operator
return +(beta() + 5.0)
@bm.functional
def pos_8():
# Constant value, operator.pos
return operator.pos(torch.tensor(8.0))
@bm.functional
def pos_9():
# Stochastic value, operator.pos
return operator.pos(beta() + 9.0)
@bm.functional
def rshift_1():
# Ordinary arithmetic, >> operator
return torch.tensor(2 >> 1)
@bm.functional
def rshift_2():
    # Tensor arithmetic, >> operator
return torch.tensor(4) >> torch.tensor(2)
@bm.functional
def rshift_3():
# Tensor constants, Tensor.bitwise_right_shift.
return torch.Tensor.bitwise_right_shift(torch.tensor(6), torch.tensor(2))
@bm.functional
def rshift_4():
# Tensor constant, instance bitwise_right_shift
return torch.tensor(8).bitwise_right_shift(torch.tensor(2))
@bm.functional
def rshift_5():
# Stochastic value, >> operator
return beta() >> 4
@bm.functional
def rshift_6():
# Stochastic value, Tensor.bitwise_right_shift
return torch.Tensor.bitwise_right_shift(beta(), torch.tensor(8))
@bm.functional
def rshift_7():
# Stochastic value, instance bitwise_right_shift
return beta().bitwise_right_shift(torch.tensor(16))
@bm.functional
def rshift_8():
# Constant values, operator.rshift
return operator.rshift(torch.tensor(16), torch.tensor(2))
@bm.functional
def rshift_9():
# Stochastic values, operator.rshift
return operator.rshift(beta(), torch.tensor(32))
@bm.functional
def sub_1():
# Ordinary arithmetic, - operator
return torch.tensor(2.0 - 1.0)
@bm.functional
def sub_2():
# Tensor arithmetic, - operator
return torch.tensor(5.0) - torch.tensor(2.0)
@bm.functional
def sub_3():
# Tensor constants, Tensor.sub.
# TODO: Tensor.sub takes an optional third argument with the semantics
# sub(a, b, c) --> a - b * c. Test that as well.
return torch.Tensor.sub(torch.tensor(6.0), torch.tensor(3.0))
@bm.functional
def sub_4():
    # Tensor constant, instance sub
    # TODO: Tensor.sub takes an optional third argument with the semantics
    # a.sub(b, c) --> a - b * c. Test that as well.
return torch.tensor(8.0).sub(torch.tensor(4.0))
@bm.functional
def sub_5():
# Stochastic value, - operator
return beta() - 5.0
@bm.functional
def sub_6():
# Stochastic value, Tensor.subtract (a synonym)
return torch.Tensor.subtract(beta(), torch.tensor(6.0))
@bm.functional
def sub_7():
# Stochastic value, instance sub
return beta().sub(torch.tensor(7.0))
@bm.functional
def sub_8():
# Constant values, operator.sub
return operator.sub(torch.tensor(16.0), torch.tensor(8.0))
@bm.functional
def sub_9():
# Stochastic values, operator.sub
return operator.sub(beta(), torch.tensor(9.0))
@bm.functional
def sum_1():
# Constant value, Tensor.sum.
return torch.Tensor.sum(torch.tensor([1.0, 1.0, 1.0]))
@bm.functional
def sum_2():
# Constant value, instance sum
return torch.tensor([2.0, 2.0, 2.0]).sum()
@bm.functional
def sum_3():
# Stochastic value, Tensor.sum
return torch.Tensor.sum(torch.tensor([beta(), norm(), 3.0]))
@bm.functional
def sum_4():
# Stochastic value, instance sum
return torch.tensor([beta(), norm(), 4.0]).sum()
@bm.functional
def xor_1():
# Ordinary arithmetic, ^ operator
return torch.tensor(1 ^ 3)
@bm.functional
def xor_2():
# Tensor arithmetic, ^ operator
return torch.tensor(6) ^ torch.tensor(2)
@bm.functional
def xor_3():
# Tensor constants, Tensor.bitwise_xor.
return torch.Tensor.bitwise_xor(torch.tensor(7), torch.tensor(3))
@bm.functional
def xor_4():
# Tensor constant, instance bitwise_xor
return torch.tensor(7).bitwise_xor(torch.tensor(4))
@bm.functional
def xor_5():
# Stochastic value, ^ operator
return beta() ^ 2
@bm.functional
def xor_6():
# Stochastic value, Tensor.bitwise_xor
return torch.Tensor.bitwise_xor(beta(), torch.tensor(4))
@bm.functional
def xor_7():
# Stochastic value, instance bitwise_xor
return beta().bitwise_xor(torch.tensor(8))
@bm.functional
def xor_8():
# Constant values, operator.xor
return operator.xor(torch.tensor(15), torch.tensor(8))
@bm.functional
def xor_9():
# Stochastic values, operator.xor
return operator.xor(beta(), torch.tensor(16))
@bm.functional
def numpy_operand():
a = np.array([0.5, 0.25])
return a * beta()
class BMGArithmeticTest(unittest.TestCase):
def test_bmg_arithmetic_logical_not(self) -> None:
self.maxDiff = None
# "not" operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
# TODO: Add test cases for not operators on Bernoulli samples.
queries = [
not_1(),
not_2(),
not_3(),
not_4(),
not_5(),
not_6(),
not_7(),
not_8(),
not_9(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses a 'not' operation unsupported by Bean Machine Graph.
The unsupported node was created in function call not_5().
The model uses a 'not' operation unsupported by Bean Machine Graph.
The unsupported node was created in function call not_6().
The model uses a 'not' operation unsupported by Bean Machine Graph.
The unsupported node was created in function call not_7().
The model uses a 'not' operation unsupported by Bean Machine Graph.
The unsupported node was created in function call not_9().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_float(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([to_real_1(), to_real_2()], {})
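        # The expected graph shows that float() on the Bernoulli sample emits a
        # ToReal node; to_real_1's two-element tensor becomes a 2x1 ToMatrix and
        # to_real_2 queries the ToReal node directly.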
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=2];
N4[label=1];
N5[label=ToReal];
N6[label=1.0];
N7[label=ToMatrix];
N8[label=Query];
N9[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N5;
N3 -> N7;
N4 -> N7;
N5 -> N7;
N5 -> N9;
N6 -> N7;
N7 -> N8;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_log(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(
[
log_1(),
log_2(),
log_3(),
log_4(),
log_5(),
log_6(),
log_7(),
],
{},
)
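        # Expected: logs of constants are folded (log 1 = 0.0, log 2 = 0.6931...),
        # while stochastic operands become Log nodes fed by a ToPosReal
        # conversion of the Beta sample.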
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=Query];
N02[label=0.6931471824645996];
N03[label=Query];
N04[label=1.0986123085021973];
N05[label=Query];
N06[label="[1.3862943649291992,1.3862943649291992]"];
N07[label=Query];
N08[label=2.0];
N09[label=Beta];
N10[label=Sample];
N11[label=ToPosReal];
N12[label=5.0];
N13[label="+"];
N14[label=Log];
N15[label=Query];
N16[label=6.0];
N17[label="+"];
N18[label=Log];
N19[label=Query];
N20[label=7.0];
N21[label="+"];
N22[label=Log];
N23[label=Query];
N00 -> N01;
N02 -> N03;
N04 -> N05;
N06 -> N07;
N08 -> N09;
N08 -> N09;
N09 -> N10;
N10 -> N11;
N11 -> N13;
N11 -> N17;
N11 -> N21;
N12 -> N13;
N13 -> N14;
N14 -> N15;
N16 -> N17;
N17 -> N18;
N18 -> N19;
N20 -> N21;
N21 -> N22;
N22 -> N23;
}"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_log10(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(
[
log10_1(),
log10_2(),
log10_3(),
log10_4(),
log10_5(),
log10_6(),
],
{},
)
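        # Expected: log10 of a stochastic quantity is lowered to a Log node
        # multiplied by log10(e) = 0.43429448190325176; constant operands are
        # folded directly.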
expected = """
digraph "graph" {
N00[label=1.0];
N01[label=Query];
N02[label=2.0];
N03[label=Beta];
N04[label=Sample];
N05[label=ToPosReal];
N06[label="+"];
N07[label=Log];
N08[label=0.43429448190325176];
N09[label="*"];
N10[label=Query];
N11[label=3.0];
N12[label=Query];
N13[label=4.0];
N14[label=Query];
N15[label=5.0];
N16[label="+"];
N17[label=Log];
N18[label="*"];
N19[label=Query];
N20[label=6.0];
N21[label="+"];
N22[label=Log];
N23[label="*"];
N24[label=Query];
N00 -> N01;
N02 -> N03;
N02 -> N03;
N02 -> N06;
N03 -> N04;
N04 -> N05;
N05 -> N06;
N05 -> N16;
N05 -> N21;
N06 -> N07;
N07 -> N09;
N08 -> N09;
N08 -> N18;
N08 -> N23;
N09 -> N10;
N11 -> N12;
N13 -> N14;
N15 -> N16;
N16 -> N17;
N17 -> N18;
N18 -> N19;
N20 -> N21;
N21 -> N22;
N22 -> N23;
N23 -> N24;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_log1p(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(
[
log1p_1(),
log1p_2(),
log1p_3(),
log1p_4(),
log1p_5(),
log1p_6(),
log1p_7(),
log1p_8(),
],
{},
)
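        # Expected: log1p of a stochastic quantity is rewritten as log(1 + x);
        # constant operands are folded (log1p(1.0) = 0.6931...).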
expected = """
digraph "graph" {
N00[label=0.6931471824645996];
N01[label=Query];
N02[label=2.0];
N03[label=Beta];
N04[label=Sample];
N05[label=1.0];
N06[label=ToPosReal];
N07[label="+"];
N08[label="+"];
N09[label=Log];
N10[label=Query];
N11[label=1.3862943649291992];
N12[label=Query];
N13[label=4.0];
N14[label="+"];
N15[label="+"];
N16[label=Log];
N17[label=Query];
N18[label=1.7917594909667969];
N19[label=Query];
N20[label=1.945910096168518];
N21[label=Query];
N22[label=7.0];
N23[label="+"];
N24[label="+"];
N25[label=Log];
N26[label=Query];
N27[label=8.0];
N28[label="+"];
N29[label="+"];
N30[label=Log];
N31[label=Query];
N00 -> N01;
N02 -> N03;
N02 -> N03;
N02 -> N07;
N03 -> N04;
N04 -> N06;
N05 -> N08;
N05 -> N15;
N05 -> N24;
N05 -> N29;
N06 -> N07;
N06 -> N14;
N06 -> N23;
N06 -> N28;
N07 -> N08;
N08 -> N09;
N09 -> N10;
N11 -> N12;
N13 -> N14;
N14 -> N15;
N15 -> N16;
N16 -> N17;
N18 -> N19;
N20 -> N21;
N22 -> N23;
N23 -> N24;
N24 -> N25;
N25 -> N26;
N27 -> N28;
N28 -> N29;
N29 -> N30;
N30 -> N31;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_log2(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(
[
log2_1(),
log2_2(),
log2_3(),
log2_4(),
log2_5(),
log2_6(),
],
{},
)
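        # Expected: log2 of a stochastic quantity is lowered to a Log node
        # multiplied by log2(e) = 1.4426950408889634; constants are folded.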
expected = """
digraph "graph" {
N00[label=1.0];
N01[label=Query];
N02[label=2.0];
N03[label=Beta];
N04[label=Sample];
N05[label=ToPosReal];
N06[label="+"];
N07[label=Log];
N08[label=1.4426950408889634];
N09[label="*"];
N10[label=Query];
N11[label=3.0];
N12[label=Query];
N13[label=4.0];
N14[label=Query];
N15[label=5.0];
N16[label="+"];
N17[label=Log];
N18[label="*"];
N19[label=Query];
N20[label=6.0];
N21[label="+"];
N22[label=Log];
N23[label="*"];
N24[label=Query];
N00 -> N01;
N02 -> N03;
N02 -> N03;
N02 -> N06;
N03 -> N04;
N04 -> N05;
N05 -> N06;
N05 -> N16;
N05 -> N21;
N06 -> N07;
N07 -> N09;
N08 -> N09;
N08 -> N18;
N08 -> N23;
N09 -> N10;
N11 -> N12;
N13 -> N14;
N15 -> N16;
N16 -> N17;
N17 -> N18;
N18 -> N19;
N20 -> N21;
N21 -> N22;
N22 -> N23;
N23 -> N24;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_sqrt(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(
[
sqrt_1(),
sqrt_2(),
sqrt_3(),
sqrt_4(),
sqrt_5(),
sqrt_6(),
],
{},
)
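        # Expected: sqrt of a stochastic quantity is lowered to the power node
        # x ** 0.5; constant operands are folded (sqrt(9.0) = 3.0).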
expected = """
digraph "graph" {
N00[label=1.0];
N01[label=Query];
N02[label=2.0];
N03[label=Beta];
N04[label=Sample];
N05[label=ToPosReal];
N06[label="+"];
N07[label=0.5];
N08[label="**"];
N09[label=Query];
N10[label=3.0];
N11[label=Query];
N12[label=4.0];
N13[label=Query];
N14[label=5.0];
N15[label="+"];
N16[label="**"];
N17[label=Query];
N18[label=6.0];
N19[label="+"];
N20[label="**"];
N21[label=Query];
N00 -> N01;
N02 -> N03;
N02 -> N03;
N02 -> N06;
N03 -> N04;
N04 -> N05;
N05 -> N06;
N05 -> N15;
N05 -> N19;
N06 -> N08;
N07 -> N08;
N07 -> N16;
N07 -> N20;
N08 -> N09;
N10 -> N11;
N12 -> N13;
N14 -> N15;
N15 -> N16;
N16 -> N17;
N18 -> N19;
N19 -> N20;
N20 -> N21;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_pow(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(
[
pow_1(),
pow_2(),
pow_3(),
pow_4(),
pow_5(),
pow_6(),
pow_7(),
pow_8(),
pow_9(),
],
{},
)
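        # Expected: constant powers are folded (3 ** 3 = 27, 4 ** 4 = 256,
        # 8 ** 2 = 64); stochastic operands become ** nodes, with a ToPosReal
        # conversion inserted where the Beta sample is used as an exponent.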
expected = """
digraph "graph" {
N00[label=1.0];
N01[label=Query];
N02[label=4.0];
N03[label=Query];
N04[label=27.0];
N05[label=Query];
N06[label=256.0];
N07[label=Query];
N08[label=2.0];
N09[label=Beta];
N10[label=Sample];
N11[label=5.0];
N12[label="**"];
N13[label=Query];
N14[label=6.0];
N15[label=ToPosReal];
N16[label="**"];
N17[label=Query];
N18[label=7.0];
N19[label="**"];
N20[label=Query];
N21[label=64.0];
N22[label=Query];
N23[label=9.0];
N24[label="**"];
N25[label=Query];
N00 -> N01;
N02 -> N03;
N04 -> N05;
N06 -> N07;
N08 -> N09;
N08 -> N09;
N09 -> N10;
N10 -> N12;
N10 -> N15;
N10 -> N24;
N11 -> N12;
N12 -> N13;
N14 -> N16;
N15 -> N16;
N15 -> N19;
N16 -> N17;
N18 -> N19;
N19 -> N20;
N21 -> N22;
N23 -> N24;
N24 -> N25;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_neg(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(
[
neg_1(),
neg_2(),
neg_3(),
neg_4(),
neg_5(),
neg_6(),
neg_7(),
neg_8(),
neg_9(),
],
{},
)
expected = """
digraph "graph" {
N00[label=-1.0];
N01[label=Query];
N02[label=-2.0];
N03[label=Query];
N04[label=-3.0];
N05[label=Query];
N06[label=-4.0];
N07[label=Query];
N08[label=2.0];
N09[label=Beta];
N10[label=Sample];
N11[label=ToPosReal];
N12[label=5.0];
N13[label="+"];
N14[label="-"];
N15[label=Query];
N16[label=6.0];
N17[label="+"];
N18[label="-"];
N19[label=Query];
N20[label=7.0];
N21[label="+"];
N22[label="-"];
N23[label=Query];
N24[label=-8.0];
N25[label=Query];
N26[label=9.0];
N27[label="+"];
N28[label="-"];
N29[label=Query];
N00 -> N01;
N02 -> N03;
N04 -> N05;
N06 -> N07;
N08 -> N09;
N08 -> N09;
N09 -> N10;
N10 -> N11;
N11 -> N13;
N11 -> N17;
N11 -> N21;
N11 -> N27;
N12 -> N13;
N13 -> N14;
N14 -> N15;
N16 -> N17;
N17 -> N18;
N18 -> N19;
N20 -> N21;
N21 -> N22;
N22 -> N23;
N24 -> N25;
N26 -> N27;
N27 -> N28;
N28 -> N29;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_add(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(
[
add_1(),
add_2(),
add_3(),
add_4(),
add_5(),
add_6(),
add_7(),
add_8(),
add_9(),
],
{},
)
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=Query];
N02[label=4.0];
N03[label=Query];
N04[label=6.0];
N05[label=Query];
N06[label=8.0];
N07[label=Query];
N08[label=2.0];
N09[label=Beta];
N10[label=Sample];
N11[label=ToPosReal];
N12[label=5.0];
N13[label="+"];
N14[label=Query];
N15[label=6.0];
N16[label="+"];
N17[label=Query];
N18[label=7.0];
N19[label="+"];
N20[label=Query];
N21[label=16.0];
N22[label=Query];
N23[label=9.0];
N24[label="+"];
N25[label=Query];
N00 -> N01;
N02 -> N03;
N04 -> N05;
N06 -> N07;
N08 -> N09;
N08 -> N09;
N09 -> N10;
N10 -> N11;
N11 -> N13;
N11 -> N16;
N11 -> N19;
N11 -> N24;
N12 -> N13;
N13 -> N14;
N15 -> N16;
N16 -> N17;
N18 -> N19;
N19 -> N20;
N21 -> N22;
N23 -> N24;
N24 -> N25;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_and(self) -> None:
self.maxDiff = None
# & operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
and_1(),
and_2(),
and_3(),
and_4(),
and_5(),
and_6(),
and_7(),
and_8(),
and_9(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses a 'bitwise and' (&) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call and_5().
The model uses a 'bitwise and' (&) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call and_6().
The model uses a 'bitwise and' (&) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call and_7().
The model uses a 'bitwise and' (&) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call and_9().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_div(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(
[
div_1(),
div_2(),
div_3(),
div_4(),
div_5(),
div_6(),
div_7(),
div_8(),
div_9(),
],
{},
)
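        # Expected: division by a constant is rewritten as multiplication by the
        # reciprocal (beta() / 2 becomes beta() * 0.5, and so on); constant
        # divisions are folded.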
expected = """
digraph "graph" {
N00[label=1.0];
N01[label=Query];
N02[label=2.0];
N03[label=Query];
N04[label=3.0];
N05[label=Query];
N06[label=4.0];
N07[label=Query];
N08[label=2.0];
N09[label=Beta];
N10[label=Sample];
N11[label=0.5];
N12[label="*"];
N13[label=Query];
N14[label=0.25];
N15[label="*"];
N16[label=Query];
N17[label=0.125];
N18[label="*"];
N19[label=Query];
N20[label=8.0];
N21[label=Query];
N22[label=0.0625];
N23[label="*"];
N24[label=Query];
N00 -> N01;
N02 -> N03;
N04 -> N05;
N06 -> N07;
N08 -> N09;
N08 -> N09;
N09 -> N10;
N10 -> N12;
N10 -> N15;
N10 -> N18;
N10 -> N23;
N11 -> N12;
N12 -> N13;
N14 -> N15;
N15 -> N16;
N17 -> N18;
N18 -> N19;
N20 -> N21;
N22 -> N23;
N23 -> N24;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_eq(self) -> None:
self.maxDiff = None
# "==" operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
eq_1(),
eq_2(),
eq_3(),
eq_4(),
eq_5(),
eq_6(),
eq_7(),
eq_8(),
eq_9(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses an equality (==) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call eq_5().
The model uses an equality (==) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call eq_6().
The model uses an equality (==) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call eq_7().
The model uses an equality (==) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call eq_9().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_floordiv(self) -> None:
self.skipTest(
"Disabling floordiv tests; produces a deprecation warning in torch."
)
self.maxDiff = None
# "floordiv" operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
floordiv_1(),
floordiv_2(),
floordiv_3(),
floordiv_4(),
floordiv_5(),
floordiv_6(),
floordiv_7(),
floordiv_8(),
floordiv_9(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses a // operation unsupported by Bean Machine Graph.
The unsupported node was created in function call floordiv_5().
The model uses a // operation unsupported by Bean Machine Graph.
The unsupported node was created in function call floordiv_6().
The model uses a // operation unsupported by Bean Machine Graph.
The unsupported node was created in function call floordiv_7().
The model uses a // operation unsupported by Bean Machine Graph.
The unsupported node was created in function call floordiv_9().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_ge(self) -> None:
self.maxDiff = None
# ">=" operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
ge_1(),
ge_2(),
ge_3(),
ge_4(),
ge_5(),
ge_6(),
ge_7(),
ge_8(),
ge_9(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses a 'greater than or equal' (>=) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call ge_5().
The model uses a 'greater than or equal' (>=) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call ge_6().
The model uses a 'greater than or equal' (>=) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call ge_7().
The model uses a 'greater than or equal' (>=) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call ge_9().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_gt(self) -> None:
self.maxDiff = None
# ">=" operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
gt_1(),
gt_2(),
gt_3(),
gt_4(),
gt_5(),
gt_6(),
gt_7(),
gt_8(),
gt_9(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses a 'greater than' (>) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call gt_5().
The model uses a 'greater than' (>) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call gt_6().
The model uses a 'greater than' (>) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call gt_7().
The model uses a 'greater than' (>) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call gt_9().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_in(self) -> None:
self.maxDiff = None
# in and not in operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
in_1(),
in_2(),
in_3(),
in_4(),
in_5(),
not_in_1(),
not_in_2(),
not_in_3(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses a 'not in' operation unsupported by Bean Machine Graph.
The unsupported node was created in function call not_in_3().
The model uses an 'in' operation unsupported by Bean Machine Graph.
The unsupported node was created in function call in_3().
The model uses an 'in' operation unsupported by Bean Machine Graph.
The unsupported node was created in function call in_5().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_is(self) -> None:
self.maxDiff = None
# is and is not operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
is_1(),
is_2(),
is_3(),
is_4(),
is_not_1(),
is_not_2(),
is_not_3(),
is_not_4(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses an 'is not' operation unsupported by Bean Machine Graph.
The unsupported node was created in function call is_not_2().
The model uses an 'is not' operation unsupported by Bean Machine Graph.
The unsupported node was created in function call is_not_4().
The model uses an 'is' operation unsupported by Bean Machine Graph.
The unsupported node was created in function call is_2().
The model uses an 'is' operation unsupported by Bean Machine Graph.
The unsupported node was created in function call is_4()."""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_inv(self) -> None:
self.maxDiff = None
# ~ operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
inv_1(),
inv_2(),
inv_3(),
inv_4(),
inv_5(),
inv_6(),
inv_7(),
inv_8(),
inv_9(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses a 'bitwise invert' (~) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call inv_5().
The model uses a 'bitwise invert' (~) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call inv_6().
The model uses a 'bitwise invert' (~) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call inv_7().
The model uses a 'bitwise invert' (~) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call inv_9().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_le(self) -> None:
self.maxDiff = None
# "<=" operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
le_1(),
le_2(),
le_3(),
le_4(),
le_5(),
le_6(),
le_7(),
le_8(),
le_9(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses a 'less than or equal' (<=) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call le_5().
The model uses a 'less than or equal' (<=) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call le_6().
The model uses a 'less than or equal' (<=) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call le_7().
The model uses a 'less than or equal' (<=) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call le_9().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_lshift(self) -> None:
self.maxDiff = None
# << operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
lshift_1(),
lshift_2(),
lshift_3(),
lshift_4(),
lshift_5(),
lshift_6(),
lshift_7(),
lshift_8(),
lshift_9(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses a 'left shift' (<<) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call lshift_5().
The model uses a 'left shift' (<<) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call lshift_6().
The model uses a 'left shift' (<<) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call lshift_7().
The model uses a 'left shift' (<<) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call lshift_9().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_lt(self) -> None:
self.maxDiff = None
# "<" operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
lt_1(),
lt_2(),
lt_3(),
lt_4(),
lt_5(),
lt_6(),
lt_7(),
lt_8(),
lt_9(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses a 'less than' (<) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call lt_5().
The model uses a 'less than' (<) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call lt_6().
The model uses a 'less than' (<) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call lt_7().
The model uses a 'less than' (<) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call lt_9().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_mod(self) -> None:
self.maxDiff = None
# % operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
mod_1(),
mod_2(),
mod_3(),
mod_4(),
mod_5(),
mod_6(),
mod_7(),
mod_8(),
mod_9(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses a modulus (%) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call mod_5().
The model uses a modulus (%) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call mod_6().
The model uses a modulus (%) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call mod_7().
The model uses a modulus (%) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call mod_9().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_mul(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(
[
mul_1(),
mul_2(),
mul_3(),
mul_4(),
mul_5(),
mul_6(),
mul_7(),
mul_8(),
mul_9(),
],
{},
)
expected = """
digraph "graph" {
N00[label=1.0];
N01[label=Query];
N02[label=4.0];
N03[label=Query];
N04[label=9.0];
N05[label=Query];
N06[label=16.0];
N07[label=Query];
N08[label=2.0];
N09[label=Beta];
N10[label=Sample];
N11[label=ToPosReal];
N12[label=5.0];
N13[label="*"];
N14[label=Query];
N15[label=6.0];
N16[label="*"];
N17[label=Query];
N18[label=7.0];
N19[label="*"];
N20[label=Query];
N21[label=64.0];
N22[label=Query];
N23[label=9.0];
N24[label="*"];
N25[label=Query];
N00 -> N01;
N02 -> N03;
N04 -> N05;
N06 -> N07;
N08 -> N09;
N08 -> N09;
N09 -> N10;
N10 -> N11;
N11 -> N13;
N11 -> N16;
N11 -> N19;
N11 -> N24;
N12 -> N13;
N13 -> N14;
N15 -> N16;
N16 -> N17;
N18 -> N19;
N19 -> N20;
N21 -> N22;
N23 -> N24;
N24 -> N25;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_ne(self) -> None:
self.maxDiff = None
# "!=" operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
ne_1(),
ne_2(),
ne_3(),
ne_4(),
ne_5(),
ne_6(),
ne_7(),
ne_8(),
ne_9(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses an inequality (!=) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call ne_5().
The model uses an inequality (!=) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call ne_6().
The model uses an inequality (!=) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call ne_7().
The model uses an inequality (!=) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call ne_9().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_or(self) -> None:
self.maxDiff = None
        # | operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
or_1(),
or_2(),
or_3(),
or_4(),
or_5(),
or_6(),
or_7(),
or_8(),
or_9(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses a 'bitwise or' (|) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call or_5().
The model uses a 'bitwise or' (|) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call or_6().
The model uses a 'bitwise or' (|) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call or_7().
The model uses a 'bitwise or' (|) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call or_9().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_rshift(self) -> None:
self.maxDiff = None
# >> operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
rshift_1(),
rshift_2(),
rshift_3(),
rshift_4(),
rshift_5(),
rshift_6(),
rshift_7(),
rshift_8(),
rshift_9(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses a 'right shift' (>>) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call rshift_5().
The model uses a 'right shift' (>>) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call rshift_6().
The model uses a 'right shift' (>>) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call rshift_7().
The model uses a 'right shift' (>>) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call rshift_9().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_pos(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(
[
pos_1(),
pos_2(),
pos_5(),
pos_8(),
pos_9(),
],
{},
)
expected = """
digraph "graph" {
N00[label=1.0];
N01[label=Query];
N02[label=2.0];
N03[label=Query];
N04[label=2.0];
N05[label=Beta];
N06[label=Sample];
N07[label=ToPosReal];
N08[label=5.0];
N09[label="+"];
N10[label=Query];
N11[label=8.0];
N12[label=Query];
N13[label=9.0];
N14[label="+"];
N15[label=Query];
N00 -> N01;
N02 -> N03;
N04 -> N05;
N04 -> N05;
N05 -> N06;
N06 -> N07;
N07 -> N09;
N07 -> N14;
N08 -> N09;
N09 -> N10;
N11 -> N12;
N13 -> N14;
N14 -> N15;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_sub(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(
[
sub_1(),
sub_2(),
sub_3(),
sub_4(),
sub_5(),
sub_6(),
sub_7(),
sub_8(),
sub_9(),
],
{},
)
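        # Expected: subtracting a constant from the stochastic value is lowered
        # to adding the negated constant (beta() - 5.0 becomes ToReal of the
        # sample plus -5.0); constant subtractions are folded.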
expected = """
digraph "graph" {
N00[label=1.0];
N01[label=Query];
N02[label=3.0];
N03[label=Query];
N04[label=Query];
N05[label=4.0];
N06[label=Query];
N07[label=2.0];
N08[label=Beta];
N09[label=Sample];
N10[label=ToReal];
N11[label=-5.0];
N12[label="+"];
N13[label=Query];
N14[label=-6.0];
N15[label="+"];
N16[label=Query];
N17[label=-7.0];
N18[label="+"];
N19[label=Query];
N20[label=8.0];
N21[label=Query];
N22[label=-9.0];
N23[label="+"];
N24[label=Query];
N00 -> N01;
N02 -> N03;
N02 -> N04;
N05 -> N06;
N07 -> N08;
N07 -> N08;
N08 -> N09;
N09 -> N10;
N10 -> N12;
N10 -> N15;
N10 -> N18;
N10 -> N23;
N11 -> N12;
N12 -> N13;
N14 -> N15;
N15 -> N16;
N17 -> N18;
N18 -> N19;
N20 -> N21;
N22 -> N23;
N23 -> N24;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_bmg_arithmetic_sum(self) -> None:
self.maxDiff = None
queries = [
sum_1(),
sum_2(),
sum_3(),
sum_4(),
]
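        # Expected: sums over constant tensors are folded (3.0, 6.0); a tensor
        # containing stochastic elements is built with ToMatrix and summed with
        # a MatrixSum node.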
expected = """
digraph "graph" {
N00[label=3.0];
N01[label=Query];
N02[label=6.0];
N03[label=Query];
N04[label=2.0];
N05[label=Beta];
N06[label=Sample];
N07[label=0.0];
N08[label=1.0];
N09[label=Normal];
N10[label=Sample];
N11[label=3];
N12[label=1];
N13[label=ToReal];
N14[label=3.0];
N15[label=ToMatrix];
N16[label=MatrixSum];
N17[label=Query];
N18[label=4.0];
N19[label=ToMatrix];
N20[label=MatrixSum];
N21[label=Query];
N00 -> N01;
N02 -> N03;
N04 -> N05;
N04 -> N05;
N05 -> N06;
N06 -> N13;
N07 -> N09;
N08 -> N09;
N09 -> N10;
N10 -> N15;
N10 -> N19;
N11 -> N15;
N11 -> N19;
N12 -> N15;
N12 -> N19;
N13 -> N15;
N13 -> N19;
N14 -> N15;
N15 -> N16;
N16 -> N17;
N18 -> N19;
N19 -> N20;
N20 -> N21;
}
"""
observed = BMGInference().to_dot(queries, {})
self.assertEqual(expected.strip(), observed.strip())
def test_bmg_arithmetic_xor(self) -> None:
self.maxDiff = None
# ^ operators are not yet properly supported by the compiler/BMG;
# update this test when we get them working.
queries = [
xor_1(),
xor_2(),
xor_3(),
xor_4(),
xor_5(),
xor_6(),
xor_7(),
xor_8(),
xor_9(),
]
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, {}, 1)
expected = """
The model uses a 'bitwise xor' (^) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call xor_5().
The model uses a 'bitwise xor' (^) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call xor_6().
The model uses a 'bitwise xor' (^) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call xor_7().
The model uses a 'bitwise xor' (^) operation unsupported by Bean Machine Graph.
The unsupported node was created in function call xor_9().
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_exp(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(
[
exp_1(),
exp_2(),
exp_3(),
exp_4(),
exp_5(),
exp_6(),
exp_7(),
],
{},
)
expected = """
digraph "graph" {
N00[label=2.7182817459106445];
N01[label=Query];
N02[label=7.389056205749512];
N03[label=Query];
N04[label=20.08553695678711];
N05[label=Query];
N06[label="[54.598148345947266,54.598148345947266]"];
N07[label=Query];
N08[label=2.0];
N09[label=Beta];
N10[label=Sample];
N11[label=ToPosReal];
N12[label=5.0];
N13[label="+"];
N14[label=Exp];
N15[label=Query];
N16[label=6.0];
N17[label="+"];
N18[label=Exp];
N19[label=Query];
N20[label=7.0];
N21[label="+"];
N22[label=Exp];
N23[label=Query];
N00 -> N01;
N02 -> N03;
N04 -> N05;
N06 -> N07;
N08 -> N09;
N08 -> N09;
N09 -> N10;
N10 -> N11;
N11 -> N13;
N11 -> N17;
N11 -> N21;
N12 -> N13;
N13 -> N14;
N14 -> N15;
N16 -> N17;
N17 -> N18;
N18 -> N19;
N20 -> N21;
N21 -> N22;
N22 -> N23;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_exp2(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(
[
exp2_1(),
exp2_2(),
exp2_3(),
exp2_4(),
exp2_5(),
exp2_6(),
exp2_7(),
exp2_8(),
],
{},
)
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=Query];
N02[label=2.0];
N03[label=Beta];
N04[label=Sample];
N05[label=ToPosReal];
N06[label="+"];
N07[label="**"];
N08[label=Query];
N09[label=8.0];
N10[label=Query];
N11[label=4.0];
N12[label="+"];
N13[label="**"];
N14[label=Query];
N15[label=32.0];
N16[label=Query];
N17[label=64.0];
N18[label=Query];
N19[label=7.0];
N20[label="+"];
N21[label="**"];
N22[label=Query];
N23[label=8.0];
N24[label="+"];
N25[label="**"];
N26[label=Query];
N00 -> N01;
N02 -> N03;
N02 -> N03;
N02 -> N06;
N02 -> N07;
N02 -> N13;
N02 -> N21;
N02 -> N25;
N03 -> N04;
N04 -> N05;
N05 -> N06;
N05 -> N12;
N05 -> N20;
N05 -> N24;
N06 -> N07;
N07 -> N08;
N09 -> N10;
N11 -> N12;
N12 -> N13;
N13 -> N14;
N15 -> N16;
N17 -> N18;
N19 -> N20;
N20 -> N21;
N21 -> N22;
N23 -> N24;
N24 -> N25;
N25 -> N26;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_expm1(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([expm1_prob()], {})
expected = """
digraph "graph" {
N0[label=2.0];
N1[label=Beta];
N2[label=Sample];
N3[label=ToPosReal];
N4[label=ExpM1];
N5[label=Query];
N0 -> N1;
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N4 -> N5;
}"""
self.assertEqual(observed.strip(), expected.strip())
observed = BMGInference().to_dot([expm1_real()], {})
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=ExpM1];
N5[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N4 -> N5;
}"""
self.assertEqual(observed.strip(), expected.strip())
observed = BMGInference().to_dot([expm1_negreal()], {})
expected = """
digraph "graph" {
N0[label=1.0];
N1[label=HalfCauchy];
N2[label=Sample];
N3[label="-"];
N4[label=ExpM1];
N5[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N4 -> N5;
}"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_arithmetic_logistic(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([logistic_prob()], {})
expected = """
digraph "graph" {
N0[label=2.0];
N1[label=Beta];
N2[label=Sample];
N3[label=ToReal];
N4[label=Logistic];
N5[label=Query];
N0 -> N1;
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N4 -> N5;
}"""
self.assertEqual(observed.strip(), expected.strip())
observed = BMGInference().to_dot([logistic_real()], {})
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Logistic];
N5[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N4 -> N5;
}"""
self.assertEqual(observed.strip(), expected.strip())
observed = BMGInference().to_dot([logistic_negreal()], {})
expected = """
digraph "graph" {
N0[label=1.0];
N1[label=HalfCauchy];
N2[label=Sample];
N3[label="-"];
N4[label=ToReal];
N5[label=Logistic];
N6[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N4 -> N5;
N5 -> N6;
}"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_misc_arithmetic(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([stochastic_arithmetic()], {})
expected = """
digraph "graph" {
N00[label=0.5];
N01[label=Bernoulli];
N02[label=Sample];
N03[label=0.6000000238418579];
N04[label=Bernoulli];
N05[label=Sample];
N06[label=-0.010050326585769653];
N07[label=-4.605170249938965];
N08[label=0.0];
N09[label=if];
N10[label=if];
N11[label="+"];
N12[label=Exp];
N13[label=complement];
N14[label=Bernoulli];
N15[label=Sample];
N16[label=Query];
N00 -> N01;
N01 -> N02;
N02 -> N09;
N03 -> N04;
N04 -> N05;
N05 -> N10;
N06 -> N11;
N07 -> N09;
N07 -> N10;
N08 -> N09;
N08 -> N10;
N09 -> N11;
N10 -> N11;
N11 -> N12;
N12 -> N13;
N13 -> N14;
N14 -> N15;
N15 -> N16;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_bmg_neg_of_neg(self) -> None:
# This test shows that we treat torch.neg the same as the unary negation
        # operator when generating a graph. Note that since this produces
# a neg-of-neg situation, the optimizer then removes both of them.
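        # A hypothetical sketch of the kind of model being exercised here (the
        # real neg_of_neg() is defined earlier in this file; the names below are
        # only illustrative assumptions):
        #
        #   @bm.random_variable
        #   def neg_of_neg_sketch():
        #       return Normal(-torch.neg(some_normal_sample()), 1.0)
        #
        # The unary minus and torch.neg each contribute a negate node; the pair
        # cancels, which is why no "-" node appears in the expected graph below.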
self.maxDiff = None
observed = BMGInference().to_dot([neg_of_neg()], {})
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Normal];
N5[label=Sample];
N6[label=Query];
N0 -> N2;
N1 -> N2;
N1 -> N4;
N2 -> N3;
N3 -> N4;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_bmg_subtractions(self) -> None:
# TODO: Notice in this code generation we end up with
# the path:
#
# Beta -> Sample -> ToPosReal -> Negate -> ToReal -> MultiAdd
#
# We could optimize this path to
#
# Beta -> Sample -> ToReal -> Negate -> MultiAdd
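        # The rewrite would be safe because negating a positive real yields a
        # negative real, which must be widened to real before the MultiAdd
        # anyway; converting the sample to real first and then negating gives
        # the same real value with one fewer conversion node.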
self.maxDiff = None
observed = BMGInference().to_dot([subtractions()], {})
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=2.0];
N05[label=Beta];
N06[label=Sample];
N07[label=HalfCauchy];
N08[label=Sample];
N09[label=ToPosReal];
N10[label="-"];
N11[label=ToReal];
N12[label=ToReal];
N13[label="-"];
N14[label=ToReal];
N15[label="+"];
N16[label="-"];
N17[label="+"];
N18[label=Query];
N00 -> N02;
N01 -> N02;
N01 -> N07;
N02 -> N03;
N03 -> N17;
N04 -> N05;
N04 -> N05;
N05 -> N06;
N06 -> N09;
N06 -> N12;
N07 -> N08;
N08 -> N13;
N09 -> N10;
N10 -> N11;
N11 -> N17;
N12 -> N15;
N13 -> N14;
N14 -> N15;
N15 -> N16;
N16 -> N17;
N17 -> N18;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_unsupported_operands(self) -> None:
self.maxDiff = None
with self.assertRaises(ValueError) as ex:
BMGInference().infer([unsupported_add()], {}, 1)
expected = (
"A constant value used as an operand of a stochastic "
+ "operation is required to be bool, int, float or tensor. "
+ "This model uses a value of type str."
)
observed = str(ex.exception)
self.assertEqual(expected.strip(), observed.strip())
def test_tensor_mutations_augmented_assignment(self) -> None:
self.maxDiff = None
# See notes in mutating_assignments() for details
observed = BMGInference().to_dot([mutating_assignments()], {})
expected = """
digraph "graph" {
N0[label=2.0];
N1[label=Beta];
N2[label=Sample];
N3[label=ToPosReal];
N4[label=3.0];
N5[label="*"];
N6[label=7.0];
N7[label="+"];
N8[label=Query];
N0 -> N1;
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N5;
N4 -> N5;
N5 -> N7;
N6 -> N7;
N7 -> N8;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_numpy_operand(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([numpy_operand()], {})
expected = """
digraph "graph" {
N0[label=2.0];
N1[label=Beta];
N2[label=Sample];
N3[label="[0.5,0.25]"];
N4[label=MatrixScale];
N5[label=Query];
N0 -> N1;
N0 -> N1;
N1 -> N2;
N2 -> N4;
N3 -> N4;
N4 -> N5;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/bmg_arithmetic_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from torch import Size
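# Hand-built graphs still need an RVIdentifier to attach queries to; this
# helper fabricates a dummy identifier whose lambda and arguments are mere
# placeholders and are not expected to be invoked by these tests.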
def _rv_id() -> RVIdentifier:
return RVIdentifier(lambda a, b: a, (1, 1))
class FixMatrixOpTest(unittest.TestCase):
def test_fix_matrix_addition(self) -> None:
self.maxDiff = None
bmg = BMGraphBuilder()
zeros = bmg.add_real_matrix(torch.zeros(2))
ones = bmg.add_pos_real_matrix(torch.ones(2))
tensor_elements = []
for index in range(0, 2):
index_node = bmg.add_natural(index)
index_mu = bmg.add_vector_index(zeros, index_node)
index_sigma = bmg.add_vector_index(ones, index_node)
normal = bmg.add_normal(index_mu, index_sigma)
sample = bmg.add_sample(normal)
tensor_elements.append(sample)
matrix = bmg.add_tensor(Size([2]), *tensor_elements)
exp = bmg.add_matrix_exp(matrix)
mult = bmg.add_elementwise_multiplication(matrix, matrix)
add = bmg.add_matrix_addition(exp, mult)
bmg.add_query(add, _rv_id())
observed = to_dot(bmg, after_transform=False)
expectation = """
digraph "graph" {
N00[label="[0.0,0.0]"];
N01[label=0];
N02[label=index];
N03[label="[1.0,1.0]"];
N04[label=index];
N05[label=Normal];
N06[label=Sample];
N07[label=1];
N08[label=index];
N09[label=index];
N10[label=Normal];
N11[label=Sample];
N12[label=Tensor];
N13[label=MatrixExp];
N14[label=ElementwiseMult];
N15[label=MatrixAdd];
N16[label=Query];
N00 -> N02[label=left];
N00 -> N08[label=left];
N01 -> N02[label=right];
N01 -> N04[label=right];
N02 -> N05[label=mu];
N03 -> N04[label=left];
N03 -> N09[label=left];
N04 -> N05[label=sigma];
N05 -> N06[label=operand];
N06 -> N12[label=left];
N07 -> N08[label=right];
N07 -> N09[label=right];
N08 -> N10[label=mu];
N09 -> N10[label=sigma];
N10 -> N11[label=operand];
N11 -> N12[label=right];
N12 -> N13[label=operand];
N12 -> N14[label=left];
N12 -> N14[label=right];
N13 -> N15[label=left];
N14 -> N15[label=right];
N15 -> N16[label=operator];
}
"""
self.assertEqual(expectation.strip(), observed.strip())
observed = to_dot(bmg, after_transform=True)
expectation = """
digraph "graph" {
N00[label="[0.0,0.0]"];
N01[label=0];
N02[label=index];
N03[label="[1.0,1.0]"];
N04[label=index];
N05[label=Normal];
N06[label=Sample];
N07[label=1];
N08[label=index];
N09[label=index];
N10[label=Normal];
N11[label=Sample];
N12[label=2];
N13[label=ToMatrix];
N14[label=MatrixExp];
N15[label=ToRealMatrix];
N16[label=ElementwiseMult];
N17[label=MatrixAdd];
N18[label=Query];
N00 -> N02[label=left];
N00 -> N08[label=left];
N01 -> N02[label=right];
N01 -> N04[label=right];
N02 -> N05[label=mu];
N03 -> N04[label=left];
N03 -> N09[label=left];
N04 -> N05[label=sigma];
N05 -> N06[label=operand];
N06 -> N13[label=0];
N07 -> N08[label=right];
N07 -> N09[label=right];
N07 -> N13[label=columns];
N08 -> N10[label=mu];
N09 -> N10[label=sigma];
N10 -> N11[label=operand];
N11 -> N13[label=1];
N12 -> N13[label=rows];
N13 -> N14[label=operand];
N13 -> N16[label=left];
N13 -> N16[label=right];
N14 -> N15[label=operand];
N15 -> N17[label=left];
N16 -> N17[label=right];
N17 -> N18[label=operator];
}
"""
self.assertEqual(expectation.strip(), observed.strip())
generated_graph = to_bmg_graph(bmg)
observed = generated_graph.graph.to_dot()
expectation = """
digraph "graph" {
N0[label="matrix"];
N1[label="0"];
N2[label="Index"];
N3[label="matrix"];
N4[label="Index"];
N5[label="Normal"];
N6[label="~"];
N7[label="1"];
N8[label="Index"];
N9[label="Index"];
N10[label="Normal"];
N11[label="~"];
N12[label="2"];
N13[label="ToMatrix"];
N14[label="MatrixExp"];
N15[label="ToReal"];
N16[label="ElementwiseMultiply"];
N17[label="MatrixAdd"];
N0 -> N2;
N0 -> N8;
N1 -> N2;
N1 -> N4;
N2 -> N5;
N3 -> N4;
N3 -> N9;
N4 -> N5;
N5 -> N6;
N6 -> N13;
N7 -> N8;
N7 -> N9;
N7 -> N13;
N8 -> N10;
N9 -> N10;
N10 -> N11;
N11 -> N13;
N12 -> N13;
N13 -> N14;
N13 -> N16;
N13 -> N16;
N14 -> N15;
N15 -> N17;
N16 -> N17;
Q0[label="Query"];
N17 -> Q0;
}
"""
self.assertEqual(expectation.strip(), observed.strip())
def test_fix_elementwise_multiply(self) -> None:
self.maxDiff = None
bmg = BMGraphBuilder()
zeros = bmg.add_real_matrix(torch.zeros(2))
ones = bmg.add_pos_real_matrix(torch.ones(2))
tensor_elements = []
for index in range(0, 2):
index_node = bmg.add_natural(index)
index_mu = bmg.add_vector_index(zeros, index_node)
index_sigma = bmg.add_vector_index(ones, index_node)
normal = bmg.add_normal(index_mu, index_sigma)
sample = bmg.add_sample(normal)
tensor_elements.append(sample)
matrix = bmg.add_tensor(Size([2]), *tensor_elements)
exp = bmg.add_matrix_exp(matrix)
add = bmg.add_matrix_addition(matrix, matrix)
mult = bmg.add_elementwise_multiplication(exp, add)
sum = bmg.add_matrix_sum(mult)
bmg.add_query(sum, _rv_id())
observed = to_dot(bmg, after_transform=False)
expectation = """
digraph "graph" {
N00[label="[0.0,0.0]"];
N01[label=0];
N02[label=index];
N03[label="[1.0,1.0]"];
N04[label=index];
N05[label=Normal];
N06[label=Sample];
N07[label=1];
N08[label=index];
N09[label=index];
N10[label=Normal];
N11[label=Sample];
N12[label=Tensor];
N13[label=MatrixExp];
N14[label=MatrixAdd];
N15[label=ElementwiseMult];
N16[label=MatrixSum];
N17[label=Query];
N00 -> N02[label=left];
N00 -> N08[label=left];
N01 -> N02[label=right];
N01 -> N04[label=right];
N02 -> N05[label=mu];
N03 -> N04[label=left];
N03 -> N09[label=left];
N04 -> N05[label=sigma];
N05 -> N06[label=operand];
N06 -> N12[label=left];
N07 -> N08[label=right];
N07 -> N09[label=right];
N08 -> N10[label=mu];
N09 -> N10[label=sigma];
N10 -> N11[label=operand];
N11 -> N12[label=right];
N12 -> N13[label=operand];
N12 -> N14[label=left];
N12 -> N14[label=right];
N13 -> N15[label=left];
N14 -> N15[label=right];
N15 -> N16[label=operand];
N16 -> N17[label=operator];
}
"""
self.assertEqual(expectation.strip(), observed.strip())
observed = to_dot(bmg, after_transform=True)
expectation = """
digraph "graph" {
N00[label="[0.0,0.0]"];
N01[label=0];
N02[label=index];
N03[label="[1.0,1.0]"];
N04[label=index];
N05[label=Normal];
N06[label=Sample];
N07[label=1];
N08[label=index];
N09[label=index];
N10[label=Normal];
N11[label=Sample];
N12[label=2];
N13[label=ToMatrix];
N14[label=MatrixExp];
N15[label=ToRealMatrix];
N16[label=MatrixAdd];
N17[label=ElementwiseMult];
N18[label=MatrixSum];
N19[label=Query];
N00 -> N02[label=left];
N00 -> N08[label=left];
N01 -> N02[label=right];
N01 -> N04[label=right];
N02 -> N05[label=mu];
N03 -> N04[label=left];
N03 -> N09[label=left];
N04 -> N05[label=sigma];
N05 -> N06[label=operand];
N06 -> N13[label=0];
N07 -> N08[label=right];
N07 -> N09[label=right];
N07 -> N13[label=columns];
N08 -> N10[label=mu];
N09 -> N10[label=sigma];
N10 -> N11[label=operand];
N11 -> N13[label=1];
N12 -> N13[label=rows];
N13 -> N14[label=operand];
N13 -> N16[label=left];
N13 -> N16[label=right];
N14 -> N15[label=operand];
N15 -> N17[label=left];
N16 -> N17[label=right];
N17 -> N18[label=operand];
N18 -> N19[label=operator];
}
"""
self.assertEqual(expectation.strip(), observed.strip())
generated_graph = to_bmg_graph(bmg)
observed = generated_graph.graph.to_dot()
expectation = """
digraph "graph" {
N0[label="matrix"];
N1[label="0"];
N2[label="Index"];
N3[label="matrix"];
N4[label="Index"];
N5[label="Normal"];
N6[label="~"];
N7[label="1"];
N8[label="Index"];
N9[label="Index"];
N10[label="Normal"];
N11[label="~"];
N12[label="2"];
N13[label="ToMatrix"];
N14[label="MatrixExp"];
N15[label="ToReal"];
N16[label="MatrixAdd"];
N17[label="ElementwiseMultiply"];
N18[label="MatrixSum"];
N0 -> N2;
N0 -> N8;
N1 -> N2;
N1 -> N4;
N2 -> N5;
N3 -> N4;
N3 -> N9;
N4 -> N5;
N5 -> N6;
N6 -> N13;
N7 -> N8;
N7 -> N9;
N7 -> N13;
N8 -> N10;
N9 -> N10;
N10 -> N11;
N11 -> N13;
N12 -> N13;
N13 -> N14;
N13 -> N16;
N13 -> N16;
N14 -> N15;
N15 -> N17;
N16 -> N17;
N17 -> N18;
Q0[label="Query"];
N18 -> Q0;
}
"""
self.assertEqual(expectation.strip(), observed.strip())
def test_fix_matrix_sum(self) -> None:
self.maxDiff = None
bmg = BMGraphBuilder()
probs = bmg.add_real_matrix(torch.tensor([[0.75, 0.25], [0.125, 0.875]]))
tensor_elements = []
for row in range(0, 2):
row_node = bmg.add_natural(row)
row_prob = bmg.add_column_index(probs, row_node)
for column in range(0, 2):
col_index = bmg.add_natural(column)
prob = bmg.add_vector_index(row_prob, col_index)
bernoulli = bmg.add_bernoulli(prob)
sample = bmg.add_sample(bernoulli)
tensor_elements.append(sample)
matrix = bmg.add_tensor(Size([2, 2]), *tensor_elements)
sum = bmg.add_matrix_sum(matrix)
bmg.add_query(sum, _rv_id())
observed_beanstalk = to_dot(bmg, after_transform=True)
expected = """
digraph "graph" {
N00[label="[[0.75,0.25],\\\\n[0.125,0.875]]"];
N01[label=0];
N02[label=ColumnIndex];
N03[label=index];
N04[label=ToProb];
N05[label=Bernoulli];
N06[label=Sample];
N07[label=1];
N08[label=index];
N09[label=ToProb];
N10[label=Bernoulli];
N11[label=Sample];
N12[label=ColumnIndex];
N13[label=index];
N14[label=ToProb];
N15[label=Bernoulli];
N16[label=Sample];
N17[label=index];
N18[label=ToProb];
N19[label=Bernoulli];
N20[label=Sample];
N21[label=2];
N22[label=ToMatrix];
N23[label=ToRealMatrix];
N24[label=MatrixSum];
N25[label=Query];
N00 -> N02[label=left];
N00 -> N12[label=left];
N01 -> N02[label=right];
N01 -> N03[label=right];
N01 -> N13[label=right];
N02 -> N03[label=left];
N02 -> N08[label=left];
N03 -> N04[label=operand];
N04 -> N05[label=probability];
N05 -> N06[label=operand];
N06 -> N22[label=0];
N07 -> N08[label=right];
N07 -> N12[label=right];
N07 -> N17[label=right];
N08 -> N09[label=operand];
N09 -> N10[label=probability];
N10 -> N11[label=operand];
N11 -> N22[label=1];
N12 -> N13[label=left];
N12 -> N17[label=left];
N13 -> N14[label=operand];
N14 -> N15[label=probability];
N15 -> N16[label=operand];
N16 -> N22[label=2];
N17 -> N18[label=operand];
N18 -> N19[label=probability];
N19 -> N20[label=operand];
N20 -> N22[label=3];
N21 -> N22[label=columns];
N21 -> N22[label=rows];
N22 -> N23[label=operand];
N23 -> N24[label=operand];
N24 -> N25[label=operator];
}
"""
self.assertEqual(observed_beanstalk.strip(), expected.strip())
generated_graph = to_bmg_graph(bmg)
observed_bmg = generated_graph.graph.to_dot()
expectation = """
digraph "graph" {
N0[label="matrix"];
N1[label="0"];
N2[label="ColumnIndex"];
N3[label="Index"];
N4[label="ToProb"];
N5[label="Bernoulli"];
N6[label="~"];
N7[label="1"];
N8[label="Index"];
N9[label="ToProb"];
N10[label="Bernoulli"];
N11[label="~"];
N12[label="ColumnIndex"];
N13[label="Index"];
N14[label="ToProb"];
N15[label="Bernoulli"];
N16[label="~"];
N17[label="Index"];
N18[label="ToProb"];
N19[label="Bernoulli"];
N20[label="~"];
N21[label="2"];
N22[label="ToMatrix"];
N23[label="ToReal"];
N24[label="MatrixSum"];
N0 -> N2;
N0 -> N12;
N1 -> N2;
N1 -> N3;
N1 -> N13;
N2 -> N3;
N2 -> N8;
N3 -> N4;
N4 -> N5;
N5 -> N6;
N6 -> N22;
N7 -> N8;
N7 -> N12;
N7 -> N17;
N8 -> N9;
N9 -> N10;
N10 -> N11;
N11 -> N22;
N12 -> N13;
N12 -> N17;
N13 -> N14;
N14 -> N15;
N15 -> N16;
N16 -> N22;
N17 -> N18;
N18 -> N19;
N19 -> N20;
N20 -> N22;
N21 -> N22;
N21 -> N22;
N22 -> N23;
N23 -> N24;
Q0[label="Query"];
N24 -> Q0;
}
"""
self.assertEqual(expectation.strip(), observed_bmg.strip())
def test_fix_matrix_exp_log_phi(self) -> None:
self.maxDiff = None
bmg = BMGraphBuilder()
probs = bmg.add_real_matrix(torch.tensor([[0.75, 0.25], [0.125, 0.875]]))
tensor_elements = []
for row in range(0, 2):
row_node = bmg.add_natural(row)
row_prob = bmg.add_column_index(probs, row_node)
for column in range(0, 2):
col_index = bmg.add_natural(column)
prob = bmg.add_vector_index(row_prob, col_index)
bernoulli = bmg.add_bernoulli(prob)
sample = bmg.add_sample(bernoulli)
tensor_elements.append(sample)
matrix = bmg.add_tensor(Size([2, 2]), *tensor_elements)
me = bmg.add_matrix_exp(matrix)
ml = bmg.add_matrix_log(matrix)
mp = bmg.add_matrix_phi(matrix)
bmg.add_query(me, _rv_id())
bmg.add_query(ml, _rv_id())
bmg.add_query(mp, _rv_id())
observed_beanstalk = to_dot(bmg, after_transform=True)
expectation = """
digraph "graph" {
N00[label="[[0.75,0.25],\\\\n[0.125,0.875]]"];
N01[label=0];
N02[label=ColumnIndex];
N03[label=index];
N04[label=ToProb];
N05[label=Bernoulli];
N06[label=Sample];
N07[label=1];
N08[label=index];
N09[label=ToProb];
N10[label=Bernoulli];
N11[label=Sample];
N12[label=ColumnIndex];
N13[label=index];
N14[label=ToProb];
N15[label=Bernoulli];
N16[label=Sample];
N17[label=index];
N18[label=ToProb];
N19[label=Bernoulli];
N20[label=Sample];
N21[label=2];
N22[label=ToMatrix];
N23[label=ToRealMatrix];
N24[label=MatrixExp];
N25[label=Query];
N26[label=ToPosRealMatrix];
N27[label=MatrixLog];
N28[label=Query];
N29[label=MatrixPhi];
N30[label=Query];
N00 -> N02[label=left];
N00 -> N12[label=left];
N01 -> N02[label=right];
N01 -> N03[label=right];
N01 -> N13[label=right];
N02 -> N03[label=left];
N02 -> N08[label=left];
N03 -> N04[label=operand];
N04 -> N05[label=probability];
N05 -> N06[label=operand];
N06 -> N22[label=0];
N07 -> N08[label=right];
N07 -> N12[label=right];
N07 -> N17[label=right];
N08 -> N09[label=operand];
N09 -> N10[label=probability];
N10 -> N11[label=operand];
N11 -> N22[label=1];
N12 -> N13[label=left];
N12 -> N17[label=left];
N13 -> N14[label=operand];
N14 -> N15[label=probability];
N15 -> N16[label=operand];
N16 -> N22[label=2];
N17 -> N18[label=operand];
N18 -> N19[label=probability];
N19 -> N20[label=operand];
N20 -> N22[label=3];
N21 -> N22[label=columns];
N21 -> N22[label=rows];
N22 -> N23[label=operand];
N22 -> N26[label=operand];
N23 -> N24[label=operand];
N23 -> N29[label=operand];
N24 -> N25[label=operator];
N26 -> N27[label=operand];
N27 -> N28[label=operator];
N29 -> N30[label=operator];
}
"""
self.assertEqual(expectation.strip(), observed_beanstalk.strip())
generated_graph = to_bmg_graph(bmg)
observed_bmg = generated_graph.graph.to_dot()
expectation = """
digraph "graph" {
N0[label="matrix"];
N1[label="0"];
N2[label="ColumnIndex"];
N3[label="Index"];
N4[label="ToProb"];
N5[label="Bernoulli"];
N6[label="~"];
N7[label="1"];
N8[label="Index"];
N9[label="ToProb"];
N10[label="Bernoulli"];
N11[label="~"];
N12[label="ColumnIndex"];
N13[label="Index"];
N14[label="ToProb"];
N15[label="Bernoulli"];
N16[label="~"];
N17[label="Index"];
N18[label="ToProb"];
N19[label="Bernoulli"];
N20[label="~"];
N21[label="2"];
N22[label="ToMatrix"];
N23[label="ToReal"];
N24[label="MatrixExp"];
N25[label="ToPosReal"];
N26[label="MatrixLog"];
N27[label="MatrixPhi"];
N0 -> N2;
N0 -> N12;
N1 -> N2;
N1 -> N3;
N1 -> N13;
N2 -> N3;
N2 -> N8;
N3 -> N4;
N4 -> N5;
N5 -> N6;
N6 -> N22;
N7 -> N8;
N7 -> N12;
N7 -> N17;
N8 -> N9;
N9 -> N10;
N10 -> N11;
N11 -> N22;
N12 -> N13;
N12 -> N17;
N13 -> N14;
N14 -> N15;
N15 -> N16;
N16 -> N22;
N17 -> N18;
N18 -> N19;
N19 -> N20;
N20 -> N22;
N21 -> N22;
N21 -> N22;
N22 -> N23;
N22 -> N25;
N23 -> N24;
N23 -> N27;
N25 -> N26;
Q0[label="Query"];
N24 -> Q0;
Q1[label="Query"];
N26 -> Q1;
Q2[label="Query"];
N27 -> Q2;
}
"""
self.assertEqual(expectation.strip(), observed_bmg.strip())
def test_fix_matrix_complement(self) -> None:
self.maxDiff = None
bmg = BMGraphBuilder()
probs = bmg.add_real_matrix(torch.tensor([[0.75, 0.25], [0.125, 0.875]]))
tensor_elements = []
        # create a non-constant bool matrix
for row in range(0, 2):
row_node = bmg.add_natural(row)
row_prob = bmg.add_column_index(probs, row_node)
for column in range(0, 2):
col_index = bmg.add_natural(column)
prob = bmg.add_vector_index(row_prob, col_index)
bernoulli = bmg.add_bernoulli(prob)
sample = bmg.add_sample(bernoulli)
tensor_elements.append(sample)
matrix = bmg.add_tensor(Size([2, 2]), *tensor_elements)
# create constant matrices
const_prob_matrix = bmg.add_probability_matrix(
torch.tensor([[0.25, 0.75], [0.5, 0.5]])
)
const_bool_matrix = bmg.add_probability_matrix(
torch.tensor([[True, False], [False, False]])
)
const_prob_simplex = bmg.add_simplex(torch.tensor([0.5, 0.5]))
mc_non_constant_boolean = bmg.add_matrix_complement(matrix)
mc_const_prob = bmg.add_matrix_complement(const_prob_matrix)
mc_const_bool = bmg.add_matrix_complement(const_bool_matrix)
mc_const_simplex = bmg.add_matrix_complement(const_prob_simplex)
bmg.add_query(mc_non_constant_boolean, _rv_id())
bmg.add_query(mc_const_prob, _rv_id())
bmg.add_query(mc_const_bool, _rv_id())
bmg.add_query(mc_const_simplex, _rv_id())
observed_beanstalk = to_dot(bmg, after_transform=True)
expectation = """
digraph "graph" {
N00[label="[[0.75,0.25],\\\\n[0.125,0.875]]"];
N01[label=0];
N02[label=ColumnIndex];
N03[label=index];
N04[label=ToProb];
N05[label=Bernoulli];
N06[label=Sample];
N07[label=1];
N08[label=index];
N09[label=ToProb];
N10[label=Bernoulli];
N11[label=Sample];
N12[label=ColumnIndex];
N13[label=index];
N14[label=ToProb];
N15[label=Bernoulli];
N16[label=Sample];
N17[label=index];
N18[label=ToProb];
N19[label=Bernoulli];
N20[label=Sample];
N21[label=2];
N22[label=ToMatrix];
N23[label=MatrixComplement];
N24[label=Query];
N25[label="[[0.25,0.75],\\\\n[0.5,0.5]]"];
N26[label=MatrixComplement];
N27[label=Query];
N28[label="[[True,False],\\\\n[False,False]]"];
N29[label=MatrixComplement];
N30[label=Query];
N31[label="[0.5,0.5]"];
N32[label=MatrixComplement];
N33[label=Query];
N00 -> N02[label=left];
N00 -> N12[label=left];
N01 -> N02[label=right];
N01 -> N03[label=right];
N01 -> N13[label=right];
N02 -> N03[label=left];
N02 -> N08[label=left];
N03 -> N04[label=operand];
N04 -> N05[label=probability];
N05 -> N06[label=operand];
N06 -> N22[label=0];
N07 -> N08[label=right];
N07 -> N12[label=right];
N07 -> N17[label=right];
N08 -> N09[label=operand];
N09 -> N10[label=probability];
N10 -> N11[label=operand];
N11 -> N22[label=1];
N12 -> N13[label=left];
N12 -> N17[label=left];
N13 -> N14[label=operand];
N14 -> N15[label=probability];
N15 -> N16[label=operand];
N16 -> N22[label=2];
N17 -> N18[label=operand];
N18 -> N19[label=probability];
N19 -> N20[label=operand];
N20 -> N22[label=3];
N21 -> N22[label=columns];
N21 -> N22[label=rows];
N22 -> N23[label=operand];
N23 -> N24[label=operator];
N25 -> N26[label=operand];
N26 -> N27[label=operator];
N28 -> N29[label=operand];
N29 -> N30[label=operator];
N31 -> N32[label=operand];
N32 -> N33[label=operator];
}
"""
self.assertEqual(expectation.strip(), observed_beanstalk.strip())
generated_graph = to_bmg_graph(bmg)
observed_bmg = generated_graph.graph.to_dot()
expectation = """
digraph "graph" {
N0[label="matrix"];
N1[label="0"];
N2[label="ColumnIndex"];
N3[label="Index"];
N4[label="ToProb"];
N5[label="Bernoulli"];
N6[label="~"];
N7[label="1"];
N8[label="Index"];
N9[label="ToProb"];
N10[label="Bernoulli"];
N11[label="~"];
N12[label="ColumnIndex"];
N13[label="Index"];
N14[label="ToProb"];
N15[label="Bernoulli"];
N16[label="~"];
N17[label="Index"];
N18[label="ToProb"];
N19[label="Bernoulli"];
N20[label="~"];
N21[label="2"];
N22[label="ToMatrix"];
N23[label="MatrixComplement"];
N24[label="matrix"];
N25[label="MatrixComplement"];
N26[label="matrix"];
N27[label="MatrixComplement"];
N28[label="simplex"];
N29[label="MatrixComplement"];
N0 -> N2;
N0 -> N12;
N1 -> N2;
N1 -> N3;
N1 -> N13;
N2 -> N3;
N2 -> N8;
N3 -> N4;
N4 -> N5;
N5 -> N6;
N6 -> N22;
N7 -> N8;
N7 -> N12;
N7 -> N17;
N8 -> N9;
N9 -> N10;
N10 -> N11;
N11 -> N22;
N12 -> N13;
N12 -> N17;
N13 -> N14;
N14 -> N15;
N15 -> N16;
N16 -> N22;
N17 -> N18;
N18 -> N19;
N19 -> N20;
N20 -> N22;
N21 -> N22;
N21 -> N22;
N22 -> N23;
N24 -> N25;
N26 -> N27;
N28 -> N29;
Q0[label="Query"];
N23 -> Q0;
Q1[label="Query"];
N25 -> Q1;
Q2[label="Query"];
N27 -> Q2;
Q3[label="Query"];
N29 -> Q3;
}
"""
self.assertEqual(expectation.strip(), observed_bmg.strip())
def test_fix_matrix_log1mexp(self) -> None:
self.maxDiff = None
bmg = BMGraphBuilder()
probs = bmg.add_real_matrix(torch.tensor([[0.75, 0.25], [0.125, 0.875]]))
tensor_elements = []
        # create a non-constant real matrix
for row in range(0, 2):
row_node = bmg.add_natural(row)
row_prob = bmg.add_column_index(probs, row_node)
for column in range(0, 2):
col_index = bmg.add_natural(column)
prob = bmg.add_vector_index(row_prob, col_index)
bern = bmg.add_bernoulli(prob)
sample = bmg.add_sample(bern)
neg_two = bmg.add_neg_real(-2.0)
neg_samples = bmg.add_multiplication(neg_two, sample)
tensor_elements.append(neg_samples)
matrix = bmg.add_tensor(Size([2, 2]), *tensor_elements)
# create constant matrix
const_neg_real_matrix = bmg.add_neg_real_matrix(
torch.tensor([[-0.25, -0.75], [-0.5, -0.5]]),
)
mlog1mexp_non_constant_real = bmg.add_matrix_log1mexp(matrix)
mlog1mexp_const_neg_real = bmg.add_matrix_log1mexp(const_neg_real_matrix)
bmg.add_query(mlog1mexp_non_constant_real, _rv_id())
bmg.add_query(mlog1mexp_const_neg_real, _rv_id())
observed_beanstalk = to_dot(bmg, after_transform=True)
expectation = """
digraph "graph" {
N00[label="[[0.75,0.25],\\\\n[0.125,0.875]]"];
N01[label=0];
N02[label=ColumnIndex];
N03[label=index];
N04[label=ToProb];
N05[label=Bernoulli];
N06[label=Sample];
N07[label=1];
N08[label=index];
N09[label=ToProb];
N10[label=Bernoulli];
N11[label=Sample];
N12[label=ColumnIndex];
N13[label=index];
N14[label=ToProb];
N15[label=Bernoulli];
N16[label=Sample];
N17[label=index];
N18[label=ToProb];
N19[label=Bernoulli];
N20[label=Sample];
N21[label=2];
N22[label=-2.0];
N23[label=0.0];
N24[label=if];
N25[label=if];
N26[label=if];
N27[label=if];
N28[label=ToMatrix];
N29[label=MatrixLog1mexp];
N30[label=Query];
N31[label="[[-0.25,-0.75],\\\\n[-0.5,-0.5]]"];
N32[label=MatrixLog1mexp];
N33[label=Query];
N00 -> N02[label=left];
N00 -> N12[label=left];
N01 -> N02[label=right];
N01 -> N03[label=right];
N01 -> N13[label=right];
N02 -> N03[label=left];
N02 -> N08[label=left];
N03 -> N04[label=operand];
N04 -> N05[label=probability];
N05 -> N06[label=operand];
N06 -> N24[label=condition];
N07 -> N08[label=right];
N07 -> N12[label=right];
N07 -> N17[label=right];
N08 -> N09[label=operand];
N09 -> N10[label=probability];
N10 -> N11[label=operand];
N11 -> N25[label=condition];
N12 -> N13[label=left];
N12 -> N17[label=left];
N13 -> N14[label=operand];
N14 -> N15[label=probability];
N15 -> N16[label=operand];
N16 -> N26[label=condition];
N17 -> N18[label=operand];
N18 -> N19[label=probability];
N19 -> N20[label=operand];
N20 -> N27[label=condition];
N21 -> N28[label=columns];
N21 -> N28[label=rows];
N22 -> N24[label=consequence];
N22 -> N25[label=consequence];
N22 -> N26[label=consequence];
N22 -> N27[label=consequence];
N23 -> N24[label=alternative];
N23 -> N25[label=alternative];
N23 -> N26[label=alternative];
N23 -> N27[label=alternative];
N24 -> N28[label=0];
N25 -> N28[label=1];
N26 -> N28[label=2];
N27 -> N28[label=3];
N28 -> N29[label=operand];
N29 -> N30[label=operator];
N31 -> N32[label=operand];
N32 -> N33[label=operator];
}
"""
self.assertEqual(expectation.strip(), observed_beanstalk.strip())
generated_graph = to_bmg_graph(bmg)
observed_bmg = generated_graph.graph.to_dot()
expectation = """
digraph "graph" {
N0[label="matrix"];
N1[label="0"];
N2[label="ColumnIndex"];
N3[label="Index"];
N4[label="ToProb"];
N5[label="Bernoulli"];
N6[label="~"];
N7[label="1"];
N8[label="Index"];
N9[label="ToProb"];
N10[label="Bernoulli"];
N11[label="~"];
N12[label="ColumnIndex"];
N13[label="Index"];
N14[label="ToProb"];
N15[label="Bernoulli"];
N16[label="~"];
N17[label="Index"];
N18[label="ToProb"];
N19[label="Bernoulli"];
N20[label="~"];
N21[label="2"];
N22[label="-2"];
N23[label="-1e-10"];
N24[label="IfThenElse"];
N25[label="IfThenElse"];
N26[label="IfThenElse"];
N27[label="IfThenElse"];
N28[label="ToMatrix"];
N29[label="MatrixLog1mexp"];
N30[label="matrix"];
N31[label="MatrixLog1mexp"];
N0 -> N2;
N0 -> N12;
N1 -> N2;
N1 -> N3;
N1 -> N13;
N2 -> N3;
N2 -> N8;
N3 -> N4;
N4 -> N5;
N5 -> N6;
N6 -> N24;
N7 -> N8;
N7 -> N12;
N7 -> N17;
N8 -> N9;
N9 -> N10;
N10 -> N11;
N11 -> N25;
N12 -> N13;
N12 -> N17;
N13 -> N14;
N14 -> N15;
N15 -> N16;
N16 -> N26;
N17 -> N18;
N18 -> N19;
N19 -> N20;
N20 -> N27;
N21 -> N28;
N21 -> N28;
N22 -> N24;
N22 -> N25;
N22 -> N26;
N22 -> N27;
N23 -> N24;
N23 -> N25;
N23 -> N26;
N23 -> N27;
N24 -> N28;
N25 -> N28;
N26 -> N28;
N27 -> N28;
N28 -> N29;
N30 -> N31;
Q0[label="Query"];
N29 -> Q0;
Q1[label="Query"];
N31 -> Q1;
}
"""
self.assertEqual(expectation.strip(), observed_bmg.strip())
| beanmachine-main | tests/ppl/compiler/fix_matrix_type_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Bernoulli compiler tests
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch.distributions import Bernoulli, Beta
_bern_ext = Bernoulli(0.5)
@bm.random_variable
def bern_1():
# Distribution created externally to random variable
return _bern_ext
@bm.random_variable
def bern_2():
# Distribution created in random variable, named argument
return Bernoulli(probs=0.25)
@bm.random_variable
def beta():
return Beta(2.0, 2.0)
@bm.random_variable
def bern_3():
# Distribution parameterized by another rv
return Bernoulli(beta())
@bm.random_variable
def bern_4():
# Bernoullis with constant logits are treated as though we had
# the probs instead. Notice that this is deduplicated in the graph
# with Bern(0.5) (of course it is a different sample because it
# is a different RV).
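    # Concretely, a logit of 0.0 corresponds to a probability of
    # sigmoid(0.0) = 0.5, so this distribution folds to Bernoulli(0.5).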
return Bernoulli(logits=0.0)
@bm.random_variable
def bern_5():
# Bernoullis with stochastic logits become a different kind of node.
return Bernoulli(logits=beta())
class BernoulliTest(unittest.TestCase):
def test_bernoulli(self) -> None:
self.maxDiff = None
queries = [
bern_1(),
bern_2(),
bern_3(),
bern_4(),
bern_5(),
]
observations = {}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label=0.5];
N01[label=Bernoulli];
N02[label=Sample];
N03[label=Query];
N04[label=0.25];
N05[label=Bernoulli];
N06[label=Sample];
N07[label=Query];
N08[label=2.0];
N09[label=Beta];
N10[label=Sample];
N11[label=Bernoulli];
N12[label=Sample];
N13[label=Query];
N14[label=Sample];
N15[label=Query];
N16[label=ToReal];
N17[label="Bernoulli(logits)"];
N18[label=Sample];
N19[label=Query];
N00 -> N01;
N01 -> N02;
N01 -> N14;
N02 -> N03;
N04 -> N05;
N05 -> N06;
N06 -> N07;
N08 -> N09;
N08 -> N09;
N09 -> N10;
N10 -> N11;
N10 -> N16;
N11 -> N12;
N12 -> N13;
N14 -> N15;
N16 -> N17;
N17 -> N18;
N18 -> N19;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/bernoulli_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.inference import BMGInference
@bm.random_variable
def foo():
return dist.Normal(0.0, 1.0)
@bm.random_variable
def bat():
return dist.Normal(0.0, 10.0)
@bm.random_variable
def bar(i):
stmt: float = 1.2 * foo() + bat()
return dist.Normal(stmt, 1.0)
class AnnotatedAssignmentTest(unittest.TestCase):
    def test_annotated_assignment(self) -> None:
bat_value = dist.Normal(0.0, 10.0).sample(torch.Size((1, 1)))
foo_value = dist.Normal(0.0, 1.0).sample(torch.Size((1, 1)))
observations = {}
bar_parent = dist.Normal(foo_value + bat_value, torch.tensor(1.0))
for i in range(0, 1):
observations[bar(i)] = bar_parent.sample(torch.Size((1, 1)))
observed = BMGInference().to_dot(
queries=[foo(), bat()],
observations=observations,
)
print(observed)
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=10.0];
N05[label=Normal];
N06[label=Sample];
N07[label=1.2];
N08[label="*"];
N09[label="+"];
N10[label=Normal];
N11[label=Sample];
N12[label="Observation 12.937742233276367"];
N13[label=Query];
N14[label=Query];
N00 -> N02;
N00 -> N05;
N01 -> N02;
N01 -> N10;
N02 -> N03;
N03 -> N08;
N03 -> N13;
N04 -> N05;
N05 -> N06;
N06 -> N09;
N06 -> N14;
N07 -> N08;
N08 -> N09;
N09 -> N10;
N10 -> N11;
N11 -> N12;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/annotated_assignment_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Categorical compiler tests
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Categorical, Dirichlet, HalfCauchy
t = tensor([0.125, 0.125, 0.25, 0.5])
@bm.random_variable
def c_const_simplex():
return Categorical(t)
@bm.random_variable
def c_const_unnormalized():
# If we have samples of both the normalized and unnormalized distributions
# the deduplicator should merge them into the same distribution, since
# 2:2:4:8 :: 1/8:1/8:1/4:1/2
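    # Concretely, t * 16.0 is [2., 2., 4., 8.]; dividing by its sum (16)
    # recovers [0.125, 0.125, 0.25, 0.5], the same simplex as above.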
return Categorical(t * 16.0)
@bm.random_variable
def c_const_logit_simplex():
# Note that logits here means log probabilities, not log odds.
# Since the argument is just a constant, the runtime should detect
# that it can simply reuse the [0.125, 0.125, 0.25, 0.5] node
# in the generated graph.
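    # Concretely, exponentiating t.log() elementwise gives back exactly
    # [0.125, 0.125, 0.25, 0.5], which already sums to 1.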
return Categorical(logits=t.log())
@bm.random_variable
def c_trivial_simplex():
# No sensible person would do this but we should ensure it works anyway.
# Categorical(1.0) is already illegal in torch so we don't have to test that.
# TODO: We could optimize this to the constant zero I suppose but it is
# unlikely to help in realistic code. Better would be to detect this likely
# bug and report it as a warning somehow.
return Categorical(tensor([1.0]))
@bm.random_variable
def hc():
return HalfCauchy(0.0)
@bm.random_variable
def c_random_logit():
return Categorical(logits=tensor([0.0, 0.0, 0.0, -hc()]))
@bm.random_variable
def d4():
return Dirichlet(tensor([1.0, 1.0, 1.0, 1.0]))
@bm.random_variable
def cd4():
return Categorical(d4())
@bm.random_variable
def c_multi():
return Categorical(tensor([[0.5, 0.5], [0.5, 0.5]]))
# NOTE: A random variable indexed by a categorical is tested in
# stochastic_control_flow_test.py.
# TODO: Once categorical inference is supported in BMG add a test
# here which demonstrates that.
class CategoricalTest(unittest.TestCase):
def test_categorical_trivial(self) -> None:
self.maxDiff = None
queries = [c_trivial_simplex()]
observations = {}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N0[label="[1.0]"];
N1[label=Categorical];
N2[label=Sample];
N3[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_categorical_dirichlet(self) -> None:
self.maxDiff = None
# It should be legal to use the output of a one-column
# Dirichlet as the input to a categorical:
queries = [cd4()]
observations = {}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N0[label="[1.0,1.0,1.0,1.0]"];
N1[label=Dirichlet];
N2[label=Sample];
N3[label=Categorical];
N4[label=Sample];
N5[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N4 -> N5;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_categorical_equivalent_consts(self) -> None:
self.maxDiff = None
# * If we have a categorical with a constant probability
# that does not sum to 1.0 then we automatically normalize it.
        # * If we have a categorical parameterized by constant logits
# then we automatically convert it to regular probs and
# normalize them.
#
# That means that we automatically deduplicate what looks
# like three distinct distributions into three samples from
# the same distribution:
queries = [
c_const_unnormalized(),
c_const_simplex(),
c_const_logit_simplex(),
]
observations = {}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N0[label="[0.125,0.125,0.25,0.5]"];
N1[label=Categorical];
N2[label=Sample];
N3[label=Query];
N4[label=Sample];
N5[label=Query];
N6[label=Sample];
N7[label=Query];
N0 -> N1;
N1 -> N2;
N1 -> N4;
N1 -> N6;
N2 -> N3;
N4 -> N5;
N6 -> N7;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# Note that we add a simplex-typed constant:
observed = BMGInference().to_python(queries, observations)
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_col_simplex_matrix(tensor([[0.125],[0.125],[0.25],[0.5]]))
n1 = g.add_distribution(
graph.DistributionType.CATEGORICAL,
graph.AtomicType.NATURAL,
[n0],
)
n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
q0 = g.query(n2)
n3 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
q1 = g.query(n3)
n4 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
q2 = g.query(n4)
"""
self.assertEqual(expected.strip(), observed.strip())
def test_categorical_random_logit(self) -> None:
self.maxDiff = None
# We do not support Categorical(logits=something_random)
# random variables.
queries = [
c_random_logit(),
]
observations = {}
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, observations, 10)
observed = str(ex.exception)
expected = """
The model uses a categorical operation unsupported by Bean Machine Graph.
The unsupported node was created in function call c_random_logit().
"""
self.assertEqual(expected.strip(), observed.strip())
def test_categorical_multi(self) -> None:
self.maxDiff = None
# We do not support Categorical with multiple dimensions.
# TODO: This error message is not very well worded; what we want to communicate
# is that ANY one-column simplex is the requirement.
queries = [
c_multi(),
]
observations = {}
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, observations, 10)
observed = str(ex.exception)
expected = """
The probability of a categorical is required to be a 2 x 1 simplex matrix but is a 2 x 2 simplex matrix.
The categorical was created in function call c_multi().
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/categorical_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.compiler.runtime import BMGRuntime
from torch.distributions import Bernoulli, Beta
@bm.random_variable
def beta():
return Beta(2.0, 2.0)
@bm.random_variable
def flip1(n):
return Bernoulli(beta())
@bm.functional
def sum1():
return flip1(0) + 1.0
def sum2(n, m):
# Note that sum2 is NOT a functional.
# The returned addition node should deduplicate with
# the one returned by sum1().
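    # When called as sum2(1.0, m=1.0) the product n * m folds to the constant
    # 1.0, so the addition here is structurally identical to the one in sum1().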
return flip1(0) + (n * m)
@bm.functional
def prod1(n):
# Try a named argument.
return sum1() * sum2(1.0, m=1.0)
@bm.functional
def log1(n):
return prod1(n).log()
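# Render the node -> call-sites map as a deterministic, sorted string so the
# test below can compare it against a fixed expected value.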
def _dict_to_str(d) -> str:
return "\n".join(
sorted(
type(key).__name__ + ":{" + ",".join(sorted(str(v) for v in d[key])) + "}"
for key in d
)
)
class NodeContextTest(unittest.TestCase):
def test_node_context(self) -> None:
self.maxDiff = None
rt = BMGRuntime()
rt.accumulate_graph([log1(123)], {})
expected = """
AdditionNode:{sum1(),sum2(1.0,m=1.0)}
BernoulliNode:{flip1(0)}
BetaNode:{beta()}
LogNode:{log1(123)}
MultiplicationNode:{prod1(123)}
SampleNode:{beta()}
SampleNode:{flip1(0)}
"""
observed = _dict_to_str(rt._context._node_locations)
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/node_context_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Compare original and conjugate prior transformed model"""
import random
import unittest
import scipy
import torch
from beanmachine.ppl.examples.conjugate_models.normal_normal import NormalNormalModel
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
from torch.distributions import Normal
class NormalNormalConjugacyTest(unittest.TestCase):
def test_conjugate_graph(self) -> None:
bmg = BMGInference()
model = NormalNormalModel(10.0, 2.0, 5.0)
queries = [model.normal_p()]
observations = {model.normal(): tensor(15.9)}
observed_bmg = bmg.to_dot(queries, observations, skip_optimizations=set())
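        # The conjugacy rewriter should collapse the model into a single Normal
        # posterior node. As a sanity check of the constants in the expected
        # graph below (standard normal-normal conjugacy, reading the constructor
        # arguments as prior mean 10.0, prior stddev 2.0, likelihood stddev 5.0):
        #   posterior precision = 1 / 2.0**2 + 1 / 5.0**2 = 0.29
        #   posterior stddev    = 0.29 ** -0.5            ~= 1.8569534
        #   posterior mean      = (10.0 / 4 + 15.9 / 25) / 0.29 ~= 10.8137932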
expected_bmg = """
digraph "graph" {
N0[label=10.813793182373047];
N1[label=1.8569534304710584];
N2[label=Normal];
N3[label=Sample];
N4[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
}
"""
self.assertEqual(observed_bmg.strip(), expected_bmg.strip())
def test_normal_normal_conjugate(self) -> None:
"""
        Two-sample KS test to check that samples from the original NormalNormalModel
        and the conjugacy-transformed model agree (KS p-value of at least 0.05).
We initialize the seed to ensure the test is deterministic.
"""
seed = 0
torch.manual_seed(seed)
random.seed(seed)
true_mu = 0.5
true_y = Normal(true_mu, 10.0)
num_samples = 1000
bmg = BMGInference()
model = NormalNormalModel(10.0, 2.0, 5.0)
queries = [model.normal_p()]
observations = {
model.normal(): true_y.sample(),
}
skip_optimizations = {"normal_normal_conjugate_fixer"}
original_posterior = bmg.infer(
queries, observations, num_samples, 1, skip_optimizations=skip_optimizations
)
original_samples = original_posterior[model.normal_p()][0]
transformed_posterior = bmg.infer(
queries, observations, num_samples, 1, skip_optimizations=set()
)
transformed_samples = transformed_posterior[model.normal_p()][0]
self.assertEqual(
type(original_samples),
type(transformed_samples),
"Sample type of original and transformed model should be the same.",
)
self.assertEqual(
len(original_samples),
len(transformed_samples),
"Sample size of original and transformed model should be the same.",
)
self.assertGreaterEqual(
scipy.stats.ks_2samp(original_samples, transformed_samples).pvalue,
0.05,
)
| beanmachine-main | tests/ppl/compiler/fix_normal_normal_basic_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end compiler test for Bayesian Meta-Analysis model"""
import platform
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
from torch.distributions import HalfCauchy, Normal, StudentT
class Group:
level = 2
class Team:
group: Group
level = 1
def __init__(self, group: Group):
self.group = group
class Experiment:
result: float
stddev: float
team: Team
level = 0
def __init__(self, result: float, stddev: float, team: Team):
self.result = result
self.stddev = stddev
self.team = team
group1 = Group()
group2 = Group()
team1 = Team(group1)
team2 = Team(group1)
team3 = Team(group2)
team4 = Team(group2)
# I generated sample values for everything that conform to this model:
# * true value is 10.0
# * experiment bias stddev is 2.10
# * team bias stddev is 1.34
# * group bias stddev is 1.52
# * experiment biases are
# -0.82, -1.58, 0.45, 0.23, 1.30, -1.25, -1.26, -1.14
# * team biases are -2.19, -1.41, -0.26, 1.16
# * group biases are 0.19, 0.79
# * experiment stddevs and results are given below.
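# As a sanity check of the numbers above (assuming the biases are listed in
# the same order as the experiments, teams and groups): the first experiment's
# expected mean is 10.0 + 0.19 (group1) - 2.19 (team1) - 0.82 (experiment 1)
# = 7.18, and its recorded result of 7.36 is that mean plus measurement noise
# with stddev 0.3.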
experiments = [
Experiment(7.36, 0.3, team1),
Experiment(6.47, 0.5, team1),
Experiment(8.87, 0.2, team2),
Experiment(9.17, 1.0, team2),
Experiment(11.19, 2.4, team3),
Experiment(10.30, 1.5, team3),
Experiment(11.06, 0.9, team4),
Experiment(10.74, 0.8, team4),
]
@bm.random_variable
def true_value():
return StudentT(1.0)
@bm.random_variable
def bias_size(level):
return HalfCauchy(1.0)
@bm.random_variable
def node_bias(node):
return Normal(0, bias_size(node.level))
@bm.random_variable
def result(experiment):
mean = (
true_value()
+ node_bias(experiment)
+ node_bias(experiment.team)
+ node_bias(experiment.team.group)
)
return Normal(mean, experiment.stddev)
class BMATest(unittest.TestCase):
@unittest.skipIf(
platform.system() in ["Darwin", "Windows"],
reason="Numerical behavior seems to be different on MacOS/Windows",
)
def test_bma_inference(self) -> None:
queries = [true_value(), bias_size(0), bias_size(1), bias_size(2)]
observations = {result(x): tensor(x.result) for x in experiments}
# Eight experiments, four teams, two groups, is very little data to
# make good inferences from, so we should expect that the inference
# engine does not get particularly close.
# The true value is 10.0, but the observations given best match
# a true value of 8.15.
expected_true_value = 8.15
# True exp bias size was 2.10 but observations given best match
        # an exp bias size of 0.70
expected_exp_bias = 0.70
# True team bias size was 1.32 but observations given best match
# a team bias of 1.26
expected_team_bias = 1.26
# True group bias size was 1.52 but observations given best match
# a group bias of 1.50
expected_group_bias = 1.50
mcsamples = BMGInference().infer(queries, observations, 1000, 1)
queries = [true_value(), bias_size(0), bias_size(1), bias_size(2)]
observed_true_value = mcsamples[true_value()].mean()
observed_exp_bias = mcsamples[bias_size(0)].mean()
observed_team_bias = mcsamples[bias_size(1)].mean()
observed_group_bias = mcsamples[bias_size(2)].mean()
self.assertAlmostEqual(observed_true_value, expected_true_value, delta=0.1)
self.assertAlmostEqual(observed_exp_bias, expected_exp_bias, delta=0.1)
self.assertAlmostEqual(observed_team_bias, expected_team_bias, delta=0.1)
self.assertAlmostEqual(observed_group_bias, expected_group_bias, delta=0.1)
| beanmachine-main | tests/ppl/compiler/bma_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.compiler.runtime import BMGRuntime
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Beta, Dirichlet, Normal
# Random variable that takes an argument
@bm.random_variable
def norm(n):
return Normal(loc=0.0, scale=1.0)
# Random variable that takes no argument
@bm.random_variable
def coin():
return Beta(2.0, 2.0)
# Call to random variable inside random variable
@bm.random_variable
def flip():
return Bernoulli(coin())
@bm.random_variable
def flips(n):
return Bernoulli(0.5)
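# A mixture component chooser: a nonzero index yields a fair coin flip and a
# zero index yields a standard normal. The stochastic control flow tests below
# pass random variables as the index n.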
@bm.random_variable
def spike_and_slab(n):
if n:
return Bernoulli(0.5)
else:
return Normal(0, 1)
@bm.functional
def if_statement():
# Stochastic control flows using "if" statements are not yet implemented
if flip():
return flips(0)
else:
return flips(1)
@bm.functional
def while_statement():
# A while statement is logically just a fancy "if"; since we do not support
# stochastic "if" yet, neither do we support stochastic "while".
while flip():
return flips(0)
return flips(1)
@bm.random_variable
def dirichlet():
return Dirichlet(tensor([1.0, 1.0, 1.0]))
@bm.functional
def for_statement():
# Stochastic control flows using "for" statements are not yet implemented
# TODO: If we know the shape of a graph node then we could implement:
#
# for x in stochastic_vector():
# ...
#
# as
#
# for i in range(vector_length):
# x = stochastic_vector()[i]
# ...
#
# and similarly for 2-d matrix tensors; we could iterate the columns.
#
s = 0.0
for x in dirichlet():
s += x
return s
@bm.functional
def list_comprehension():
# Comprehensions are just a special kind of "for".
# We don't support them.
return tensor([x + 1.0 for x in dirichlet()])
@bm.functional
def set_comprehension():
# Comprehensions are just a special kind of "for".
# We don't support them.
return tensor(len({x > 0.5 for x in dirichlet()}))
@bm.functional
def dict_comprehension():
# Comprehensions are just a special kind of "for".
# We don't support them.
return tensor(len({x: x > 0.5 for x in dirichlet()}))
@bm.functional
def seq_comprehension():
# Comprehensions are just a special kind of "for".
# We don't support them.
return tensor(x * 2.0 for x in dirichlet())
# Try out a stochastic control flow where we choose
# a mean from one of two distributions depending on
# a coin flip.
@bm.random_variable
def choose_your_mean():
return Normal(spike_and_slab(flip()), 1)
# Now let's try what looks like a stochastic control flow but is
# actually deterministic. We should detect this and avoid
# generating a stochastic control flow.
@bm.functional
def always_zero():
return tensor(0)
@bm.random_variable
def any_index_you_want_as_long_as_it_is_zero():
return Normal(spike_and_slab(always_zero()), 1)
# Now choose from one of three options; notice that we have
# computed a stochastic value inline here rather than putting
# it in a functional; that's fine.
@bm.random_variable
def three_possibilities():
return Normal(spike_and_slab(flips(0) + flips(1)), 1)
@bm.random_variable
def choice_of_flips(n):
if n:
return Bernoulli(0.75)
return Bernoulli(0.25)
@bm.random_variable
def composition():
return Normal(spike_and_slab(choice_of_flips(flip())), 1)
# Make a choice of four possibilities based on two parameters.
@bm.random_variable
def multiple_choice(m, n):
if n:
if m:
return Bernoulli(0.125)
return Bernoulli(0.25)
if m:
return Bernoulli(0.75)
return Bernoulli(0.875)
@bm.random_variable
def two_parameters():
return Normal(multiple_choice(flips(0), flips(1)), 1)
class StochasticControlFlowTest(unittest.TestCase):
def test_stochastic_control_flow_1(self) -> None:
self.maxDiff = None
queries = [any_index_you_want_as_long_as_it_is_zero()]
observations = {}
bmg = BMGRuntime().accumulate_graph(queries, observations)
# Here we have what looks like a stochastic control flow but
# in reality there is only one possibility. We should ensure
# that we generate a graph with no choice points.
observed = to_dot(bmg, after_transform=True, label_edges=False)
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Normal];
N5[label=Sample];
N6[label=Query];
N0 -> N2;
N1 -> N2;
N1 -> N4;
N2 -> N3;
N3 -> N4;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_stochastic_control_flow_2(self) -> None:
self.maxDiff = None
queries = [choose_your_mean()]
observations = {}
bmg = BMGRuntime().accumulate_graph(queries, observations)
# Note that we generate an if-then-else node here to express the
# flip that chooses between two alternatives, and therefore can
# lower this to a form that BMG would accept.
observed = to_dot(bmg, after_transform=True, label_edges=True)
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=Beta];
N02[label=Sample];
N03[label=Bernoulli];
N04[label=Sample];
N05[label=0.0];
N06[label=1.0];
N07[label=Normal];
N08[label=Sample];
N09[label=0.5];
N10[label=Bernoulli];
N11[label=Sample];
N12[label=ToReal];
N13[label=if];
N14[label=Normal];
N15[label=Sample];
N16[label=Query];
N00 -> N01[label=alpha];
N00 -> N01[label=beta];
N01 -> N02[label=operand];
N02 -> N03[label=probability];
N03 -> N04[label=operand];
N04 -> N13[label=condition];
N05 -> N07[label=mu];
N06 -> N07[label=sigma];
N06 -> N14[label=sigma];
N07 -> N08[label=operand];
N08 -> N13[label=alternative];
N09 -> N10[label=probability];
N10 -> N11[label=operand];
N11 -> N12[label=operand];
N12 -> N13[label=consequence];
N13 -> N14[label=mu];
N14 -> N15[label=operand];
N15 -> N16[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_stochastic_control_flow_3(self) -> None:
self.maxDiff = None
queries = [three_possibilities()]
observations = {}
bmg = BMGRuntime().accumulate_graph(queries, observations)
# TODO: We cannot yet transform this into a legal BMG graph because
# the quantity used to make the choice is a sum of Booleans, and
# we treat the sum of bools as a real number, not as a natural.
# We can only index on naturals.
# TODO: Add a test where we generate supports such as 1, 2, 3
# or 1, 10, 100.
observed = to_dot(bmg, after_transform=False, label_edges=True)
expected = """
digraph "graph" {
N00[label=0.5];
N01[label=Bernoulli];
N02[label=Sample];
N03[label=Sample];
N04[label="+"];
N05[label=0.0];
N06[label=1.0];
N07[label=Normal];
N08[label=Sample];
N09[label=Sample];
N10[label=2.0];
N11[label=Sample];
N12[label=Switch];
N13[label=1];
N14[label=Normal];
N15[label=Sample];
N16[label=Query];
N00 -> N01[label=probability];
N01 -> N02[label=operand];
N01 -> N03[label=operand];
N01 -> N09[label=operand];
N01 -> N11[label=operand];
N02 -> N04[label=left];
N03 -> N04[label=right];
N04 -> N12[label=0];
N05 -> N07[label=mu];
N05 -> N12[label=1];
N06 -> N07[label=sigma];
N06 -> N12[label=3];
N07 -> N08[label=operand];
N08 -> N12[label=2];
N09 -> N12[label=4];
N10 -> N12[label=5];
N11 -> N12[label=6];
N12 -> N14[label=mu];
N13 -> N14[label=sigma];
N14 -> N15[label=operand];
N15 -> N16[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_stochastic_control_flow_composition(self) -> None:
self.maxDiff = None
queries = [composition()]
observations = {}
# Here we have a case where we have composed one stochastic control flow
# as the input to another:
# * we flip a beta(2,2) coin
# * that flip decides whether the next coin flipped is 0.75 or 0.25
# * which decides whether to sample from a normal or a 0.5 coin
# * the result is the mean of a normal.
# TODO: Write a similar test that shows composition of categoricals.
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=Beta];
N02[label=Sample];
N03[label=Bernoulli];
N04[label=Sample];
N05[label=0.25];
N06[label=Bernoulli];
N07[label=Sample];
N08[label=0.75];
N09[label=Bernoulli];
N10[label=Sample];
N11[label=0.0];
N12[label=1.0];
N13[label=Normal];
N14[label=Sample];
N15[label=0.5];
N16[label=Bernoulli];
N17[label=Sample];
N18[label=if];
N19[label=ToReal];
N20[label=if];
N21[label=Normal];
N22[label=Sample];
N23[label=Query];
N00 -> N01;
N00 -> N01;
N01 -> N02;
N02 -> N03;
N03 -> N04;
N04 -> N18;
N05 -> N06;
N06 -> N07;
N07 -> N18;
N08 -> N09;
N09 -> N10;
N10 -> N18;
N11 -> N13;
N12 -> N13;
N12 -> N21;
N13 -> N14;
N14 -> N20;
N15 -> N16;
N16 -> N17;
N17 -> N19;
N18 -> N20;
N19 -> N20;
N20 -> N21;
N21 -> N22;
N22 -> N23;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_stochastic_control_flow_4(self) -> None:
self.maxDiff = None
queries = [two_parameters()]
observations = {}
bmg = BMGRuntime().accumulate_graph(queries, observations)
# Here we have four possibilities, but since each is a Boolean choice
# it turns out that we can in fact represent it.
observed = to_dot(bmg, after_transform=True, label_edges=True)
expected = """
digraph "graph" {
N00[label=0.5];
N01[label=Bernoulli];
N02[label=Sample];
N03[label=Sample];
N04[label=0.875];
N05[label=Bernoulli];
N06[label=Sample];
N07[label=0.25];
N08[label=Bernoulli];
N09[label=Sample];
N10[label=0.75];
N11[label=Bernoulli];
N12[label=Sample];
N13[label=0.125];
N14[label=Bernoulli];
N15[label=Sample];
N16[label=if];
N17[label=if];
N18[label=if];
N19[label=ToReal];
N20[label=1.0];
N21[label=Normal];
N22[label=Sample];
N23[label=Query];
N00 -> N01[label=probability];
N01 -> N02[label=operand];
N01 -> N03[label=operand];
N02 -> N18[label=condition];
N03 -> N16[label=condition];
N03 -> N17[label=condition];
N04 -> N05[label=probability];
N05 -> N06[label=operand];
N06 -> N17[label=alternative];
N07 -> N08[label=probability];
N08 -> N09[label=operand];
N09 -> N17[label=consequence];
N10 -> N11[label=probability];
N11 -> N12[label=operand];
N12 -> N16[label=alternative];
N13 -> N14[label=probability];
N14 -> N15[label=operand];
N15 -> N16[label=consequence];
N16 -> N18[label=consequence];
N17 -> N18[label=alternative];
N18 -> N19[label=operand];
N19 -> N21[label=mu];
N20 -> N21[label=sigma];
N21 -> N22[label=operand];
N22 -> N23[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_stochastic_control_flow_5(self) -> None:
self.maxDiff = None
queries = [if_statement()]
observations = {}
with self.assertRaises(ValueError) as ex:
BMGRuntime().accumulate_graph(queries, observations)
# TODO: Better error message
expected = "Stochastic control flows are not yet implemented."
self.assertEqual(expected, str(ex.exception))
queries = [for_statement()]
with self.assertRaises(ValueError) as ex:
BMGRuntime().accumulate_graph(queries, observations)
# TODO: Better error message
expected = "Stochastic control flows are not yet implemented."
self.assertEqual(expected, str(ex.exception))
queries = [while_statement()]
with self.assertRaises(ValueError) as ex:
BMGRuntime().accumulate_graph(queries, observations)
# TODO: Better error message
expected = "Stochastic control flows are not yet implemented."
self.assertEqual(expected, str(ex.exception))
queries = [dict_comprehension()]
with self.assertRaises(ValueError) as ex:
BMGRuntime().accumulate_graph(queries, observations)
# TODO: Better error message
expected = "Stochastic control flows are not yet implemented."
self.assertEqual(expected, str(ex.exception))
queries = [list_comprehension()]
with self.assertRaises(ValueError) as ex:
BMGRuntime().accumulate_graph(queries, observations)
# TODO: Better error message
expected = "Stochastic control flows are not yet implemented."
self.assertEqual(expected, str(ex.exception))
queries = [seq_comprehension()]
with self.assertRaises(ValueError) as ex:
BMGRuntime().accumulate_graph(queries, observations)
# TODO: Better error message
expected = "Stochastic control flows are not yet implemented."
self.assertEqual(expected, str(ex.exception))
queries = [set_comprehension()]
with self.assertRaises(ValueError) as ex:
BMGRuntime().accumulate_graph(queries, observations)
# TODO: Better error message
expected = "Stochastic control flows are not yet implemented."
self.assertEqual(expected, str(ex.exception))
# TODO: Add a test that shows what happens when multiple graph node
# arguments are not independent. We can get some false paths
# in the graph when this happens. Can we prune them?
| beanmachine-main | tests/ppl/compiler/stochastic_control_flow_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test for log1mexp"""
import math
import unittest
import beanmachine.ppl as bm
import torch
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Beta, HalfCauchy
# New
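# log1mexp(x) computes log(1 - exp(x)), which is only defined for x < 0
# (so that 1 - exp(x) is positive). The naive formulations below are fine
# for this test; BMG has a dedicated Log1mexp operator for the same purpose.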
def log1mexp(lprob):
return torch.log(1 - torch.exp(lprob))
def math_log1mexp(lprob):
return math.log(1 - math.exp(lprob))
@bm.random_variable
def hc():
return HalfCauchy(42) # positive real
@bm.functional
def right():
return log1mexp(-hc()) # log1mexp takes a negative real
@bm.functional
def wrong():
return log1mexp(hc()) # log1mexp takes a negative real!
@bm.functional
def math_right():
return math_log1mexp(-hc()) # log1mexp takes a negative real
@bm.functional
def math_wrong():
return math_log1mexp(hc()) # log1mexp takes a negative real!
# Old
@bm.random_variable
def beta():
return Beta(2.0, -math_log1mexp(-2.0))
@bm.random_variable
def beta2():
return Beta(2.0, -log1mexp(-beta()))
@bm.random_variable
def flip(n):
return Bernoulli(beta())
class Log1mexpTest(unittest.TestCase):
def test_log1mexp(self) -> None:
"""log1mexp"""
# New
#
# First we look at the torch.tensor case
#
# Example of a model that is OK
#
queries = [right()]
observations = {}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N0[label=42.0];
N1[label=HalfCauchy];
N2[label=Sample];
N3[label="-"];
N4[label=Log1mexp];
N5[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N4 -> N5;
}
"""
self.assertEqual(expected.strip(), observed.strip())
self.assertTrue(
BMGInference().infer(queries, observations, 1, 1),
msg="Expected inference to complete successful on this example.",
)
#
# Example of a model that is not OK, that is, should raise an error
#
queries = [wrong()]
observations = {}
with self.assertRaises(ValueError) as ex:
observed = BMGInference().to_dot(queries, observations)
# TODO: The location in this error message is oddly formatted.
# We probably shouldn't be putting descriptions of stochastic
# nodes into the call site.
expected = """
The operand of a log is required to be a positive real but is a real.
The log was created in function call log1mexp(Sample(HalfCauchy(42.0)))."""
self.assertEqual(expected.strip(), str(ex.exception).strip())
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, observations, 1, 1)
self.assertEqual(expected.strip(), str(ex.exception))
queries = [right()]
observations = {hc(): tensor(1.0)}
result = BMGInference().infer(queries, observations, 1, 1)
observed = result[right()]
expected = log1mexp(tensor(-1.0))
self.assertEqual(observed, expected)
# Second we look at the math_ case
#
# Example of a model that is OK
#
queries = [math_right()]
observations = {}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N0[label=42.0];
N1[label=HalfCauchy];
N2[label=Sample];
N3[label="-"];
N4[label=Log1mexp];
N5[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N4 -> N5;
}
"""
self.assertEqual(expected.strip(), observed.strip())
self.assertTrue(
BMGInference().infer(queries, observations, 1, 1),
msg="Expected inference to complete successful on this example.",
)
#
# Example of a model that is not OK, that is, should raise an error
#
queries = [math_wrong()]
observations = {}
with self.assertRaises(ValueError) as ex:
observed = BMGInference().to_dot(queries, observations)
expected = """
The operand of a log is required to be a positive real but is a real.
The log was created in function call math_log1mexp(Sample(HalfCauchy(42.0)))."""
self.assertEqual(expected.strip(), str(ex.exception).strip())
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, observations, 1, 1)
self.assertEqual(expected.strip(), str(ex.exception))
queries = [math_right()]
observations = {hc(): tensor(1.0)}
result = BMGInference().infer(queries, observations, 1, 1)
observed = result[math_right()]
expected = math_log1mexp(-1.0)
self.assertEqual(observed, expected)
# ...
# Old
def test_log1mexp_coin_flip_inference(self) -> None:
"""Like a test in coin_flip_test.py but with log1mexp"""
self.maxDiff = None
queries = [beta()]
observations = {
flip(0): tensor(0.0),
flip(1): tensor(0.0),
flip(2): tensor(1.0),
flip(3): tensor(0.0),
}
num_samples = 1000
inference = BMGInference()
mcsamples = inference.infer(queries, observations, num_samples, 1)
samples = mcsamples[beta()]
observed = samples.mean()
expected = tensor(0.4873)
self.assertAlmostEqual(first=observed, second=expected, delta=0.05)
def test_log1mexp_coin_flip_to_dot_cpp_python(self) -> None:
"""Like a test in coin_flip_test.py but with log1mexp"""
self.maxDiff = None
queries = [beta2()]
observations = {
flip(0): tensor(0.0),
flip(1): tensor(0.0),
flip(2): tensor(1.0),
flip(3): tensor(0.0),
}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=0.14541345834732056];
N02[label=Beta];
N03[label=Sample];
N04[label=Bernoulli];
N05[label=Sample];
N06[label="Observation False"];
N07[label=Sample];
N08[label="Observation False"];
N09[label=Sample];
N10[label="Observation True"];
N11[label=Sample];
N12[label="Observation False"];
N13[label=ToPosReal];
N14[label="-"];
N15[label=Log1mexp];
N16[label="-"];
N17[label=Beta];
N18[label=Sample];
N19[label=Query];
N00 -> N02;
N00 -> N17;
N01 -> N02;
N02 -> N03;
N03 -> N04;
N03 -> N13;
N04 -> N05;
N04 -> N07;
N04 -> N09;
N04 -> N11;
N05 -> N06;
N07 -> N08;
N09 -> N10;
N11 -> N12;
N13 -> N14;
N14 -> N15;
N15 -> N16;
N16 -> N17;
N17 -> N18;
N18 -> N19;
}
"""
self.assertEqual(observed.strip(), expected.strip())
observed = BMGInference().to_cpp(queries, observations)
expected = """
graph::Graph g;
uint n0 = g.add_constant_pos_real(2.0);
uint n1 = g.add_constant_pos_real(0.14541345834732056);
uint n2 = g.add_distribution(
graph::DistributionType::BETA,
graph::AtomicType::PROBABILITY,
std::vector<uint>({n0, n1}));
uint n3 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n2}));
uint n4 = g.add_distribution(
graph::DistributionType::BERNOULLI,
graph::AtomicType::BOOLEAN,
std::vector<uint>({n3}));
uint n5 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n4}));
g.observe(n5, false);
uint n6 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n4}));
g.observe(n6, false);
uint n7 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n4}));
g.observe(n7, true);
uint n8 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n4}));
g.observe(n8, false);
uint n9 = g.add_operator(
graph::OperatorType::TO_POS_REAL, std::vector<uint>({n3}));
uint n10 = g.add_operator(
graph::OperatorType::NEGATE, std::vector<uint>({n9}));
uint n11 = g.add_operator(
graph::OperatorType::LOG1MEXP, std::vector<uint>({n10}));
uint n12 = g.add_operator(
graph::OperatorType::NEGATE, std::vector<uint>({n11}));
uint n13 = g.add_distribution(
graph::DistributionType::BETA,
graph::AtomicType::PROBABILITY,
std::vector<uint>({n0, n12}));
uint n14 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n13}));
uint q0 = g.query(n14);
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_python(queries, observations)
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_pos_real(2.0)
n1 = g.add_constant_pos_real(0.14541345834732056)
n2 = g.add_distribution(
graph.DistributionType.BETA,
graph.AtomicType.PROBABILITY,
[n0, n1],
)
n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2])
n4 = g.add_distribution(
graph.DistributionType.BERNOULLI,
graph.AtomicType.BOOLEAN,
[n3],
)
n5 = g.add_operator(graph.OperatorType.SAMPLE, [n4])
g.observe(n5, False)
n6 = g.add_operator(graph.OperatorType.SAMPLE, [n4])
g.observe(n6, False)
n7 = g.add_operator(graph.OperatorType.SAMPLE, [n4])
g.observe(n7, True)
n8 = g.add_operator(graph.OperatorType.SAMPLE, [n4])
g.observe(n8, False)
n9 = g.add_operator(graph.OperatorType.TO_POS_REAL, [n3])
n10 = g.add_operator(graph.OperatorType.NEGATE, [n9])
n11 = g.add_operator(graph.OperatorType.LOG1MEXP, [n10])
n12 = g.add_operator(graph.OperatorType.NEGATE, [n11])
n13 = g.add_distribution(
graph.DistributionType.BETA,
graph.AtomicType.PROBABILITY,
[n0, n12],
)
n14 = g.add_operator(graph.OperatorType.SAMPLE, [n13])
q0 = g.query(n14)
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/log1mexp_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch.distributions import Bernoulli
# TODO: x > y --> if x then not y else false
# TODO: x is y --> same as == ? Or should this be illegal?
@bm.random_variable
def flip(n):
return Bernoulli(0.5)
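# Each functional below compares Bernoulli samples with constants or with each
# other. The compiler rewrites every comparison into an equivalent Boolean
# graph fragment built from the sample itself, its complement, if-then-else,
# and the constants True and False; the inline comments give the expected
# simplification.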
#
# ==
#
@bm.functional
def eq_x_0():
# not flip(0)
return flip(0) == 0.0
@bm.functional
def eq_x_1():
# flip(0)
return flip(0) == 1.0
@bm.functional
def eq_0_y():
# not flip(1)
return 0 == flip(1)
@bm.functional
def eq_1_y():
# flip(1)
return 1 == flip(1)
@bm.functional
def eq_x_y():
# if flip(0) then flip(1) else not flip(1)
return flip(0) == flip(1)
#
# !=
#
@bm.functional
def neq_x_0():
# flip(0)
return flip(0) != 0.0
@bm.functional
def neq_x_1():
# not flip(0)
return flip(0) != 1.0
@bm.functional
def neq_0_y():
# flip(1)
return 0 != flip(1)
@bm.functional
def neq_1_y():
# not flip(1)
return 1 != flip(1)
@bm.functional
def neq_x_y():
# if flip(0) then not flip(1) else flip(1)
return flip(0) != flip(1)
#
# >=
#
@bm.functional
def gte_x_0():
# true
return flip(0) >= 0.0
@bm.functional
def gte_x_1():
# flip(0)
return flip(0) >= 1.0
@bm.functional
def gte_0_y():
# not flip(1)
return 0 >= flip(1)
@bm.functional
def gte_1_y():
# true
return 1 >= flip(1)
@bm.functional
def gte_x_y():
# if flip(0) then true else not flip(1)
return flip(0) >= flip(1)
#
# <=
#
@bm.functional
def lte_x_0():
# not flip(0)
return flip(0) <= 0.0
@bm.functional
def lte_x_1():
# true
return flip(0) <= 1.0
@bm.functional
def lte_0_y():
# true
return 0 <= flip(1)
@bm.functional
def lte_1_y():
# flip(1)
return 1 <= flip(1)
@bm.functional
def lte_x_y():
# if flip(0) then flip(1) else true
return flip(0) <= flip(1)
#
# <
#
@bm.functional
def lt_x_0():
# false
return flip(0) < 0.0
@bm.functional
def lt_x_1():
# not flip(0)
return flip(0) < 1.0
@bm.functional
def lt_0_y():
# flip(1)
return 0 < flip(1)
@bm.functional
def lt_1_y():
# false
return 1 < flip(1)
@bm.functional
def lt_x_y():
# if flip(0) then false else flip(1)
return flip(0) < flip(1)
#
# >
#
@bm.functional
def gt_x_0():
# flip(0)
return flip(0) > 0.0
@bm.functional
def gt_x_1():
# false
return flip(0) > 1.0
@bm.functional
def gt_0_y():
# false
return 0 > flip(1)
@bm.functional
def gt_1_y():
# not flip(1)
return 1 > flip(1)
@bm.functional
def gt_x_y():
# if flip(0) then not flip(1) else false
return flip(0) > flip(1)
class BooleanComparisonsTest(unittest.TestCase):
def test_boolean_comparison_eq(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([eq_x_y()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Sample];
N4[label=complement];
N5[label=if];
N6[label=Query];
N0 -> N1;
N1 -> N2;
N1 -> N3;
N2 -> N5;
N3 -> N4;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([eq_x_0()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=complement];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([eq_0_y()], {})
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([eq_x_1()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([eq_1_y()], {})
self.assertEqual(expected.strip(), observed.strip())
def test_boolean_comparison_neq(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([neq_x_y()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Sample];
N4[label=complement];
N5[label=if];
N6[label=Query];
N0 -> N1;
N1 -> N2;
N1 -> N3;
N2 -> N5;
N3 -> N4;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([neq_x_0()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([neq_0_y()], {})
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([neq_x_1()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=complement];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([neq_1_y()], {})
self.assertEqual(expected.strip(), observed.strip())
def test_boolean_comparison_gte(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([gte_x_y()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Sample];
N4[label=True];
N5[label=complement];
N6[label=if];
N7[label=Query];
N0 -> N1;
N1 -> N2;
N1 -> N3;
N2 -> N6;
N3 -> N5;
N4 -> N6;
N5 -> N6;
N6 -> N7;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# TODO: Note that here we keep the sample in the graph even though it is
# not queried or observed. We might consider removing it.
observed = BMGInference().to_dot([gte_x_0()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=True];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N3 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([gte_0_y()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=complement];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([gte_x_1()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([gte_1_y()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=True];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N3 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_boolean_comparison_lte(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([lte_x_y()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Sample];
N4[label=True];
N5[label=if];
N6[label=Query];
N0 -> N1;
N1 -> N2;
N1 -> N3;
N2 -> N5;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([lte_x_0()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=complement];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([lte_0_y()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=True];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N3 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([lte_x_1()], {})
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([lte_1_y()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_boolean_comparison_lt(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([lt_x_y()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Sample];
N4[label=False];
N5[label=if];
N6[label=Query];
N0 -> N1;
N1 -> N2;
N1 -> N3;
N2 -> N5;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([lt_x_0()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=False];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N3 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([lt_1_y()], {})
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([lt_0_y()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([lt_x_1()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=complement];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_boolean_comparison_gt(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([gt_x_y()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Sample];
N4[label=complement];
N5[label=False];
N6[label=if];
N7[label=Query];
N0 -> N1;
N1 -> N2;
N1 -> N3;
N2 -> N6;
N3 -> N4;
N4 -> N6;
N5 -> N6;
N6 -> N7;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([gt_x_0()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([gt_1_y()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=complement];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N3 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([gt_0_y()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=False];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N3 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([gt_x_1()], {})
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/boolean_comparisons_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Beta
# This is a very simplified version of a CLARA model; it is the sort of model
# to which we want to apply our workaround of removing observations.
@bm.random_variable
def sensitivity(labeler):
return Beta(1, 1)
@bm.random_variable
def specificity(labeler):
return Beta(2, 2)
@bm.random_variable
def prevalence():
return Beta(0.5, 0.5)
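# The observed value is the event that both labelers say "positive". Its
# probability marginalizes over the true label: prevalence times the two
# sensitivities, plus (1 - prevalence) times the two false-positive rates
# (1 - specificity), computed in log space and then exponentiated.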
@bm.random_variable
def observation(x):
bob = 0
sue = 1
pos_sum = prevalence().log() + sensitivity(bob).log() + sensitivity(sue).log()
neg_sum = (
(1 - prevalence()).log()
+ (1 - specificity(bob)).log()
+ (1 - specificity(sue)).log()
)
log_prob = (pos_sum.exp() + neg_sum.exp()).log()
return Bernoulli(log_prob.exp())
class FixObserveTrueTest(unittest.TestCase):
def test_fix_observe_true(self) -> None:
self.maxDiff = None
observations = {observation(0): tensor(1.0), observation(1): tensor(1.0)}
queries = []
bmg = BMGInference()
observed = bmg.to_dot(queries, observations)
# Here's the model as it would be handed off to BMG normally.
expected = """
digraph "graph" {
N00[label=0.5];
N01[label=Beta];
N02[label=Sample];
N03[label=1.0];
N04[label=Beta];
N05[label=Sample];
N06[label=Sample];
N07[label=2.0];
N08[label=Beta];
N09[label=Sample];
N10[label=Sample];
N11[label=Log];
N12[label=Log];
N13[label=Log];
N14[label="+"];
N15[label=complement];
N16[label=Log];
N17[label=complement];
N18[label=Log];
N19[label=complement];
N20[label=Log];
N21[label="+"];
N22[label=LogSumExp];
N23[label=Exp];
N24[label=ToProb];
N25[label=Bernoulli];
N26[label=Sample];
N27[label="Observation True"];
N28[label=Sample];
N29[label="Observation True"];
N00 -> N01;
N00 -> N01;
N01 -> N02;
N02 -> N11;
N02 -> N15;
N03 -> N04;
N03 -> N04;
N04 -> N05;
N04 -> N06;
N05 -> N12;
N06 -> N13;
N07 -> N08;
N07 -> N08;
N08 -> N09;
N08 -> N10;
N09 -> N17;
N10 -> N19;
N11 -> N14;
N12 -> N14;
N13 -> N14;
N14 -> N22;
N15 -> N16;
N16 -> N21;
N17 -> N18;
N18 -> N21;
N19 -> N20;
N20 -> N21;
N21 -> N22;
N22 -> N23;
N23 -> N24;
N24 -> N25;
N25 -> N26;
N25 -> N28;
N26 -> N27;
N28 -> N29;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# Now let's force an additional rewriting pass. Note that there must
# be as many factor nodes as we removed observations; factor nodes
# are not deduplicated.
bmg = BMGInference()
bmg._fix_observe_true = True
observed = bmg.to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label=0.5];
N01[label=Beta];
N02[label=Sample];
N03[label=1.0];
N04[label=Beta];
N05[label=Sample];
N06[label=Sample];
N07[label=2.0];
N08[label=Beta];
N09[label=Sample];
N10[label=Sample];
N11[label=Log];
N12[label=Log];
N13[label=Log];
N14[label="+"];
N15[label=complement];
N16[label=Log];
N17[label=complement];
N18[label=Log];
N19[label=complement];
N20[label=Log];
N21[label="+"];
N22[label=LogSumExp];
N23[label=ExpProduct];
N24[label=ExpProduct];
N00 -> N01;
N00 -> N01;
N01 -> N02;
N02 -> N11;
N02 -> N15;
N03 -> N04;
N03 -> N04;
N04 -> N05;
N04 -> N06;
N05 -> N12;
N06 -> N13;
N07 -> N08;
N07 -> N08;
N08 -> N09;
N08 -> N10;
N09 -> N17;
N10 -> N19;
N11 -> N14;
N12 -> N14;
N13 -> N14;
N14 -> N22;
N15 -> N16;
N16 -> N21;
N17 -> N18;
N18 -> N21;
N19 -> N20;
N20 -> N21;
N21 -> N22;
N22 -> N23;
N22 -> N24;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/fix_observe_true_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch.distributions import Bernoulli, Beta, Normal
@bm.random_variable
def norm(x):
return Normal(0.0, 1.0)
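# The sum_* and mult_* functionals below build chains of binary additions and
# multiplications; the multiary-operator fixer should collapse each chain into
# a single n-ary "+" or "*" node, as the after-transform graphs in the tests
# show.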
@bm.functional
def sum_1():
return norm(0) + norm(1) + norm(2)
@bm.functional
def sum_2():
return norm(3) + norm(4) + norm(5)
@bm.functional
def sum_3():
return sum_1() + 5.0
@bm.functional
def sum_4():
return sum_1() + sum_2()
@bm.functional
def mult_1():
return norm(0) * norm(1) * norm(2)
@bm.functional
def mult_2():
return norm(3) * norm(4) * norm(5)
@bm.functional
def mult_3():
return mult_1() * 5.0
@bm.functional
def mult_4():
return mult_1() * mult_2()
@bm.random_variable
def mult_negs_1():
# Verify that the product of three negative reals is a negative real.
phi = Normal(0.0, 1.0).cdf
p1 = phi(norm(1)) # P
p2 = phi(norm(2)) # P
p3 = phi(norm(3)) # P
lp1 = p1.log() # R-
lp2 = p2.log() # R-
lp3 = p3.log() # R-
prod = lp1 * lp2 * lp3 # Should be R-
ex = prod.exp() # Should be P
return Bernoulli(ex) # Should be legal
@bm.random_variable
def mult_negs_2():
phi = Normal(0.0, 1.0).cdf
p1 = phi(norm(1)) # P
p2 = phi(norm(2)) # P
p3 = phi(norm(3)) # P
lp1 = p1.log() # R-
lp2 = p2.log() # R-
lp3 = p3.log() # R-
prod = lp1 * lp2 * lp3 # Should be R-
return Beta(-prod, 2.0) # Should be legal
class FixMultiaryOperatorTest(unittest.TestCase):
def test_fix_multiary_addition_1(self) -> None:
self.maxDiff = None
observations = {}
queries = [sum_3(), sum_4()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before optimization
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label="+"];
N06[label=Sample];
N07[label="+"];
N08[label=5.0];
N09[label="+"];
N10[label=Query];
N11[label=Sample];
N12[label=Sample];
N13[label="+"];
N14[label=Sample];
N15[label="+"];
N16[label="+"];
N17[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N02 -> N06;
N02 -> N11;
N02 -> N12;
N02 -> N14;
N03 -> N05;
N04 -> N05;
N05 -> N07;
N06 -> N07;
N07 -> N09;
N07 -> N16;
N08 -> N09;
N09 -> N10;
N11 -> N13;
N12 -> N13;
N13 -> N15;
N14 -> N15;
N15 -> N16;
N16 -> N17;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After optimization:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=Sample];
N06[label="+"];
N07[label=5.0];
N08[label="+"];
N09[label=Query];
N10[label=Sample];
N11[label=Sample];
N12[label=Sample];
N13[label="+"];
N14[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N02 -> N05;
N02 -> N10;
N02 -> N11;
N02 -> N12;
N03 -> N06;
N04 -> N06;
N05 -> N06;
N06 -> N08;
N06 -> N13;
N07 -> N08;
N08 -> N09;
N10 -> N13;
N11 -> N13;
N12 -> N13;
N13 -> N14;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_multiary_multiplication(self) -> None:
self.maxDiff = None
observations = {}
queries = [mult_3(), mult_4()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before optimization
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label="*"];
N06[label=Sample];
N07[label="*"];
N08[label=5.0];
N09[label="*"];
N10[label=Query];
N11[label=Sample];
N12[label=Sample];
N13[label="*"];
N14[label=Sample];
N15[label="*"];
N16[label="*"];
N17[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N02 -> N06;
N02 -> N11;
N02 -> N12;
N02 -> N14;
N03 -> N05;
N04 -> N05;
N05 -> N07;
N06 -> N07;
N07 -> N09;
N07 -> N16;
N08 -> N09;
N09 -> N10;
N11 -> N13;
N12 -> N13;
N13 -> N15;
N14 -> N15;
N15 -> N16;
N16 -> N17;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After optimization:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=Sample];
N06[label="*"];
N07[label=5.0];
N08[label="*"];
N09[label=Query];
N10[label=Sample];
N11[label=Sample];
N12[label=Sample];
N13[label="*"];
N14[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N02 -> N05;
N02 -> N10;
N02 -> N11;
N02 -> N12;
N03 -> N06;
N04 -> N06;
N05 -> N06;
N06 -> N08;
N06 -> N13;
N07 -> N08;
N08 -> N09;
N10 -> N13;
N11 -> N13;
N12 -> N13;
N13 -> N14;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_multiply_neg_reals_1(self) -> None:
self.maxDiff = None
observations = {}
queries = [mult_negs_1()]
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=Sample];
N06[label=Phi];
N07[label=Log];
N08[label="-"];
N09[label=Phi];
N10[label=Log];
N11[label="-"];
N12[label=Phi];
N13[label=Log];
N14[label="-"];
N15[label="*"];
N16[label="-"];
N17[label=Exp];
N18[label=Bernoulli];
N19[label=Sample];
N20[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N02 -> N05;
N03 -> N06;
N04 -> N09;
N05 -> N12;
N06 -> N07;
N07 -> N08;
N08 -> N15;
N09 -> N10;
N10 -> N11;
N11 -> N15;
N12 -> N13;
N13 -> N14;
N14 -> N15;
N15 -> N16;
N16 -> N17;
N17 -> N18;
N18 -> N19;
N19 -> N20;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_multiply_neg_reals_2(self) -> None:
# Make sure we're not introducing negate
# on top of negate.
self.maxDiff = None
observations = {}
queries = [mult_negs_2()]
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=Sample];
N06[label=Phi];
N07[label=Log];
N08[label="-"];
N09[label=Phi];
N10[label=Log];
N11[label="-"];
N12[label=Phi];
N13[label=Log];
N14[label="-"];
N15[label="*"];
N16[label=2.0];
N17[label=Beta];
N18[label=Sample];
N19[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N02 -> N05;
N03 -> N06;
N04 -> N09;
N05 -> N12;
N06 -> N07;
N07 -> N08;
N08 -> N15;
N09 -> N10;
N10 -> N11;
N11 -> N15;
N12 -> N13;
N13 -> N14;
N14 -> N15;
N15 -> N17;
N16 -> N17;
N17 -> N18;
N18 -> N19;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/fix_multiary_ops_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.compiler.runtime import BMGRuntime
from beanmachine.ppl.inference import BMGInference
from torch import logsumexp, tensor
from torch.distributions import Bernoulli, Normal
@bm.random_variable
def norm(n):
return Normal(tensor(0.0), tensor(1.0))
@bm.functional
def make_a_tensor():
return tensor([norm(1), norm(1), norm(2), 1.25])
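# make_a_tensor mixes two distinct samples (norm(1) used twice and norm(2)
# once) with the constant 1.25; lse1 and lse2 compute the same logsumexp over
# it via the tensor method and the torch function respectively.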
@bm.functional
def lse1():
return make_a_tensor().logsumexp(dim=0)
@bm.functional
def lse2():
return logsumexp(make_a_tensor(), dim=0)
@bm.functional
def lse_bad_1():
# Dim cannot be anything but zero
return logsumexp(make_a_tensor(), dim=1)
@bm.random_variable
def flip():
return Bernoulli(0.5)
@bm.functional
def lse_bad_2():
# keepdim cannot be anything but false
return logsumexp(make_a_tensor(), dim=0, keepdim=flip())
class TensorOperationsTest(unittest.TestCase):
def test_tensor_operations_1(self) -> None:
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph([lse1()], {})
observed = to_dot(bmg)
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=1.25];
N06[label=Tensor];
N07[label=0];
N08[label=False];
N09[label=LogSumExp];
N10[label=Query];
N00 -> N02[label=mu];
N01 -> N02[label=sigma];
N02 -> N03[label=operand];
N02 -> N04[label=operand];
N03 -> N06[label=0];
N03 -> N06[label=1];
N04 -> N06[label=2];
N05 -> N06[label=3];
N06 -> N09[label=operand];
N07 -> N09[label=dim];
N08 -> N09[label=keepdim];
N09 -> N10[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
# Do it again, but this time calling the free function torch.logsumexp
# instead of the tensor method. We should get the same result.
bmg = BMGRuntime().accumulate_graph([lse2()], {})
observed = to_dot(bmg)
self.assertEqual(expected.strip(), observed.strip())
# Now try generating a BMG from them. The problem fixer should
# remove the unsupported tensor node.
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Sample];
N5[label=1.25];
N6[label=LogSumExp];
N7[label=Query];
N0 -> N2[label=mu];
N1 -> N2[label=sigma];
N2 -> N3[label=operand];
N2 -> N4[label=operand];
N3 -> N6[label=0];
N3 -> N6[label=1];
N4 -> N6[label=2];
N5 -> N6[label=3];
N6 -> N7[label=operator];
}
"""
bmg = BMGRuntime().accumulate_graph([lse1()], {})
observed = to_dot(bmg, after_transform=True)
self.assertEqual(observed.strip(), expected.strip())
def test_unsupported_logsumexp(self) -> None:
with self.assertRaises(ValueError) as ex:
BMGInference().infer([lse_bad_1()], {}, 1)
# TODO: Do a better job here. Say why the operation is unsupported.
expected = """
The model uses a logsumexp operation unsupported by Bean Machine Graph.
The unsupported node was created in function call lse_bad_1().
"""
self.assertEqual(expected.strip(), str(ex.exception).strip())
expected = """
The node logsumexp cannot be sized.The operand sizes may be incompatible or the size may not be computable at compile time. The operand sizes are: [torch.Size([4]), torch.Size([]), torch.Size([])]
The unsizable node was created in function call lse_bad_2().
"""
with self.assertRaises(ValueError) as ex:
BMGInference().infer([lse_bad_2()], {}, 1)
self.assertEqual(expected.strip(), str(ex.exception).strip())
| beanmachine-main | tests/ppl/compiler/tensor_operations_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Beta-Bernoulli model conjugacy transformation check when
hyperparameter is a random variable."""
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.examples.conjugate_models.beta_bernoulli import BetaBernoulliModel
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
from torch.distributions import Beta
class BetaBernoulliAlphaRVModel(BetaBernoulliModel):
def __init__(self):
self.beta_ = 2.0
@bm.random_variable
def alpha(self):
return Beta(5.0, 1.0)
@bm.random_variable
def theta(self):
return Beta(self.alpha(), self.beta_)
class BetaBernoulliWithAlphaAsRVConjugateTest(unittest.TestCase):
def test_conjugate_graph(self) -> None:
"""
Test to check that the Beta-Bernoulli conjugate transformation
is not applied when a parameter of the Beta distribution is
itself a random variable.
"""
self.maxDiff = None
model = BetaBernoulliAlphaRVModel()
queries = [model.theta()]
observations = {
model.y(0): tensor(0.0),
model.y(1): tensor(0.0),
model.y(2): tensor(1.0),
model.y(3): tensor(0.0),
}
num_samples = 1000
bmg = BMGInference()
# This is the model before beta-bernoulli conjugate rewrite is applied
expected_bmg = bmg.to_dot(queries, observations, num_samples)
# This is the model after beta-bernoulli conjugate rewrite is applied
skip_optimizations = set()
observed_bmg = bmg.to_dot(
queries, observations, num_samples, skip_optimizations=skip_optimizations
)
self.assertEqual(expected_bmg.strip(), observed_bmg.strip())
| beanmachine-main | tests/ppl/compiler/fix_beta_bernoulli_alpha_rv_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.compiler.copy_and_replace import copy_and_replace
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.compiler.runtime import BMGRuntime
from beanmachine.ppl.compiler.tensorizer_transformer import Tensorizer
from torch import mm, tensor
from torch.distributions import Beta, Normal
@bm.random_variable
def norm(n):
return Normal(tensor(0.0), tensor(1.0))
@bm.random_variable
def beta(n):
return Beta(2, 2)
@bm.functional
def make_matrix(n):
return tensor([[norm(n), norm(n)], [norm(n), 1.25]])
@bm.functional
def make_prob_matrix(n):
return tensor([[beta(n), beta(n)], [beta(n), 0.25]])
@bm.functional
def make_tensor(n):
return tensor(
[[[norm(n), norm(n)], [norm(n), 2.35]], [[norm(n), norm(n)], [norm(n), 1.25]]]
)
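# The Tensorizer should rewrite element-wise operations on these stochastic
# tensors into their matrix equivalents: + becomes MatrixAdd, exp becomes
# MatrixExp, sum becomes MatrixSum, log becomes MatrixLog, and multiplying a
# matrix by a scalar becomes MatrixScale.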
@bm.functional
def operators_are_tensorized():
return (make_matrix(0) + make_matrix(1)).exp().sum()
@bm.functional
def operators_are_tensorized_2():
return make_prob_matrix(1).log().sum()
@bm.functional
def matrix_scale_lhs():
return make_matrix(1) * norm(2)
@bm.functional
def matrix_scale_rhs():
return norm(1) * make_matrix(2)
@bm.functional
def scalar_mult():
return norm(1) * norm(2)
@bm.functional
def non_matrix_tensor_mult_lhs():
return make_tensor(1) * norm(2)
@bm.functional
def non_matrix_tensor_mult_rhs():
return norm(6) * make_tensor(5)
@bm.functional
def mm_mismatch():
return mm(make_tensor(1), tensor([3.6, 3.1, 3.5]))
class TensorizeTransformerTest(unittest.TestCase):
def test_tensor_operators(self) -> None:
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph(
[operators_are_tensorized(), operators_are_tensorized_2()], {}
)
transformed_graph, error_report = copy_and_replace(
bmg, lambda c, s: Tensorizer(c, s)
)
before = to_dot(bmg)
after = to_dot(transformed_graph)
expected_before = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=1.25];
N05[label=Tensor];
N06[label=Sample];
N07[label=Tensor];
N08[label="+"];
N09[label=Exp];
N10[label=Sum];
N11[label=Query];
N12[label=2.0];
N13[label=Beta];
N14[label=Sample];
N15[label=0.25];
N16[label=Tensor];
N17[label=Log];
N18[label=Sum];
N19[label=Query];
N00 -> N02[label=mu];
N01 -> N02[label=sigma];
N02 -> N03[label=operand];
N02 -> N06[label=operand];
N03 -> N05[label=0];
N03 -> N05[label=1];
N03 -> N05[label=2];
N04 -> N05[label=3];
N04 -> N07[label=3];
N05 -> N08[label=left];
N06 -> N07[label=0];
N06 -> N07[label=1];
N06 -> N07[label=2];
N07 -> N08[label=right];
N08 -> N09[label=operand];
N09 -> N10[label=operand];
N10 -> N11[label=operator];
N12 -> N13[label=alpha];
N12 -> N13[label=beta];
N13 -> N14[label=operand];
N14 -> N16[label=0];
N14 -> N16[label=1];
N14 -> N16[label=2];
N15 -> N16[label=3];
N16 -> N17[label=operand];
N17 -> N18[label=operand];
N18 -> N19[label=operator];
}
"""
expected_after = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=1.25];
N05[label=Tensor];
N06[label=Sample];
N07[label=Tensor];
N08[label=MatrixAdd];
N09[label=MatrixExp];
N10[label=MatrixSum];
N11[label=Query];
N12[label=2.0];
N13[label=Beta];
N14[label=Sample];
N15[label=0.25];
N16[label=Tensor];
N17[label=MatrixLog];
N18[label=MatrixSum];
N19[label=Query];
N00 -> N02[label=mu];
N01 -> N02[label=sigma];
N02 -> N03[label=operand];
N02 -> N06[label=operand];
N03 -> N05[label=0];
N03 -> N05[label=1];
N03 -> N05[label=2];
N04 -> N05[label=3];
N04 -> N07[label=3];
N05 -> N08[label=left];
N06 -> N07[label=0];
N06 -> N07[label=1];
N06 -> N07[label=2];
N07 -> N08[label=right];
N08 -> N09[label=operand];
N09 -> N10[label=operand];
N10 -> N11[label=operator];
N12 -> N13[label=alpha];
N12 -> N13[label=beta];
N13 -> N14[label=operand];
N14 -> N16[label=0];
N14 -> N16[label=1];
N14 -> N16[label=2];
N15 -> N16[label=3];
N16 -> N17[label=operand];
N17 -> N18[label=operand];
N18 -> N19[label=operator];
}
"""
self.assertEqual(expected_before.strip(), before.strip())
self.assertEqual(expected_after.strip(), after.strip())
def test_matrix_scale(self) -> None:
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph(
[
matrix_scale_rhs(),
matrix_scale_lhs(),
non_matrix_tensor_mult_lhs(),
non_matrix_tensor_mult_rhs(),
],
{},
)
transformed_graph, error_report = copy_and_replace(
bmg, lambda c, s: Tensorizer(c, s)
)
before = to_dot(bmg)
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=1.25];
N06[label=Tensor];
N07[label="*"];
N08[label=Query];
N09[label=Tensor];
N10[label="*"];
N11[label=Query];
N12[label=2.35];
N13[label=Tensor];
N14[label="*"];
N15[label=Query];
N16[label=Sample];
N17[label=Sample];
N18[label=Tensor];
N19[label="*"];
N20[label=Query];
N00 -> N02[label=mu];
N01 -> N02[label=sigma];
N02 -> N03[label=operand];
N02 -> N04[label=operand];
N02 -> N16[label=operand];
N02 -> N17[label=operand];
N03 -> N07[label=left];
N03 -> N09[label=0];
N03 -> N09[label=1];
N03 -> N09[label=2];
N03 -> N13[label=0];
N03 -> N13[label=1];
N03 -> N13[label=2];
N03 -> N13[label=4];
N03 -> N13[label=5];
N03 -> N13[label=6];
N04 -> N06[label=0];
N04 -> N06[label=1];
N04 -> N06[label=2];
N04 -> N10[label=right];
N04 -> N14[label=right];
N05 -> N06[label=3];
N05 -> N09[label=3];
N05 -> N13[label=7];
N05 -> N18[label=7];
N06 -> N07[label=right];
N07 -> N08[label=operator];
N09 -> N10[label=left];
N10 -> N11[label=operator];
N12 -> N13[label=3];
N12 -> N18[label=3];
N13 -> N14[label=left];
N14 -> N15[label=operator];
N16 -> N19[label=left];
N17 -> N18[label=0];
N17 -> N18[label=1];
N17 -> N18[label=2];
N17 -> N18[label=4];
N17 -> N18[label=5];
N17 -> N18[label=6];
N18 -> N19[label=right];
N19 -> N20[label=operator];
}
"""
self.assertEqual(expected.strip(), before.strip())
after = to_dot(transformed_graph)
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=1.25];
N06[label=Tensor];
N07[label=MatrixScale];
N08[label=Query];
N09[label=Tensor];
N10[label=MatrixScale];
N11[label=Query];
N12[label=2.35];
N13[label=Tensor];
N14[label=MatrixScale];
N15[label=Query];
N16[label=Sample];
N17[label=Sample];
N18[label=Tensor];
N19[label=MatrixScale];
N20[label=Query];
N00 -> N02[label=mu];
N01 -> N02[label=sigma];
N02 -> N03[label=operand];
N02 -> N04[label=operand];
N02 -> N16[label=operand];
N02 -> N17[label=operand];
N03 -> N07[label=left];
N03 -> N09[label=0];
N03 -> N09[label=1];
N03 -> N09[label=2];
N03 -> N13[label=0];
N03 -> N13[label=1];
N03 -> N13[label=2];
N03 -> N13[label=4];
N03 -> N13[label=5];
N03 -> N13[label=6];
N04 -> N06[label=0];
N04 -> N06[label=1];
N04 -> N06[label=2];
N04 -> N10[label=left];
N04 -> N14[label=left];
N05 -> N06[label=3];
N05 -> N09[label=3];
N05 -> N13[label=7];
N05 -> N18[label=7];
N06 -> N07[label=right];
N07 -> N08[label=operator];
N09 -> N10[label=right];
N10 -> N11[label=operator];
N12 -> N13[label=3];
N12 -> N18[label=3];
N13 -> N14[label=right];
N14 -> N15[label=operator];
N16 -> N19[label=left];
N17 -> N18[label=0];
N17 -> N18[label=1];
N17 -> N18[label=2];
N17 -> N18[label=4];
N17 -> N18[label=5];
N17 -> N18[label=6];
N18 -> N19[label=right];
N19 -> N20[label=operator];
}
"""
self.assertEqual(expected.strip(), after.strip())
def test_not_transformed(self) -> None:
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph(
[scalar_mult()],
{},
)
transformed_graph, error_report = copy_and_replace(
bmg, lambda c, s: Tensorizer(c, s)
)
observed = to_dot(transformed_graph)
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Sample];
N5[label="*"];
N6[label=Query];
N0 -> N2[label=mu];
N1 -> N2[label=sigma];
N2 -> N3[label=operand];
N2 -> N4[label=operand];
N3 -> N5[label=left];
N4 -> N5[label=right];
N5 -> N6[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_errors(self) -> None:
self.maxDiff = None
# This case verifies that even if there is nothing replaceable we still
# error out, because the errors in this graph prevent even checking
# whether the graph can be tensorized.
bmg = BMGRuntime().accumulate_graph([mm_mismatch()], {})
transformed_graph, error_report = copy_and_replace(
bmg, lambda c, s: Tensorizer(c, s)
)
if len(error_report.errors) == 1:
error = error_report.errors[0].__str__()
expected = """
The model uses a matrix multiplication (@) operation unsupported by Bean Machine Graph.
The dimensions of the operands are 2x2 and 3x1.
The unsupported node was created in function call mm_mismatch().
"""
self.assertEqual(expected.strip(), error.strip())
else:
self.fail(
"A single error message should have been generated. Tensorizing depends on sizing and a size cannot be inferred from an operation whose operand sizes are invalid."
)
| beanmachine-main | tests/ppl/compiler/tensorize_transformer_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Normal
@bm.random_variable
def n(n):
return Normal(0, 1)
@bm.random_variable
def n12():
return Normal(tensor([n(3), n(4)]), 1.0)
@bm.random_variable
def n21():
return Normal(tensor([[n(1)], [n(2)]]), 1.0)
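# n12() samples a vector of shape [2] and n21() samples a matrix of shape
# [2, 1]; adding them broadcasts both operands to a 2x2 result, which the
# compiler expresses with Broadcast and MatrixAdd nodes.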
@bm.functional
def broadcast_add():
return n12() + n21()
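# fill_add_1 and fill_add_2 add a scalar (a sample or the constant 123) to a
# vector of samples; the compiler expresses the scalar as a FillMatrix node
# and then uses MatrixAdd.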
@bm.functional
def fill_add_1():
return n12() + n(5)
@bm.functional
def fill_add_2():
return n12() + 123
class BroadcastTest(unittest.TestCase):
# TODO: Test broadcast multiplication as well.
def test_broadcast_add(self) -> None:
self.maxDiff = None
observations = {}
queries = [broadcast_add()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite:
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=Tensor];
N06[label=1.0];
N07[label=Normal];
N08[label=Sample];
N09[label=Sample];
N10[label=Sample];
N11[label=Tensor];
N12[label=Normal];
N13[label=Sample];
N14[label="+"];
N15[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N02 -> N09;
N02 -> N10;
N03 -> N05;
N04 -> N05;
N05 -> N07;
N06 -> N07;
N06 -> N12;
N07 -> N08;
N08 -> N14;
N09 -> N11;
N10 -> N11;
N11 -> N12;
N12 -> N13;
N13 -> N14;
N14 -> N15;
}
"""
self.assertEqual(expected.strip(), observed.strip())
g, _ = BMGInference().to_graph(queries, observations)
observed = g.to_dot()
expected = """
digraph "graph" {
N0[label="0"];
N1[label="1"];
N2[label="Normal"];
N3[label="~"];
N4[label="~"];
N5[label="Normal"];
N6[label="~"];
N7[label="Normal"];
N8[label="~"];
N9[label="~"];
N10[label="~"];
N11[label="Normal"];
N12[label="~"];
N13[label="Normal"];
N14[label="~"];
N15[label="2"];
N16[label="1"];
N17[label="ToMatrix"];
N18[label="Broadcast"];
N19[label="ToMatrix"];
N20[label="Broadcast"];
N21[label="MatrixAdd"];
N0 -> N2;
N1 -> N2;
N1 -> N5;
N1 -> N7;
N1 -> N11;
N1 -> N13;
N2 -> N3;
N2 -> N4;
N2 -> N9;
N2 -> N10;
N3 -> N5;
N4 -> N7;
N5 -> N6;
N6 -> N17;
N7 -> N8;
N8 -> N17;
N9 -> N11;
N10 -> N13;
N11 -> N12;
N12 -> N19;
N13 -> N14;
N14 -> N19;
N15 -> N17;
N15 -> N18;
N15 -> N18;
N15 -> N19;
N15 -> N20;
N15 -> N20;
N16 -> N17;
N16 -> N19;
N17 -> N18;
N18 -> N21;
N19 -> N20;
N20 -> N21;
Q0[label="Query"];
N21 -> Q0;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fill_add_1(self) -> None:
self.maxDiff = None
observations = {}
queries = [fill_add_1()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite:
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=Tensor];
N06[label=1.0];
N07[label=Normal];
N08[label=Sample];
N09[label=Sample];
N10[label="+"];
N11[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N02 -> N09;
N03 -> N05;
N04 -> N05;
N05 -> N07;
N06 -> N07;
N07 -> N08;
N08 -> N10;
N09 -> N10;
N10 -> N11;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# The model after converting to BMG:
g, _ = BMGInference().to_graph(queries, observations)
observed = g.to_dot()
expected = """
digraph "graph" {
N0[label="0"];
N1[label="1"];
N2[label="Normal"];
N3[label="~"];
N4[label="~"];
N5[label="Normal"];
N6[label="~"];
N7[label="Normal"];
N8[label="~"];
N9[label="~"];
N10[label="2"];
N11[label="1"];
N12[label="ToMatrix"];
N13[label="FillMatrix"];
N14[label="MatrixAdd"];
N0 -> N2;
N1 -> N2;
N1 -> N5;
N1 -> N7;
N2 -> N3;
N2 -> N4;
N2 -> N9;
N3 -> N5;
N4 -> N7;
N5 -> N6;
N6 -> N12;
N7 -> N8;
N8 -> N12;
N9 -> N13;
N10 -> N12;
N10 -> N13;
N11 -> N12;
N11 -> N13;
N12 -> N14;
N13 -> N14;
Q0[label="Query"];
N14 -> Q0;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fill_add_2(self) -> None:
self.maxDiff = None
observations = {}
queries = [fill_add_2()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite:
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=Tensor];
N06[label=1.0];
N07[label=Normal];
N08[label=Sample];
N09[label=123];
N10[label="+"];
N11[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N03 -> N05;
N04 -> N05;
N05 -> N07;
N06 -> N07;
N07 -> N08;
N08 -> N10;
N09 -> N10;
N10 -> N11;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# The model after converting to BMG:
# TODO: We could constant-fold the matrix fill here, though that might
# not actually be an optimization if the matrix is large enough.
g, _ = BMGInference().to_graph(queries, observations)
observed = g.to_dot()
expected = """
digraph "graph" {
N0[label="0"];
N1[label="1"];
N2[label="Normal"];
N3[label="~"];
N4[label="~"];
N5[label="Normal"];
N6[label="~"];
N7[label="Normal"];
N8[label="~"];
N9[label="2"];
N10[label="1"];
N11[label="ToMatrix"];
N12[label="123"];
N13[label="FillMatrix"];
N14[label="MatrixAdd"];
N0 -> N2;
N1 -> N2;
N1 -> N5;
N1 -> N7;
N2 -> N3;
N2 -> N4;
N3 -> N5;
N4 -> N7;
N5 -> N6;
N6 -> N11;
N7 -> N8;
N8 -> N11;
N9 -> N11;
N9 -> N13;
N10 -> N11;
N10 -> N13;
N11 -> N14;
N12 -> N13;
N13 -> N14;
Q0[label="Query"];
N14 -> Q0;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/broadcast_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Beta, Normal
@bm.random_variable
def beta():
return Beta(2.0, 2.0)
@bm.random_variable
def flip(n):
return Bernoulli(beta() * 0.5)
@bm.random_variable
def normal(n):
return Normal(flip(n), 1.0)
class CoinFlipTest(unittest.TestCase):
def test_gen_mini(self) -> None:
self.maxDiff = None
# In the MiniBMG graph, the fact that we've observed
# the flip(0) input to Normal(flip(0), 1.0) should ensure
# that it is emitted into the graph as Normal(0.0, 1.0)
queries = [beta(), normal(0), normal(1)]
observations = {
flip(0): tensor(0.0),
}
observed = BMGInference()._to_mini(queries, observations, indent=2)
expected = """
{
"comment": "Mini BMG",
"nodes": [
{
"operator": "CONSTANT",
"type": "REAL",
"value": 2.0,
"sequence": 0
},
{
"operator": "DISTRIBUTION_BETA",
"type": "DISTRIBUTION",
"in_nodes": [
0,
0
],
"sequence": 1
},
{
"operator": "SAMPLE",
"type": "REAL",
"in_nodes": [
1
],
"sequence": 2
},
{
"operator": "CONSTANT",
"type": "REAL",
"value": 0.5,
"sequence": 3
},
{
"operator": "MULTIPLY",
"type": "REAL",
"in_nodes": [
2,
3
],
"sequence": 4
},
{
"operator": "DISTRIBUTION_BERNOULLI",
"type": "DISTRIBUTION",
"in_nodes": [
4
],
"sequence": 5
},
{
"operator": "CONSTANT",
"type": "REAL",
"value": 0.0,
"sequence": 6
},
{
"operator": "OBSERVE",
"type": "NONE",
"in_nodes": [
5,
6
],
"sequence": 7
},
{
"operator": "QUERY",
"type": "NONE",
"query_index": 0,
"in_nodes": [
2
],
"sequence": 8
},
{
"operator": "CONSTANT",
"type": "REAL",
"value": 1.0,
"sequence": 9
},
{
"operator": "DISTRIBUTION_NORMAL",
"type": "DISTRIBUTION",
"in_nodes": [
6,
9
],
"sequence": 10
},
{
"operator": "SAMPLE",
"type": "REAL",
"in_nodes": [
10
],
"sequence": 11
},
{
"operator": "QUERY",
"type": "NONE",
"query_index": 1,
"in_nodes": [
11
],
"sequence": 12
},
{
"operator": "SAMPLE",
"type": "REAL",
"in_nodes": [
5
],
"sequence": 13
},
{
"operator": "DISTRIBUTION_NORMAL",
"type": "DISTRIBUTION",
"in_nodes": [
13,
9
],
"sequence": 14
},
{
"operator": "SAMPLE",
"type": "REAL",
"in_nodes": [
14
],
"sequence": 15
},
{
"operator": "QUERY",
"type": "NONE",
"query_index": 2,
"in_nodes": [
15
],
"sequence": 16
}
]
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/gen_mini_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.model.rv_identifier import RVIdentifier
def _rv_id() -> RVIdentifier:
return RVIdentifier(lambda a, b: a, (1, 1))
def construct_model_graph(is_nested_cons: bool = True):
bmg = BMGraphBuilder()
zero = bmg.add_pos_real(0.0)
one = bmg.add_pos_real(1.0)
two = bmg.add_pos_real(2.0)
three = bmg.add_pos_real(3.0)
normal_one = bmg.add_normal(three, three)
normal_two = bmg.add_normal(one, two)
sample_normal_one = bmg.add_sample(normal_one)
sample_normal_two = bmg.add_sample(normal_two)
half = bmg.add_probability(0.5)
bernoulli = bmg.add_bernoulli(half)
bern_sample = bmg.add_sample(bernoulli)
norm_if = bmg.add_if_then_else(bern_sample, sample_normal_one, sample_normal_two)
if is_nested_cons:
bern_if = bmg.add_if_then_else(bern_sample, norm_if, zero)
else:
bern_if = bmg.add_if_then_else(bern_sample, zero, norm_if)
scale_two = bmg.add_multiplication(bern_if, two)
bmg.add_query(scale_two, _rv_id())
return bmg
class FixIfTest(unittest.TestCase):
def test_nested_if_cons_fix(self) -> None:
# This test case checks the nested if fixer for the cons case
# IF(COND, IF(COND, CONS2, ALT2), ALT1) --> IF(COND, CONS2, ALT1)
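        # Added illustration (not part of the original test): the rewrite is
        # sound because when the outer condition is true, the inner condition
        # is that same value, so the inner if always takes its consequence.
        # In plain Python terms, for any Boolean cond:
        #     ((cons2 if cond else alt2) if cond else alt1)
        #         == (cons2 if cond else alt1)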
self.maxDiff = None
bmg = construct_model_graph(is_nested_cons=True)
observed_before = to_dot(bmg, after_transform=False, label_edges=True)
expected_before = """
digraph "graph" {
N00[label=0.5];
N01[label=Bernoulli];
N02[label=Sample];
N03[label=3.0];
N04[label=Normal];
N05[label=Sample];
N06[label=1.0];
N07[label=2.0];
N08[label=Normal];
N09[label=Sample];
N10[label=if];
N11[label=0.0];
N12[label=if];
N13[label="*"];
N14[label=Query];
N00 -> N01[label=probability];
N01 -> N02[label=operand];
N02 -> N10[label=condition];
N02 -> N12[label=condition];
N03 -> N04[label=mu];
N03 -> N04[label=sigma];
N04 -> N05[label=operand];
N05 -> N10[label=consequence];
N06 -> N08[label=mu];
N07 -> N08[label=sigma];
N07 -> N13[label=right];
N08 -> N09[label=operand];
N09 -> N10[label=alternative];
N10 -> N12[label=consequence];
N11 -> N12[label=alternative];
N12 -> N13[label=left];
N13 -> N14[label=operator];
}
"""
self.assertEqual(observed_before.strip(), expected_before.strip())
observed = to_dot(bmg, after_transform=True, label_edges=True)
expected = """
digraph "graph" {
N00[label=0.5];
N01[label=Bernoulli];
N02[label=Sample];
N03[label=3.0];
N04[label=3.0];
N05[label=Normal];
N06[label=Sample];
N07[label=1.0];
N08[label=2.0];
N09[label=Normal];
N10[label=Sample];
N11[label=0.0];
N12[label=if];
N13[label=2.0];
N14[label="*"];
N15[label=Query];
N00 -> N01[label=probability];
N01 -> N02[label=operand];
N02 -> N12[label=condition];
N03 -> N05[label=mu];
N04 -> N05[label=sigma];
N05 -> N06[label=operand];
N06 -> N12[label=consequence];
N07 -> N09[label=mu];
N08 -> N09[label=sigma];
N09 -> N10[label=operand];
N11 -> N12[label=alternative];
N12 -> N14[label=left];
N13 -> N14[label=right];
N14 -> N15[label=operator];
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_nested_if_alt_fix(self) -> None:
# This test case checks the nested if fixer for the alt case
# IF(COND, CONS_1, IF(COND, CONS2, ALT2)) --> IF(COND, CONS1, ALT2)
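        # Added illustration (not part of the original test): symmetrically,
        # when the outer condition is false the inner if always takes its
        # alternative, so for any Boolean cond:
        #     (cons1 if cond else (cons2 if cond else alt2))
        #         == (cons1 if cond else alt2)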
self.maxDiff = None
bmg = construct_model_graph(is_nested_cons=False)
observed_before = to_dot(bmg, after_transform=False, label_edges=True)
expected_before = """
digraph "graph" {
N00[label=0.5];
N01[label=Bernoulli];
N02[label=Sample];
N03[label=0.0];
N04[label=3.0];
N05[label=Normal];
N06[label=Sample];
N07[label=1.0];
N08[label=2.0];
N09[label=Normal];
N10[label=Sample];
N11[label=if];
N12[label=if];
N13[label="*"];
N14[label=Query];
N00 -> N01[label=probability];
N01 -> N02[label=operand];
N02 -> N11[label=condition];
N02 -> N12[label=condition];
N03 -> N12[label=consequence];
N04 -> N05[label=mu];
N04 -> N05[label=sigma];
N05 -> N06[label=operand];
N06 -> N11[label=consequence];
N07 -> N09[label=mu];
N08 -> N09[label=sigma];
N08 -> N13[label=right];
N09 -> N10[label=operand];
N10 -> N11[label=alternative];
N11 -> N12[label=alternative];
N12 -> N13[label=left];
N13 -> N14[label=operator];
}
"""
self.assertEqual(observed_before.strip(), expected_before.strip())
observed = to_dot(bmg, after_transform=True, label_edges=True)
expected = """
digraph "graph" {
N00[label=0.5];
N01[label=Bernoulli];
N02[label=Sample];
N03[label=3.0];
N04[label=3.0];
N05[label=Normal];
N06[label=Sample];
N07[label=1.0];
N08[label=2.0];
N09[label=Normal];
N10[label=Sample];
N11[label=0.0];
N12[label=if];
N13[label=2.0];
N14[label="*"];
N15[label=Query];
N00 -> N01[label=probability];
N01 -> N02[label=operand];
N02 -> N12[label=condition];
N03 -> N05[label=mu];
N04 -> N05[label=sigma];
N05 -> N06[label=operand];
N07 -> N09[label=mu];
N08 -> N09[label=sigma];
N09 -> N10[label=operand];
N10 -> N12[label=alternative];
N11 -> N12[label=consequence];
N12 -> N14[label=left];
N13 -> N14[label=right];
N14 -> N15[label=operator];
}
"""
self.assertEqual(observed.strip(), expected.strip())
| beanmachine-main | tests/ppl/compiler/fix_if_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test for tutorial on Neal's Funnel"""
# This file is a manual replica of the Bento tutorial with the same name
# This is a block for Beanstalk OSS readiness
# TODO: Check imports for consistency
import logging
import math
import unittest
import beanmachine.ppl as bm
import torch # from torch import manual_seed, tensor
import torch.distributions as dist # from torch.distributions import Bernoulli, Normal, Uniform
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
# This makes the results deterministic and reproducible.
logging.getLogger("beanmachine").setLevel(50)
torch.manual_seed(11)
# Model
def normal_log_prob(mu, sigma, x):
z = (x - mu) / sigma
return (-1.0 / 2.0) * math.log(2.0 * math.pi) - (z**2.0 / 2.0)
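# (Added note) This computes the log-density of a standard normal evaluated at
# z = (x - mu) / sigma; it omits the additive -log(sigma) term of the full
# Normal(mu, sigma) log-density.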
@bm.random_variable
def z():
"""
An uninformative (flat) prior for z.
"""
# TODO(tingley): Replace with Flat once it's part of the framework.
return dist.Normal(0, 10000)
@bm.random_variable
def x():
"""
An uninformative (flat) prior for x.
"""
# TODO(tingley): Replace with Flat once it's part of the framework.
return dist.Normal(0, 10000)
@bm.random_variable
def neals_funnel_coin_flip():
"""
Flip a "coin", which is heads with probability equal to the probability
of drawing z and x from the true Neal's funnel posterior.
"""
return dist.Bernoulli(
(
normal_log_prob(0.0, 3.0, z())
+ normal_log_prob(0.0, (z() / 2.0).exp(), x())
).exp()
)
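# (Added note) Observing this coin as heads (tensor(1.0) in the observations
# below) multiplies the joint density by the Bernoulli success probability,
# i.e. by the exponentiated sum of the two normal_log_prob terms above, which
# is how the flat priors on z and x get reweighted toward the funnel-shaped
# target.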
# Inference parameters
num_samples = 1 ###000
num_chains = 4
observations = {neals_funnel_coin_flip(): tensor(1.0)}
queries = [z(), x()]
class tutorialNealsFunnelTest(unittest.TestCase):
def test_tutorial_Neals_Funnel(self) -> None:
"""Check BM and BMG inference both terminate"""
self.maxDiff = None
# Inference with BM
# Note: No explicit seed here (in original tutorial model). Should we add one?
nmc = bm.SingleSiteNewtonianMonteCarlo()
_ = nmc.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=num_chains,
)
hmc = bm.SingleSiteHamiltonianMonteCarlo(
trajectory_length=0.1, initial_step_size=0.01
)
_ = hmc.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=num_chains,
)
ghmc = bm.CompositionalInference(
{
(z, x): bm.SingleSiteHamiltonianMonteCarlo(
trajectory_length=0.1, initial_step_size=0.01
),
}
)
ghmc.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=num_chains,
)
bmg = BMGInference()
_ = bmg.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=1, # TODO[Walid]: 1 should be num_chains
)
self.assertTrue(True, msg="We just want to check this point is reached")
def test_tutorial_Neals_Funnel_to_dot_cpp_python(
self,
) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=10000.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=-0.9189385332046727];
N06[label=0.3333333333333333];
N07[label="*"];
N08[label=2.0];
N09[label="**"];
N10[label=0.5];
N11[label="*"];
N12[label="-"];
N13[label="*"];
N14[label=Exp];
N15[label=-1.0];
N16[label="**"];
N17[label=ToReal];
N18[label="*"];
N19[label="**"];
N20[label="*"];
N21[label="-"];
N22[label="+"];
N23[label=Exp];
N24[label=ToProb];
N25[label=Bernoulli];
N26[label=Sample];
N27[label="Observation True"];
N28[label=Query];
N29[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N03 -> N07;
N03 -> N13;
N03 -> N28;
N04 -> N18;
N04 -> N29;
N05 -> N22;
N05 -> N22;
N06 -> N07;
N07 -> N09;
N08 -> N09;
N08 -> N19;
N09 -> N11;
N10 -> N11;
N10 -> N13;
N10 -> N20;
N11 -> N12;
N12 -> N22;
N13 -> N14;
N14 -> N16;
N15 -> N16;
N16 -> N17;
N17 -> N18;
N18 -> N19;
N19 -> N20;
N20 -> N21;
N21 -> N22;
N22 -> N23;
N23 -> N24;
N24 -> N25;
N25 -> N26;
N26 -> N27;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_cpp(queries, observations)
expected = """
graph::Graph g;
uint n0 = g.add_constant_real(0.0);
uint n1 = g.add_constant_pos_real(10000.0);
uint n2 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n0, n1}));
uint n3 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n2}));
uint n4 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n2}));
uint n5 = g.add_constant_real(-0.9189385332046727);
uint n6 = g.add_constant_real(0.3333333333333333);
uint n7 = g.add_operator(
graph::OperatorType::MULTIPLY, std::vector<uint>({n3, n6}));
uint n8 = g.add_constant_pos_real(2.0);
uint n9 = g.add_operator(
graph::OperatorType::POW, std::vector<uint>({n7, n8}));
uint n10 = g.add_constant_real(0.5);
uint n11 = g.add_operator(
graph::OperatorType::MULTIPLY, std::vector<uint>({n9, n10}));
uint n12 = g.add_operator(
graph::OperatorType::NEGATE, std::vector<uint>({n11}));
uint n13 = g.add_operator(
graph::OperatorType::MULTIPLY, std::vector<uint>({n3, n10}));
uint n14 = g.add_operator(
graph::OperatorType::EXP, std::vector<uint>({n13}));
uint n15 = g.add_constant_real(-1.0);
uint n16 = g.add_operator(
graph::OperatorType::POW, std::vector<uint>({n14, n15}));
uint n17 = g.add_operator(
graph::OperatorType::TO_REAL, std::vector<uint>({n16}));
uint n18 = g.add_operator(
graph::OperatorType::MULTIPLY, std::vector<uint>({n4, n17}));
uint n19 = g.add_operator(
graph::OperatorType::POW, std::vector<uint>({n18, n8}));
uint n20 = g.add_operator(
graph::OperatorType::MULTIPLY, std::vector<uint>({n19, n10}));
uint n21 = g.add_operator(
graph::OperatorType::NEGATE, std::vector<uint>({n20}));
uint n22 = g.add_operator(
graph::OperatorType::ADD,
std::vector<uint>({n5, n12, n5, n21}));
uint n23 = g.add_operator(
graph::OperatorType::EXP, std::vector<uint>({n22}));
uint n24 = g.add_operator(
graph::OperatorType::TO_PROBABILITY, std::vector<uint>({n23}));
uint n25 = g.add_distribution(
graph::DistributionType::BERNOULLI,
graph::AtomicType::BOOLEAN,
std::vector<uint>({n24}));
uint n26 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n25}));
g.observe(n26, true);
uint q0 = g.query(n3);
uint q1 = g.query(n4);
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_python(queries, observations)
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_real(0.0)
n1 = g.add_constant_pos_real(10000.0)
n2 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n0, n1],
)
n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2])
n4 = g.add_operator(graph.OperatorType.SAMPLE, [n2])
n5 = g.add_constant_real(-0.9189385332046727)
n6 = g.add_constant_real(0.3333333333333333)
n7 = g.add_operator(graph.OperatorType.MULTIPLY, [n3, n6])
n8 = g.add_constant_pos_real(2.0)
n9 = g.add_operator(graph.OperatorType.POW, [n7, n8])
n10 = g.add_constant_real(0.5)
n11 = g.add_operator(graph.OperatorType.MULTIPLY, [n9, n10])
n12 = g.add_operator(graph.OperatorType.NEGATE, [n11])
n13 = g.add_operator(graph.OperatorType.MULTIPLY, [n3, n10])
n14 = g.add_operator(graph.OperatorType.EXP, [n13])
n15 = g.add_constant_real(-1.0)
n16 = g.add_operator(graph.OperatorType.POW, [n14, n15])
n17 = g.add_operator(graph.OperatorType.TO_REAL, [n16])
n18 = g.add_operator(graph.OperatorType.MULTIPLY, [n4, n17])
n19 = g.add_operator(graph.OperatorType.POW, [n18, n8])
n20 = g.add_operator(graph.OperatorType.MULTIPLY, [n19, n10])
n21 = g.add_operator(graph.OperatorType.NEGATE, [n20])
n22 = g.add_operator(
graph.OperatorType.ADD,
[n5, n12, n5, n21],
)
n23 = g.add_operator(graph.OperatorType.EXP, [n22])
n24 = g.add_operator(graph.OperatorType.TO_PROBABILITY, [n23])
n25 = g.add_distribution(
graph.DistributionType.BERNOULLI,
graph.AtomicType.BOOLEAN,
[n24],
)
n26 = g.add_operator(graph.OperatorType.SAMPLE, [n25])
g.observe(n26, True)
q0 = g.query(n3)
q1 = g.query(n4)
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/tutorial_Neals_Funnel_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test of realistic linear regression model"""
# This is copied from bento workbook N140350, simplified, and
# modified to use BMG inference.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch.distributions import Normal, Uniform
@bm.random_variable
def theta_0():
return Normal(0.0, 1.0)
@bm.random_variable
def theta_1():
return Normal(0.0, 1.0)
@bm.random_variable
def error():
return Uniform(0.0, 1.0)
@bm.random_variable
def x(i):
return Normal(0.0, 1.0)
@bm.random_variable
def y(i):
return Normal(theta_0() + theta_1() * x(i), error())
class LinearRegressionTest(unittest.TestCase):
def test_linear_regression_inference(self) -> None:
self.maxDiff = None
# We start by generating some test data; we can use the inference engine
# as a random number generator if we have no observations.
#
# Generate an intercept, slope, and n points such that:
#
# y(i) = theta_0() + theta_1() * x(i) + some normal error
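        # (Added note) With an empty observation dictionary the posterior equals
        # the prior, so the single sample drawn per query below is effectively a
        # draw from the prior -- a convenient way to fabricate a synthetic data
        # set with a known generating process.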
n = 100
x_rvs = [x(i) for i in range(n)]
y_rvs = [y(i) for i in range(n)]
test_samples = BMGInference().infer(
[theta_0(), theta_1()] + x_rvs + y_rvs, {}, 1
)
true_intercept = test_samples[theta_0()][0].item()
true_slope = test_samples[theta_1()][0].item()
points = [(test_samples[x(i)][0], test_samples[y(i)][0]) for i in range(n)]
# We are only pseudo-random here so we should always get the same result.
expected_true_intercept = -0.05
expected_true_slope = -0.44
self.assertAlmostEqual(true_intercept, expected_true_intercept, delta=0.1)
self.assertAlmostEqual(true_slope, expected_true_slope, delta=0.5)
# If we then run inference when observing the set of (x, y) points we generated,
# what slope and intercept do we infer? It should be close to the actual values.
observed_xs = {x(i): points[i][0] for i in range(n)}
observed_ys = {y(i): points[i][1] for i in range(n)}
observations = {**observed_xs, **observed_ys}
queries = [theta_0(), theta_1()]
num_samples = 1000
samples = BMGInference().infer(queries, observations, num_samples)
inferred_intercept = samples[theta_0()].mean()
inferred_slope = samples[theta_1()].mean()
expected_inferred_int = -0.05
expected_inferred_slope = -0.33
self.assertAlmostEqual(inferred_intercept, expected_inferred_int, delta=0.2)
self.assertAlmostEqual(inferred_slope, expected_inferred_slope, delta=0.5)
| beanmachine-main | tests/ppl/compiler/linear_regression_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Compilation test of Todd's Linear Regression Outliers Marginalized model"""
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.distributions.unit import Unit
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import logaddexp, ones, tensor
from torch.distributions import Bernoulli, Beta, Gamma, Normal
_x_obs = tensor([0, 3, 9])
_y_obs = tensor([33, 68, 34])
_err_obs = tensor([3.6, 3.9, 2.6])
@bm.random_variable
def beta_0():
return Normal(0, 10)
@bm.random_variable
def beta_1():
return Normal(0, 10)
@bm.random_variable
def sigma_out():
return Gamma(1, 1)
@bm.random_variable
def theta():
return Beta(2, 5)
@bm.functional
def f():
mu = beta_0() + beta_1() * _x_obs
ns = Normal(mu, sigma_out())
ne = Normal(mu, _err_obs)
log_likelihood_outlier = theta().log() + ns.log_prob(_y_obs)
log_likelihood = (1 - theta()).log() + ne.log_prob(_y_obs)
return logaddexp(log_likelihood_outlier, log_likelihood)
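# (Added note) Elementwise, f() returns
#     log( theta * N(y | mu, sigma_out) + (1 - theta) * N(y | mu, err) )
# i.e. the per-point outlier indicator has already been marginalized out of the
# likelihood, which is why no discrete latent variable appears in this model.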
@bm.random_variable
def y():
return Unit(f())
# Same model, but with the "Bernoulli trick" instead of a Unit:
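# (Added sketch of why the trick works, not part of the original source:
# observing True from Bernoulli(p) contributes log(p) to the joint log-density,
# so observing True from Bernoulli(f().exp()) contributes f() itself -- the
# factor the Unit-based model above encodes -- provided f() <= 0 so that
# f().exp() is a valid probability.)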
@bm.random_variable
def d():
return Bernoulli(f().exp())
# Same model, but using a logits Bernoulli
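# (Added note) With p = f().exp(), the logit computed below is
# log(p) - log(1 - p), so Bernoulli(logits=logit) is the same distribution as
# the Bernoulli(p) used in d() above; only the parameterization differs.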
@bm.random_variable
def d2():
log_prob = f()
logit = log_prob - (1 - log_prob.exp()).log()
return Bernoulli(logits=logit)
class LROMMTest(unittest.TestCase):
def test_lromm_unit_to_dot(self) -> None:
self.maxDiff = None
queries = [beta_0(), beta_1(), sigma_out(), theta()]
observations = {y(): _y_obs}
with self.assertRaises(ValueError) as ex:
BMGInference().to_dot(queries, observations)
expected = """
Function Unit is not supported by Bean Machine Graph.
"""
observed = str(ex.exception)
self.assertEqual(observed.strip(), expected.strip())
def test_lromm_bern_to_dot(self) -> None:
self.maxDiff = None
queries = [beta_0(), beta_1(), sigma_out(), theta()]
observations = {d(): ones(len(_y_obs))}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=5.0];
N02[label=Beta];
N03[label=Sample];
N04[label=0.0];
N05[label=10.0];
N06[label=Normal];
N07[label=Sample];
N08[label=Sample];
N09[label=1.0];
N10[label=Gamma];
N11[label=Sample];
N12[label=3];
N13[label=1];
N14[label=Log];
N15[label=ToReal];
N16[label=FillMatrix];
N17[label=FillMatrix];
N18[label="[0,3,9]"];
N19[label=MatrixScale];
N20[label=MatrixAdd];
N21[label=0];
N22[label=index];
N23[label=Normal];
N24[label=33.0];
N25[label=LogProb];
N26[label=index];
N27[label=Normal];
N28[label=68.0];
N29[label=LogProb];
N30[label=2];
N31[label=index];
N32[label=Normal];
N33[label=34.0];
N34[label=LogProb];
N35[label=ToMatrix];
N36[label=MatrixAdd];
N37[label=index];
N38[label=complement];
N39[label=Log];
N40[label=ToReal];
N41[label=FillMatrix];
N42[label=3.5999999046325684];
N43[label=Normal];
N44[label=LogProb];
N45[label=3.9000000953674316];
N46[label=Normal];
N47[label=LogProb];
N48[label=2.5999999046325684];
N49[label=Normal];
N50[label=LogProb];
N51[label=ToMatrix];
N52[label=MatrixAdd];
N53[label=index];
N54[label=LogSumExp];
N55[label=index];
N56[label=index];
N57[label=LogSumExp];
N58[label=index];
N59[label=index];
N60[label=LogSumExp];
N61[label=ToMatrix];
N62[label=MatrixExp];
N63[label=index];
N64[label=ToProb];
N65[label=Bernoulli];
N66[label=Sample];
N67[label=index];
N68[label=ToProb];
N69[label=Bernoulli];
N70[label=Sample];
N71[label=index];
N72[label=ToProb];
N73[label=Bernoulli];
N74[label=Sample];
N75[label="Observation True"];
N76[label="Observation True"];
N77[label="Observation True"];
N78[label=Query];
N79[label=Query];
N80[label=Query];
N81[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N03 -> N14;
N03 -> N38;
N03 -> N81;
N04 -> N06;
N05 -> N06;
N06 -> N07;
N06 -> N08;
N07 -> N17;
N07 -> N78;
N08 -> N19;
N08 -> N79;
N09 -> N10;
N09 -> N10;
N10 -> N11;
N11 -> N23;
N11 -> N27;
N11 -> N32;
N11 -> N80;
N12 -> N16;
N12 -> N17;
N12 -> N35;
N12 -> N41;
N12 -> N51;
N12 -> N61;
N13 -> N16;
N13 -> N17;
N13 -> N26;
N13 -> N35;
N13 -> N41;
N13 -> N51;
N13 -> N55;
N13 -> N56;
N13 -> N61;
N13 -> N67;
N14 -> N15;
N15 -> N16;
N16 -> N36;
N17 -> N20;
N18 -> N19;
N19 -> N20;
N20 -> N22;
N20 -> N26;
N20 -> N31;
N21 -> N22;
N21 -> N37;
N21 -> N53;
N21 -> N63;
N22 -> N23;
N22 -> N43;
N23 -> N25;
N24 -> N25;
N24 -> N44;
N25 -> N35;
N26 -> N27;
N26 -> N46;
N27 -> N29;
N28 -> N29;
N28 -> N47;
N29 -> N35;
N30 -> N31;
N30 -> N58;
N30 -> N59;
N30 -> N71;
N31 -> N32;
N31 -> N49;
N32 -> N34;
N33 -> N34;
N33 -> N50;
N34 -> N35;
N35 -> N36;
N36 -> N37;
N36 -> N55;
N36 -> N58;
N37 -> N54;
N38 -> N39;
N39 -> N40;
N40 -> N41;
N41 -> N52;
N42 -> N43;
N43 -> N44;
N44 -> N51;
N45 -> N46;
N46 -> N47;
N47 -> N51;
N48 -> N49;
N49 -> N50;
N50 -> N51;
N51 -> N52;
N52 -> N53;
N52 -> N56;
N52 -> N59;
N53 -> N54;
N54 -> N61;
N55 -> N57;
N56 -> N57;
N57 -> N61;
N58 -> N60;
N59 -> N60;
N60 -> N61;
N61 -> N62;
N62 -> N63;
N62 -> N67;
N62 -> N71;
N63 -> N64;
N64 -> N65;
N65 -> N66;
N66 -> N75;
N67 -> N68;
N68 -> N69;
N69 -> N70;
N70 -> N76;
N71 -> N72;
N72 -> N73;
N73 -> N74;
N74 -> N77;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_lromm_logits_to_bmg_dot(self) -> None:
self.maxDiff = None
queries = [beta_0(), beta_1(), sigma_out(), theta()]
observations = {d2(): ones(len(_y_obs))}
# Go all the way to BMG.
        # (This regression-tests the bug described in t131976521.)
g, _ = BMGInference().to_graph(queries, observations)
observed = g.to_dot()
expected = """
digraph "graph" {
N0[label="2"];
N1[label="5"];
N2[label="Beta"];
N3[label="~"];
N4[label="0"];
N5[label="10"];
N6[label="Normal"];
N7[label="~"];
N8[label="~"];
N9[label="1"];
N10[label="Gamma"];
N11[label="~"];
N12[label="3"];
N13[label="1"];
N14[label="Log"];
N15[label="ToReal"];
N16[label="FillMatrix"];
N17[label="FillMatrix"];
N18[label="matrix"];
N19[label="MatrixScale"];
N20[label="MatrixAdd"];
N21[label="0"];
N22[label="Index"];
N23[label="Normal"];
N24[label="33"];
N25[label="LogProb"];
N26[label="Index"];
N27[label="Normal"];
N28[label="68"];
N29[label="LogProb"];
N30[label="2"];
N31[label="Index"];
N32[label="Normal"];
N33[label="34"];
N34[label="LogProb"];
N35[label="ToMatrix"];
N36[label="MatrixAdd"];
N37[label="Index"];
N38[label="Complement"];
N39[label="Log"];
N40[label="ToReal"];
N41[label="FillMatrix"];
N42[label="3.6"];
N43[label="Normal"];
N44[label="LogProb"];
N45[label="3.9"];
N46[label="Normal"];
N47[label="LogProb"];
N48[label="2.6"];
N49[label="Normal"];
N50[label="LogProb"];
N51[label="ToMatrix"];
N52[label="MatrixAdd"];
N53[label="Index"];
N54[label="LogSumExp"];
N55[label="Index"];
N56[label="Index"];
N57[label="LogSumExp"];
N58[label="Index"];
N59[label="Index"];
N60[label="LogSumExp"];
N61[label="ToMatrix"];
N62[label="1"];
N63[label="FillMatrix"];
N64[label="MatrixExp"];
N65[label="MatrixNegate"];
N66[label="ToReal"];
N67[label="MatrixAdd"];
N68[label="ToPosReal"];
N69[label="MatrixLog"];
N70[label="MatrixNegate"];
N71[label="MatrixAdd"];
N72[label="Index"];
N73[label="BernoulliLogit"];
N74[label="~"];
N75[label="Index"];
N76[label="BernoulliLogit"];
N77[label="~"];
N78[label="Index"];
N79[label="BernoulliLogit"];
N80[label="~"];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N14;
N3 -> N38;
N4 -> N6;
N5 -> N6;
N6 -> N7;
N6 -> N8;
N7 -> N17;
N8 -> N19;
N9 -> N10;
N9 -> N10;
N10 -> N11;
N11 -> N23;
N11 -> N27;
N11 -> N32;
N12 -> N16;
N12 -> N17;
N12 -> N35;
N12 -> N41;
N12 -> N51;
N12 -> N61;
N12 -> N63;
N13 -> N16;
N13 -> N17;
N13 -> N26;
N13 -> N35;
N13 -> N41;
N13 -> N51;
N13 -> N55;
N13 -> N56;
N13 -> N61;
N13 -> N63;
N13 -> N75;
N14 -> N15;
N15 -> N16;
N16 -> N36;
N17 -> N20;
N18 -> N19;
N19 -> N20;
N20 -> N22;
N20 -> N26;
N20 -> N31;
N21 -> N22;
N21 -> N37;
N21 -> N53;
N21 -> N72;
N22 -> N23;
N22 -> N43;
N23 -> N25;
N24 -> N25;
N24 -> N44;
N25 -> N35;
N26 -> N27;
N26 -> N46;
N27 -> N29;
N28 -> N29;
N28 -> N47;
N29 -> N35;
N30 -> N31;
N30 -> N58;
N30 -> N59;
N30 -> N78;
N31 -> N32;
N31 -> N49;
N32 -> N34;
N33 -> N34;
N33 -> N50;
N34 -> N35;
N35 -> N36;
N36 -> N37;
N36 -> N55;
N36 -> N58;
N37 -> N54;
N38 -> N39;
N39 -> N40;
N40 -> N41;
N41 -> N52;
N42 -> N43;
N43 -> N44;
N44 -> N51;
N45 -> N46;
N46 -> N47;
N47 -> N51;
N48 -> N49;
N49 -> N50;
N50 -> N51;
N51 -> N52;
N52 -> N53;
N52 -> N56;
N52 -> N59;
N53 -> N54;
N54 -> N61;
N55 -> N57;
N56 -> N57;
N57 -> N61;
N58 -> N60;
N59 -> N60;
N60 -> N61;
N61 -> N64;
N61 -> N71;
N62 -> N63;
N63 -> N67;
N64 -> N65;
N65 -> N66;
N66 -> N67;
N67 -> N68;
N68 -> N69;
N69 -> N70;
N70 -> N71;
N71 -> N72;
N71 -> N75;
N71 -> N78;
N72 -> N73;
N73 -> N74;
N75 -> N76;
N76 -> N77;
N78 -> N79;
N79 -> N80;
O0[label="Observation"];
N74 -> O0;
O1[label="Observation"];
N77 -> O1;
O2[label="Observation"];
N80 -> O2;
Q0[label="Query"];
N7 -> Q0;
Q1[label="Query"];
N8 -> Q1;
Q2[label="Query"];
N11 -> Q2;
Q3[label="Query"];
N3 -> Q3;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/lromm_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import platform
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch import exp, log
from torch.distributions import Normal
@bm.random_variable
def norm(x):
return Normal(0.0, 1.0)
@bm.functional
def sum_1(counter):
sum = 0.0
for i in range(counter):
sum = sum + exp(norm(i))
return sum
@bm.functional
def sum_2():
return log(sum_1(100))
def get_report(skip_optimizations):
observations = {}
queries = [sum_2()]
number_samples = 1000
_, perf_report = BMGInference()._infer(
queries, observations, number_samples, skip_optimizations=skip_optimizations
)
return perf_report
class LogSumExpPerformanceTest(unittest.TestCase):
def test_perf_num_nodes_edges(self) -> None:
"""
        Check that the LogSumExp transformation reduces the number of nodes
        and edges, using the performance report returned by BMGInference.
"""
if platform.system() == "Windows":
self.skipTest("Disabling *_perf_test.py until flakiness is resolved")
self.maxDiff = None
skip_optimizations = {
"beta_bernoulli_conjugate_fixer",
"beta_binomial_conjugate_fixer",
"normal_normal_conjugate_fixer",
}
report_w_optimization = get_report(skip_optimizations)
self.assertEqual(report_w_optimization.node_count, 104)
self.assertEqual(report_w_optimization.edge_count, 202)
skip_optimizations = {
"logsumexp_fixer",
"beta_bernoulli_conjugate_fixer",
"beta_binomial_conjugate_fixer",
"normal_normal_conjugate_fixer",
}
report_wo_optimization = get_report(skip_optimizations)
self.assertEqual(report_wo_optimization.node_count, 205)
self.assertEqual(report_wo_optimization.edge_count, 303)
| beanmachine-main | tests/ppl/compiler/fix_logsumexp_perf_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
from beanmachine.ppl.inference import BMGInference
from torch.distributions import Normal
@bm.random_variable
def scalar():
return Normal(0.0, 1.0)
matrix = torch.tensor([20, 40])
@bm.functional
def scaled():
return scalar() * matrix
@bm.functional
def scaled_sym():
return matrix * scalar()
@bm.functional
def scaled2():
return scalar() * torch.tensor([scalar(), scalar()])
@bm.functional
def scaled2_sym():
return (torch.tensor([scalar(), scalar()])) * scalar()
@bm.functional
def multiple_scalars():
return scalar() * scalar() * matrix * scalar() * scalar()
class FixMatrixScaleTest(unittest.TestCase):
def test_fix_matrix_scale_1(self) -> None:
self.maxDiff = None
observations = {}
queries = [scaled()]
num_samples = 1000
num_chains = 1
# Sanity check to make sure the model is valid
nmc = bm.SingleSiteNewtonianMonteCarlo()
_ = nmc.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=num_chains,
)
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before optimization
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label="[20,40]"];
N5[label="*"];
N6[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After optimization:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label="[20,40]"];
N5[label=MatrixScale];
N6[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}"""
self.assertEqual(expected.strip(), observed.strip())
# The model runs on Bean Machine Graph
_ = BMGInference().infer(queries, observations, num_samples=num_samples)
def test_fix_matrix_scale_1_sym(self) -> None:
self.maxDiff = None
observations = {}
queries = [scaled_sym()]
num_samples = 1000
num_chains = 1
# Sanity check to make sure the model is valid
nmc = bm.SingleSiteNewtonianMonteCarlo()
_ = nmc.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=num_chains,
)
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before optimization
expected = """
digraph "graph" {
N0[label="[20,40]"];
N1[label=0.0];
N2[label=1.0];
N3[label=Normal];
N4[label=Sample];
N5[label="*"];
N6[label=Query];
N0 -> N5;
N1 -> N3;
N2 -> N3;
N3 -> N4;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After optimization:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label="[20,40]"];
N5[label=MatrixScale];
N6[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}"""
self.assertEqual(expected.strip(), observed.strip())
# The model runs on Bean Machine Graph
_ = BMGInference().infer(queries, observations, num_samples=num_samples)
def test_fix_matrix_scale_2(self) -> None:
self.maxDiff = None
observations = {}
queries = [scaled2()]
num_samples = 1000
num_chains = 1
# Sanity check to make sure the model is valid
nmc = bm.SingleSiteNewtonianMonteCarlo()
_ = nmc.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=num_chains,
)
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before optimization
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Tensor];
N5[label="*"];
N6[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N3 -> N4;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After optimization:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=2];
N5[label=1];
N6[label=ToMatrix];
N7[label=MatrixScale];
N8[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N6;
N3 -> N6;
N3 -> N7;
N4 -> N6;
N5 -> N6;
N6 -> N7;
N7 -> N8;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# The model runs on Bean Machine Graph
_ = BMGInference().infer(queries, observations, num_samples=num_samples)
def test_fix_matrix_scale_2_sym(self) -> None:
self.maxDiff = None
observations = {}
queries = [scaled2_sym()]
num_samples = 1000
num_chains = 1
# Sanity check to make sure the model is valid
nmc = bm.SingleSiteNewtonianMonteCarlo()
_ = nmc.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=num_chains,
)
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before optimization
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Tensor];
N5[label="*"];
N6[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N3 -> N4;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After optimization:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=2];
N5[label=1];
N6[label=ToMatrix];
N7[label=MatrixScale];
N8[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N6;
N3 -> N6;
N3 -> N7;
N4 -> N6;
N5 -> N6;
N6 -> N7;
N7 -> N8;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# The model runs on Bean Machine Graph
_ = BMGInference().infer(queries, observations, num_samples=num_samples)
def test_fix_matrix_scale_3(self) -> None:
# TODO: The matrix scale optimizer correctly removes the extra matrix scale
# but the multiary multiplication optimizer does not optimize to a single
# multiplication node. That optimizer does not optimize nodes where the
# outgoing edge count is more than one, but in this case the outgoing
# edges are to orphaned nodes, illustrating a flaw in this design.
# We might consider always doing the optimization even if there are multiple
# outgoing edges -- that risks making a suboptimal graph but that scenario
# is likely rare. Or we could write an orphan-trimming pass.
self.maxDiff = None
observations = {}
queries = [multiple_scalars()]
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label="*"];
N5[label="*"];
N6[label="*"];
N7[label="[20,40]"];
N8[label=MatrixScale];
N9[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N3 -> N4;
N3 -> N5;
N3 -> N6;
N4 -> N5;
N5 -> N6;
N6 -> N8;
N7 -> N8;
N8 -> N9;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/fix_matrix_scale_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import scipy
from beanmachine.ppl.inference import BMGInference
from torch.distributions import Normal
@bm.random_variable
def norm(x):
return Normal(0.0, 1.0)
@bm.functional
def sum_1():
return norm(0) + norm(1) + norm(2)
@bm.functional
def sum_2():
return norm(3) + norm(4) + norm(5)
@bm.functional
def sum_3():
return sum_1() + 5.0
@bm.functional
def sum_4():
return sum_1() + sum_2()
class DisableTransformationsTest(unittest.TestCase):
def test_multiary_ops_opt_to_dot(self) -> None:
self.maxDiff = None
observations = {}
queries = [sum_3(), sum_4()]
skip_optimizations = {"multiary_addition_fixer"}
observed = BMGInference().to_dot(
queries, observations, skip_optimizations=skip_optimizations
)
# Expected model when skipping multiary addition optimization
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=Sample];
N06[label="+"];
N07[label="+"];
N08[label=5.0];
N09[label="+"];
N10[label=Query];
N11[label=Sample];
N12[label=Sample];
N13[label=Sample];
N14[label="+"];
N15[label="+"];
N16[label="+"];
N17[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N02 -> N05;
N02 -> N11;
N02 -> N12;
N02 -> N13;
N03 -> N06;
N04 -> N06;
N05 -> N07;
N06 -> N07;
N07 -> N09;
N07 -> N16;
N08 -> N09;
N09 -> N10;
N11 -> N14;
N12 -> N14;
N13 -> N15;
N14 -> N15;
N15 -> N16;
N16 -> N17;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# Expected graph without skipping multiary addition optimization:
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=Sample];
N06[label="+"];
N07[label=5.0];
N08[label="+"];
N09[label=Query];
N10[label=Sample];
N11[label=Sample];
N12[label=Sample];
N13[label="+"];
N14[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N02 -> N05;
N02 -> N10;
N02 -> N11;
N02 -> N12;
N03 -> N06;
N04 -> N06;
N05 -> N06;
N06 -> N08;
N06 -> N13;
N07 -> N08;
N08 -> N09;
N10 -> N13;
N11 -> N13;
N12 -> N13;
N13 -> N14;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_multiary_ops_opt_inference(self) -> None:
observations = {}
queries = [sum_3(), sum_4()]
num_samples = 1000
skip_optimizations = {"multiary_addition_fixer"}
posterior_wo_opt = BMGInference().infer(
queries, observations, num_samples, 1, skip_optimizations=skip_optimizations
)
sum_3_samples_wo_opt = posterior_wo_opt[sum_3()][0]
sum_4_samples_wo_opt = posterior_wo_opt[sum_4()][0]
posterior_w_opt = BMGInference().infer(queries, observations, num_samples)
sum_3_samples_w_opt = posterior_w_opt[sum_3()][0]
sum_4_samples_w_opt = posterior_w_opt[sum_4()][0]
self.assertGreaterEqual(
scipy.stats.ks_2samp(sum_3_samples_wo_opt, sum_3_samples_w_opt).pvalue, 0.05
)
self.assertGreaterEqual(
scipy.stats.ks_2samp(sum_4_samples_wo_opt, sum_4_samples_w_opt).pvalue, 0.05
)
| beanmachine-main | tests/ppl/compiler/disable_transformations_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Compare original and conjugate prior transformed
Beta-Binomial model"""
import random
import unittest
import beanmachine.ppl as bm
import scipy
import torch
from beanmachine.ppl.examples.conjugate_models.beta_binomial import BetaBinomialModel
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
from torch.distributions import Beta
class BetaBinomialTransformedModel(BetaBinomialModel):
"""Closed-form Posterior due to conjugacy"""
@bm.random_variable
def theta_transformed(self):
# Analytical posterior Beta(alpha + sum x_i, beta + sum N - sum x_i)
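        # Worked example (added for clarity): with alpha = beta = 2.0, n = 4.0
        # and the observed count x = 3.0 used in the tests below, this is
        # Beta(2 + 3, 2 + (4 - 3)) = Beta(5, 3).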
return Beta(self.alpha_ + 3.0, self.beta_ + (self.n_ - 3.0))
class BetaBinomialConjugateModelTest(unittest.TestCase):
def test_beta_binomial_conjugate_graph(self) -> None:
original_model = BetaBinomialModel(2.0, 2.0, 4.0)
queries = [original_model.theta()]
observations = {original_model.x(): tensor(3.0)}
skip_optimizations = set()
bmg = BMGInference()
original_graph = bmg.to_dot(
queries, observations, skip_optimizations=skip_optimizations
)
transformed_model = BetaBinomialTransformedModel(2.0, 2.0, 4.0)
queries_transformed = [transformed_model.theta_transformed()]
observations_transformed = {}
transformed_graph = bmg.to_dot(queries_transformed, observations_transformed)
self.assertEqual(original_graph, transformed_graph)
def test_beta_binomial_conjugate(self) -> None:
"""
        KS test to check that the theta samples from BetaBinomialModel and
        BetaBinomialTransformedModel agree within a certain bound.
We initialize the seed to ensure the test is deterministic.
"""
seed = 0
torch.manual_seed(seed)
random.seed(seed)
original_model = BetaBinomialModel(2.0, 2.0, 4.0)
queries = [original_model.theta()]
observations = {original_model.x(): tensor(3.0)}
num_samples = 1000
bmg = BMGInference()
posterior_original_model = bmg.infer(queries, observations, num_samples)
theta_samples_original = posterior_original_model[original_model.theta()][0]
transformed_model = BetaBinomialTransformedModel(2.0, 2.0, 4.0)
queries_transformed = [transformed_model.theta_transformed()]
observations_transformed = {}
posterior_transformed_model = bmg.infer(
queries_transformed, observations_transformed, num_samples
)
theta_samples_transformed = posterior_transformed_model[
transformed_model.theta_transformed()
][0]
self.assertEqual(
type(theta_samples_original),
type(theta_samples_transformed),
"Sample type of original and transformed model should be the same.",
)
self.assertEqual(
len(theta_samples_original),
len(theta_samples_transformed),
"Sample size of original and transformed model should be the same.",
)
self.assertGreaterEqual(
scipy.stats.ks_2samp(
theta_samples_original, theta_samples_transformed
).pvalue,
0.05,
)
| beanmachine-main | tests/ppl/compiler/fix_beta_binomial_basic_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for bm_to_bmg.py"""
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph
from beanmachine.ppl.compiler.runtime import BMGRuntime
from torch import tensor
from torch.distributions import (
Bernoulli,
Beta,
Binomial,
Chi2,
Gamma,
HalfCauchy,
Normal,
StudentT,
Uniform,
)
def tidy(s: str) -> str:
return "\n".join(c.strip() for c in s.strip().split("\n")).strip()
# These are cases where we just have either a straightforward sample from
# a distribution parameterized with constants, or a distribution parameterized
# with a sample from another distribution.
#
# * No arithmetic
# * No interesting type conversions
# * No use of a sample as an index.
#
@bm.random_variable
def flip_straight_constant():
return Bernoulli(tensor(0.5))
@bm.random_variable
def flip_logit_constant():
logits = tensor(-2.0)
return Bernoulli(logits=logits)
@bm.random_variable
def standard_normal():
return Normal(0.0, 1.0)
@bm.random_variable
def flip_logit_normal():
logits = standard_normal()
return Bernoulli(logits=logits)
@bm.random_variable
def beta_constant():
return Beta(1.0, 1.0)
@bm.random_variable
def hc(i):
return HalfCauchy(1.0)
@bm.random_variable
def beta_hc():
return Beta(hc(1), hc(2))
@bm.random_variable
def student_t():
return StudentT(hc(1), standard_normal(), hc(2))
@bm.random_variable
def bin_constant():
return Binomial(3, 0.5)
@bm.random_variable
def gamma():
return Gamma(1.0, 2.0)
@bm.random_variable
def flat():
return Uniform(0.0, 1.0)
@bm.random_variable
def chi2():
return Chi2(8.0)
expected_bmg_1 = """
0: CONSTANT(probability 0.5) (out nodes: 1, 22)
1: BERNOULLI(0) (out nodes: 2)
2: SAMPLE(1) (out nodes: ) queried
3: CONSTANT(probability 0.119203) (out nodes: 4)
4: BERNOULLI(3) (out nodes: 5)
5: SAMPLE(4) (out nodes: ) queried
6: CONSTANT(real 0) (out nodes: 8)
7: CONSTANT(positive real 1) (out nodes: 8, 12, 12, 14, 25)
8: NORMAL(6, 7) (out nodes: 9)
9: SAMPLE(8) (out nodes: 10, 19) queried
10: BERNOULLI_LOGIT(9) (out nodes: 11)
11: SAMPLE(10) (out nodes: ) queried
12: BETA(7, 7) (out nodes: 13)
13: SAMPLE(12) (out nodes: ) queried
14: HALF_CAUCHY(7) (out nodes: 15, 16)
15: SAMPLE(14) (out nodes: 17, 19) queried
16: SAMPLE(14) (out nodes: 17, 19) queried
17: BETA(15, 16) (out nodes: 18)
18: SAMPLE(17) (out nodes: ) queried
19: STUDENT_T(15, 9, 16) (out nodes: 20)
20: SAMPLE(19) (out nodes: ) queried
21: CONSTANT(natural 3) (out nodes: 22)
22: BINOMIAL(21, 0) (out nodes: 23)
23: SAMPLE(22) (out nodes: ) queried
24: CONSTANT(positive real 2) (out nodes: 25)
25: GAMMA(7, 24) (out nodes: 26)
26: SAMPLE(25) (out nodes: ) queried
27: FLAT() (out nodes: 28)
28: SAMPLE(27) (out nodes: ) queried
29: CONSTANT(positive real 4) (out nodes: 31)
30: CONSTANT(positive real 0.5) (out nodes: 31)
31: GAMMA(29, 30) (out nodes: 32)
32: SAMPLE(31) (out nodes: ) queried
"""
# These are cases where we have a type conversion on a sample.
@bm.random_variable
def normal_from_bools():
# Converts Boolean to real, positive real
# This is of course dubious as we would not typically
# expect the standard deviation to be zero or one, but
# it illustrates that the type conversion works.
# TODO: Consider adding a warning for conversion from
# TODO: bool to positive real.
return Normal(flip_straight_constant(), flip_straight_constant())
@bm.random_variable
def binomial_from_bools():
# Converts Boolean to natural and probability
return Binomial(flip_straight_constant(), flip_straight_constant())
expected_bmg_2 = """
0: CONSTANT(probability 0.5) (out nodes: 1)
1: BERNOULLI(0) (out nodes: 2)
2: SAMPLE(1) (out nodes: 3, 4, 9, 12)
3: TO_REAL(2) (out nodes: 5)
4: TO_POS_REAL(2) (out nodes: 5)
5: NORMAL(3, 4) (out nodes: 6)
6: SAMPLE(5) (out nodes: ) queried
7: CONSTANT(natural 1) (out nodes: 9)
8: CONSTANT(natural 0) (out nodes: 9)
9: IF_THEN_ELSE(2, 7, 8) (out nodes: 13)
10: CONSTANT(probability 1) (out nodes: 12)
11: CONSTANT(probability 1e-10) (out nodes: 12)
12: IF_THEN_ELSE(2, 10, 11) (out nodes: 13)
13: BINOMIAL(9, 12) (out nodes: 14)
14: SAMPLE(13) (out nodes: ) queried
"""
# Here we multiply a bool by a natural, and then use that as a natural.
# This cannot be turned into a BMG that uses multiplication because
# there is no multiplication defined on naturals or bools; the best
# we could do as a multiplication is to turn both into a positive real
# and multiply those. But we *can* turn this into an if-then-else
# that takes a bool and returns either the given natural or zero,
# so that's what we'll do.
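# (Added illustration) In plain Python, for a natural n and a Boolean b,
# n * b == (n if b else 0), which is exactly the if-then-else the compiler
# emits in expected_bmg_3 below.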
@bm.random_variable
def bool_times_natural():
return Binomial(bin_constant() * flip_straight_constant(), 0.5)
expected_bmg_3 = """
0: CONSTANT(natural 3) (out nodes: 2)
1: CONSTANT(probability 0.5) (out nodes: 2, 4, 8)
2: BINOMIAL(0, 1) (out nodes: 3)
3: SAMPLE(2) (out nodes: 7)
4: BERNOULLI(1) (out nodes: 5)
5: SAMPLE(4) (out nodes: 7)
6: CONSTANT(natural 0) (out nodes: 7)
7: IF_THEN_ELSE(5, 3, 6) (out nodes: 8)
8: BINOMIAL(7, 1) (out nodes: 9)
9: SAMPLE(8) (out nodes: ) queried
"""
# Tests for math functions
@bm.random_variable
def math1():
# log(R+) -> R
# exp(R+) -> R+
return Normal(hc(0).log(), hc(1).exp())
@bm.random_variable
def math2():
# R+ ** R+ -> R+
return HalfCauchy(hc(2) ** hc(3))
@bm.random_variable
def math3():
# PHI
return Bernoulli(Normal(0.0, 1.0).cdf(hc(4)))
@bm.random_variable
def math4():
# PHI, alternative syntax
# TODO: Add a test where the value passed to cdf is a named argument.
return Bernoulli(Normal.cdf(Normal(0.0, 1.0), hc(4)))
expected_bmg_4 = """
0: CONSTANT(positive real 1) (out nodes: 1)
1: HALF_CAUCHY(0) (out nodes: 2, 3, 8, 9, 13)
2: SAMPLE(1) (out nodes: 4)
3: SAMPLE(1) (out nodes: 5)
4: LOG(2) (out nodes: 6)
5: EXP(3) (out nodes: 6)
6: NORMAL(4, 5) (out nodes: 7)
7: SAMPLE(6) (out nodes: ) queried
8: SAMPLE(1) (out nodes: 10)
9: SAMPLE(1) (out nodes: 10)
10: POW(8, 9) (out nodes: 11)
11: HALF_CAUCHY(10) (out nodes: 12)
12: SAMPLE(11) (out nodes: ) queried
13: SAMPLE(1) (out nodes: 14)
14: TO_REAL(13) (out nodes: 15)
15: PHI(14) (out nodes: 16)
16: BERNOULLI(15) (out nodes: 17)
17: SAMPLE(16) (out nodes: ) queried
"""
# Demonstrate that we generate 1-p as a complement
@bm.random_variable
def flip_complement():
return Bernoulli(1.0 - beta_constant())
expected_bmg_5 = """
0: CONSTANT(positive real 1) (out nodes: 1, 1)
1: BETA(0, 0) (out nodes: 2)
2: SAMPLE(1) (out nodes: 3)
3: COMPLEMENT(2) (out nodes: 4)
4: BERNOULLI(3) (out nodes: 5)
5: SAMPLE(4) (out nodes: ) queried
"""
# Demonstrate that we generate -log(prob) as a positive real.
@bm.random_variable
def beta_neg_log():
return Beta(-beta_constant().log(), 1.0)
expected_bmg_6 = """
0: CONSTANT(positive real 1) (out nodes: 1, 1, 5)
1: BETA(0, 0) (out nodes: 2)
2: SAMPLE(1) (out nodes: 3)
3: LOG(2) (out nodes: 4)
4: NEGATE(3) (out nodes: 5)
5: BETA(4, 0) (out nodes: 6)
6: SAMPLE(5) (out nodes: ) queried
"""
# Demonstrate that identity additions and multiplications
# are removed from the graph. Here we are computing
# 0 + 0 * hc(0) + 1 * hc(1) + 2 * hc(2)
# but as you can see, in the final program we generate
# the code as though we had written hc(1) + 2 * hc(2).
#
# TODO: However, note that we still do emit a sample
# for hc(0) into the graph, even though it is unused.
# We might consider trimming sample operations which
# are ancestors of no observation or query.
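# (Added note) Concretely, 0.0 + 0 * hc(0) + 1 * hc(1) + 2 * hc(2) simplifies
# to hc(1) + 2 * hc(2): the zero addend, the zero-scaled term, and the
# multiplication by one are all identities, so expected_bmg_7 below contains
# exactly one "*" node and one "+" node.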
@bm.random_variable
def beta_eliminate_identities():
s = 0.0
for i in [0, 1, 2]:
s = s + i * hc(i)
return Beta(s, 4.0)
expected_bmg_7 = """
digraph "graph" {
N0[label="1"];
N1[label="HalfCauchy"];
N2[label="~"];
N3[label="~"];
N4[label="~"];
N5[label="2"];
N6[label="*"];
N7[label="+"];
N8[label="4"];
N9[label="Beta"];
N10[label="~"];
N0 -> N1;
N1 -> N2;
N1 -> N3;
N1 -> N4;
N3 -> N7;
N4 -> N6;
N5 -> N6;
N6 -> N7;
N7 -> N9;
N8 -> N9;
N9 -> N10;
Q0[label="Query"];
N10 -> Q0;
}
"""
class GraphAccumulationTests(unittest.TestCase):
def test_accumulate_simple_distributions(self) -> None:
self.maxDiff = None
queries = [
flip_straight_constant(),
flip_logit_constant(),
standard_normal(),
flip_logit_normal(),
beta_constant(),
hc(1),
hc(2),
beta_hc(),
student_t(),
bin_constant(),
gamma(),
flat(),
chi2(),
]
bmg = BMGRuntime().accumulate_graph(queries, {})
observed = to_bmg_graph(bmg).graph.to_string()
self.assertEqual(tidy(observed), tidy(expected_bmg_1))
def test_accumulate_bool_conversions(self) -> None:
self.maxDiff = None
queries = [normal_from_bools(), binomial_from_bools()]
bmg = BMGRuntime().accumulate_graph(queries, {})
observed = to_bmg_graph(bmg).graph.to_string()
self.assertEqual(tidy(observed), tidy(expected_bmg_2))
def test_accumulate_bool_nat_mult(self) -> None:
self.maxDiff = None
queries = [bool_times_natural()]
bmg = BMGRuntime().accumulate_graph(queries, {})
observed = to_bmg_graph(bmg).graph.to_string()
self.assertEqual(tidy(observed), tidy(expected_bmg_3))
def test_accumulate_math(self) -> None:
self.maxDiff = None
queries = [math1(), math2(), math3()]
bmg = BMGRuntime().accumulate_graph(queries, {})
observed = to_bmg_graph(bmg).graph.to_string()
self.assertEqual(tidy(observed), tidy(expected_bmg_4))
# Try with a different version of CDF syntax.
queries = [math1(), math2(), math4()]
bmg = BMGRuntime().accumulate_graph(queries, {})
observed = to_bmg_graph(bmg).graph.to_string()
self.assertEqual(tidy(observed), tidy(expected_bmg_4))
def test_accumulate_complement(self) -> None:
self.maxDiff = None
queries = [flip_complement()]
bmg = BMGRuntime().accumulate_graph(queries, {})
observed = to_bmg_graph(bmg).graph.to_string()
self.assertEqual(tidy(observed), tidy(expected_bmg_5))
def test_accumulate_neg_log(self) -> None:
self.maxDiff = None
queries = [beta_neg_log()]
bmg = BMGRuntime().accumulate_graph(queries, {})
observed = to_bmg_graph(bmg).graph.to_string()
self.assertEqual(tidy(observed), tidy(expected_bmg_6))
def test_accumulate_eliminate_identities(self) -> None:
self.maxDiff = None
# TODO: We end up with an extraneous zero addend in the
# sum; eliminate that.
queries = [beta_eliminate_identities()]
bmg = BMGRuntime().accumulate_graph(queries, {})
observed = to_bmg_graph(bmg).graph.to_dot()
self.assertEqual(expected_bmg_7.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/graph_accumulation_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.compiler.gen_bmg_cpp import to_bmg_cpp
from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph
from beanmachine.ppl.compiler.gen_bmg_python import to_bmg_python
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.compiler.runtime import BMGRuntime
from torch import tensor
from torch.distributions import Bernoulli, Normal
@bm.random_variable
def norm():
return Normal(tensor(0.0), tensor(1.0))
@bm.random_variable
def flip():
return Bernoulli(0.5)
@bm.functional
def f1by2():
# A 1x2 tensor in Python becomes a 2x1 matrix in BMG
t = tensor([norm().exp(), norm()])
# This should become a LOGSUMEXP BMG node with no TO_MATRIX
return t.logsumexp(dim=0)
@bm.functional
def f2by1():
# A 2x1 tensor in Python becomes a 1x2 matrix in BMG
t = tensor([[norm().exp()], [norm()]])
# This should be an error; BMG requires that the matrix have a single column.
return t.logsumexp(dim=0)
@bm.functional
def f2by3():
# A 2x3 tensor in Python becomes a 3x2 matrix in BMG
t = tensor([[norm().exp(), 10, 20], [norm(), 30, 40]])
# Randomly choose one of the two columns and LSE it.
    # This should become a LOGSUMEXP_VECTOR node.
return t[flip()].logsumexp(dim=0)
class LSEVectorTest(unittest.TestCase):
def test_lse1by2(self) -> None:
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph([f1by2()], {})
observed = to_dot(bmg, after_transform=True, label_edges=False)
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Exp];
N5[label=ToReal];
N6[label=LogSumExp];
N7[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N3 -> N6;
N4 -> N5;
N5 -> N6;
N6 -> N7;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_python(bmg).code
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_real(0.0)
n1 = g.add_constant_pos_real(1.0)
n2 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n0, n1],
)
n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2])
n4 = g.add_operator(graph.OperatorType.EXP, [n3])
n5 = g.add_operator(graph.OperatorType.TO_REAL, [n4])
n6 = g.add_operator(graph.OperatorType.LOGSUMEXP, [n5, n3])
q0 = g.query(n6)
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_graph(bmg).graph.to_dot()
expected = """
digraph "graph" {
N0[label="0"];
N1[label="1"];
N2[label="Normal"];
N3[label="~"];
N4[label="exp"];
N5[label="ToReal"];
N6[label="LogSumExp"];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N3 -> N6;
N4 -> N5;
N5 -> N6;
Q0[label="Query"];
N6 -> Q0;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_lse2by3(self) -> None:
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph([f2by3()], {})
observed = to_dot(bmg, after_transform=True, label_edges=False)
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=0.5];
N05[label=Bernoulli];
N06[label=Sample];
N07[label=3];
N08[label=2];
N09[label=Exp];
N10[label=ToReal];
N11[label=10.0];
N12[label=20.0];
N13[label=30.0];
N14[label=40.0];
N15[label=ToMatrix];
N16[label=1];
N17[label=0];
N18[label=if];
N19[label=ColumnIndex];
N20[label=LogSumExp];
N21[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N03 -> N09;
N03 -> N15;
N04 -> N05;
N05 -> N06;
N06 -> N18;
N07 -> N15;
N08 -> N15;
N09 -> N10;
N10 -> N15;
N11 -> N15;
N12 -> N15;
N13 -> N15;
N14 -> N15;
N15 -> N19;
N16 -> N18;
N17 -> N18;
N18 -> N19;
N19 -> N20;
N20 -> N21;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_cpp(bmg).code
expected = """
graph::Graph g;
uint n0 = g.add_constant_real(0.0);
uint n1 = g.add_constant_pos_real(1.0);
uint n2 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n0, n1}));
uint n3 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n2}));
uint n4 = g.add_constant_probability(0.5);
uint n5 = g.add_distribution(
graph::DistributionType::BERNOULLI,
graph::AtomicType::BOOLEAN,
std::vector<uint>({n4}));
uint n6 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n5}));
uint n7 = g.add_constant_natural(3);
uint n8 = g.add_constant_natural(2);
uint n9 = g.add_operator(
graph::OperatorType::EXP, std::vector<uint>({n3}));
uint n10 = g.add_operator(
graph::OperatorType::TO_REAL, std::vector<uint>({n9}));
uint n11 = g.add_constant_real(10.0);
uint n12 = g.add_constant_real(20.0);
uint n13 = g.add_constant_real(30.0);
uint n14 = g.add_constant_real(40.0);
uint n15 = g.add_operator(
graph::OperatorType::TO_MATRIX,
std::vector<uint>({n7, n8, n10, n11, n12, n3, n13, n14}));
uint n16 = g.add_constant_natural(1);
uint n17 = g.add_constant_natural(0);
uint n18 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n6, n16, n17}));
uint n19 = g.add_operator(
graph::OperatorType::COLUMN_INDEX, std::vector<uint>({n15, n18}));
uint n20 = g.add_operator(
graph::OperatorType::LOGSUMEXP_VECTOR, std::vector<uint>({n19}));
uint q0 = g.query(n20);
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_graph(bmg).graph.to_dot()
expected = """
digraph "graph" {
N0[label="0"];
N1[label="1"];
N2[label="Normal"];
N3[label="~"];
N4[label="0.5"];
N5[label="Bernoulli"];
N6[label="~"];
N7[label="3"];
N8[label="2"];
N9[label="exp"];
N10[label="ToReal"];
N11[label="10"];
N12[label="20"];
N13[label="30"];
N14[label="40"];
N15[label="ToMatrix"];
N16[label="1"];
N17[label="0"];
N18[label="IfThenElse"];
N19[label="ColumnIndex"];
N20[label="LogSumExp"];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N9;
N3 -> N15;
N4 -> N5;
N5 -> N6;
N6 -> N18;
N7 -> N15;
N8 -> N15;
N9 -> N10;
N10 -> N15;
N11 -> N15;
N12 -> N15;
N13 -> N15;
N14 -> N15;
N15 -> N19;
N16 -> N18;
N17 -> N18;
N18 -> N19;
N19 -> N20;
Q0[label="Query"];
N20 -> Q0;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_lse2by1(self) -> None:
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph([f2by1()], {})
expected = """
The model uses a logsumexp operation unsupported by Bean Machine Graph.
The unsupported node was created in function call f2by1().
"""
with self.assertRaises(ValueError) as ex:
to_dot(bmg, after_transform=True)
observed = str(ex.exception)
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/lse_vector_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Any
import beanmachine.ppl as bm
from beanmachine.ppl.compiler.runtime import BMGRuntime
from beanmachine.ppl.compiler.support import ComputeSupport, Infinite, TooBig
from torch import Tensor, tensor
from torch.distributions import Bernoulli, Categorical, Normal
def tidy(s: str) -> str:
return "\n".join(c.strip() for c in s.strip().split("\n")).strip()
def tensor_equality(x: Tensor, y: Tensor) -> bool:
# Tensor equality is weird. Suppose x and y are both
# tensor([1.0, 2.0]). Then x.eq(y) is tensor([True, True]),
# and x.eq(y).all() is tensor(True).
return bool(x.eq(y).all())
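# A minimal sketch of the quirk described above (runs at import time and is
# independent of the tests below; the _example_* names are purely illustrative):
_example_x = tensor([1.0, 2.0])
_example_y = tensor([1.0, 2.0])
assert bool(_example_x.eq(_example_y).all())  # elementwise eq, reduced to a plain bool
assert tensor_equality(_example_x, _example_y)  # the helper returns True for equal tensors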
@bm.random_variable
def flip1(n):
return Bernoulli(0.5)
@bm.random_variable
def flip2(n):
return Bernoulli(tensor([[0.5, 0.5]]))
@bm.functional
def to_tensor():
return tensor([2.5, flip1(0), flip1(1), flip1(2)])
@bm.random_variable
def normal():
return Normal(0.0, 1.0)
@bm.functional
def sum1():
return flip1(0) + 1.0
@bm.functional
def prod1():
return sum1() * sum1()
@bm.functional
def pow1():
return prod1() ** prod1()
@bm.functional
def ge1():
return pow1() >= prod1()
@bm.functional
def and1():
return ge1() & ge1()
@bm.functional
def negexp1():
return -prod1().exp()
@bm.random_variable
def cat3():
return Categorical(tensor([0.5, 0.25, 0.25]))
@bm.random_variable
def cat2_3():
return Categorical(tensor([[0.5, 0.25, 0.25], [0.25, 0.25, 0.5]]))
@bm.random_variable
def cat8_3():
return Categorical(
tensor(
[
[0.5, 0.25, 0.25],
[0.25, 0.25, 0.5],
[0.5, 0.25, 0.25],
[0.25, 0.25, 0.5],
[0.5, 0.25, 0.25],
[0.25, 0.25, 0.5],
[0.5, 0.25, 0.25],
[0.25, 0.25, 0.5],
[0.5, 0.25, 0.25],
[0.25, 0.25, 0.5],
]
)
)
@bm.random_variable
def normal_or_bern(n):
if n:
return Normal(0.0, 1.0)
return Bernoulli(0.5)
@bm.random_variable
def cat_or_bern(n):
if n:
return Categorical(tensor([0.5, 0.25, 0.25, 0.25]))
return Bernoulli(0.5)
@bm.functional
def switch_inf():
return normal_or_bern(flip1(0))
@bm.functional
def switch_4():
return cat_or_bern(flip1(0))
class NodeSupportTest(unittest.TestCase):
def assertEqual(self, x: Any, y: Any) -> None:
if isinstance(x, Tensor) and isinstance(y, Tensor):
self.assertTrue(tensor_equality(x, y))
else:
super().assertEqual(x, y)
def test_node_supports(self) -> None:
self.maxDiff = None
rt = BMGRuntime()
rt.accumulate_graph([and1(), negexp1()], {})
cs = ComputeSupport()
expected_flip1 = """
tensor(0.)
tensor(1.)"""
observed_flip1 = str(cs[rt._rv_to_node(flip1(0))])
self.assertEqual(expected_flip1.strip(), observed_flip1.strip())
expected_sum1 = """
tensor(1.)
tensor(2.)"""
observed_sum1 = str(cs[rt._rv_to_node(sum1())])
self.assertEqual(expected_sum1.strip(), observed_sum1.strip())
expected_prod1 = """
tensor(1.)
tensor(2.)
tensor(4.)"""
observed_prod1 = str(cs[rt._rv_to_node(prod1())])
self.assertEqual(expected_prod1.strip(), observed_prod1.strip())
expected_pow1 = """
tensor(1.)
tensor(16.)
tensor(2.)
tensor(256.)
tensor(4.)
"""
observed_pow1 = str(cs[rt._rv_to_node(pow1())])
self.assertEqual(expected_pow1.strip(), observed_pow1.strip())
expected_ge1 = """
tensor(False)
tensor(True)
"""
observed_ge1 = str(cs[rt._rv_to_node(ge1())])
self.assertEqual(expected_ge1.strip(), observed_ge1.strip())
expected_and1 = expected_ge1
observed_and1 = str(cs[rt._rv_to_node(and1())])
self.assertEqual(expected_and1.strip(), observed_and1.strip())
# Some versions of torch display -exp(4) as -54.5981, and some display it
# as -54.5982. (The actual value is -54.5981500331..., which is not an excuse
# for some versions getting it wrong.) To avoid this test randomly failing
# depending on which version of torch we're using, we'll truncate to integers.
expected_exp1 = "['-2', '-54', '-7']"
results = [str(int(t)) for t in cs[rt._rv_to_node(negexp1())]]
results.sort()
self.assertEqual(expected_exp1.strip(), str(results).strip())
def test_bernoulli_support(self) -> None:
self.maxDiff = None
rt = BMGRuntime()
rt.accumulate_graph([flip2(0)], {})
sample = rt._rv_to_node(flip2(0))
s = ComputeSupport()
observed = str(s[sample])
expected = """
tensor([[0., 0.]])
tensor([[0., 1.]])
tensor([[1., 0.]])
tensor([[1., 1.]])"""
self.assertEqual(expected.strip(), observed.strip())
def test_categorical_support(self) -> None:
self.maxDiff = None
rt = BMGRuntime()
rt.accumulate_graph([cat3(), cat2_3(), cat8_3()], {})
s = ComputeSupport()
c3 = rt._rv_to_node(cat3())
observed_c3 = str(s[c3])
expected_c3 = """
tensor(0)
tensor(1)
tensor(2)
"""
self.assertEqual(expected_c3.strip(), observed_c3.strip())
c23 = rt._rv_to_node(cat2_3())
observed_c23 = str(s[c23])
expected_c23 = """
tensor([0, 0])
tensor([0, 1])
tensor([0, 2])
tensor([1, 0])
tensor([1, 1])
tensor([1, 2])
tensor([2, 0])
tensor([2, 1])
tensor([2, 2])
"""
self.assertEqual(expected_c23.strip(), observed_c23.strip())
c83 = rt._rv_to_node(cat8_3())
observed_c23 = s[c83]
self.assertTrue(observed_c23 is TooBig)
def test_stochastic_tensor_support(self) -> None:
self.maxDiff = None
rt = BMGRuntime()
rt.accumulate_graph([to_tensor()], {})
tm = rt._rv_to_node(to_tensor())
s = ComputeSupport()
observed = str(s[tm])
expected = """
tensor([2.5000, 0.0000, 0.0000, 0.0000])
tensor([2.5000, 0.0000, 0.0000, 1.0000])
tensor([2.5000, 0.0000, 1.0000, 0.0000])
tensor([2.5000, 0.0000, 1.0000, 1.0000])
tensor([2.5000, 1.0000, 0.0000, 0.0000])
tensor([2.5000, 1.0000, 0.0000, 1.0000])
tensor([2.5000, 1.0000, 1.0000, 0.0000])
tensor([2.5000, 1.0000, 1.0000, 1.0000])
"""
self.assertEqual(expected.strip(), observed.strip())
def test_infinite_support(self) -> None:
self.maxDiff = None
rt = BMGRuntime()
rt.accumulate_graph([normal()], {})
sample = rt._rv_to_node(normal())
s = ComputeSupport()
observed = s[sample]
self.assertEqual(Infinite, observed)
def test_switch_support(self) -> None:
# This is also tested in stochastic_control_flow_test.py.
self.maxDiff = None
rt = BMGRuntime()
rt.accumulate_graph([switch_inf(), switch_4()], {})
s = ComputeSupport()
switch_inf_sample = rt._rv_to_node(switch_inf())
observed_inf = s[switch_inf_sample]
self.assertEqual(Infinite, observed_inf)
switch_4_sample = rt._rv_to_node(switch_4())
observed_4 = str(s[switch_4_sample])
# Notice an oddity here: in torch, Bernoulli produces 0. and 1. -- floats --
# but Categorical produces 0, 1, 2, 3 -- integers. When taking the union we
# detect that tensor(0) and tensor(1) are equal to tensor(0.) and tensor(1.);
# they are deduplicated.
# TODO: Can this cause any problems? Do we need to canonicalize Bernoulli output
# to integers?
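# (For example, under torch's usual type promotion tensor(0).eq(tensor(0.)) is
# tensor(True), so the integer and float encodings of the same value compare
# as equal and collapse to a single entry in the support.)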
expected_4 = """
tensor(0.)
tensor(1.)
tensor(2)
tensor(3)
"""
self.assertEqual(expected_4.strip(), observed_4.strip())
| beanmachine-main | tests/ppl/compiler/support_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Dirichlet compiler tests
import unittest
import beanmachine.ppl as bm
from beanmachine.graph import (
AtomicType,
DistributionType,
Graph,
InferenceType,
OperatorType,
ValueType,
VariableType,
)
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.gen_bmg_cpp import to_bmg_cpp
from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph
from beanmachine.ppl.compiler.gen_bmg_python import to_bmg_python
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.compiler.runtime import BMGRuntime
from beanmachine.ppl.inference import BMGInference, SingleSiteNewtonianMonteCarlo
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from torch import Size, tensor
from torch.distributions import Bernoulli, Dirichlet
def tidy(s: str) -> str:
return "\n".join(c.strip() for c in s.strip().split("\n")).strip()
def _rv_id() -> RVIdentifier:
return RVIdentifier(lambda a, b: a, (1, 1))
# Support for Dirichlet distributions has recently been added to BMG;
# this is the first time that the compiler will have to deal with
# tensor-valued quantities directly so we anticipate having a number
# of problems to solve in type analysis and code generation that have
# been put off until now.
#
# We'll start by just taking the BMG code for a spin directly and see
# what gives errors and what gives results.
dirichlet = DistributionType.DIRICHLET
simplex = VariableType.COL_SIMPLEX_MATRIX
broadcast = VariableType.BROADCAST_MATRIX
real = AtomicType.REAL
prob = AtomicType.PROBABILITY
sample = OperatorType.SAMPLE
s3x1 = ValueType(simplex, prob, 3, 1)
r3x1 = ValueType(broadcast, real, 3, 1)
nmc = InferenceType.NMC
rejection = InferenceType.REJECTION
# Here are some simple models we'll use to test the compiler.
# TODO: Test Dirichlets with non-constant inputs.
@bm.random_variable
def d0():
return Dirichlet(tensor([]))
# Torch rejects this one.
# @bm.random_variable
# def d1a():
# return Dirichlet(tensor(0.5))
@bm.random_variable
def d1b():
return Dirichlet(tensor([1.0]))
@bm.random_variable
def d1c():
return Dirichlet(tensor([[1.5]]))
@bm.random_variable
def d1d():
return Dirichlet(tensor([[[2.0]]]))
# Torch rejects this one
# @bm.random_variable
# def d1e():
# return Dirichlet(tensor([[[-2.0]]]))
@bm.random_variable
def d2a():
return Dirichlet(tensor([2.5, 3.0]))
@bm.random_variable
def d2b():
return Dirichlet(tensor([[3.5, 4.0]]))
@bm.random_variable
def d2c():
return Dirichlet(tensor([[[4.5, 5.0]]]))
@bm.random_variable
def d23():
return Dirichlet(tensor([[5.5, 6.0, 6.5], [7.0, 7.5, 8.0]]))
@bm.random_variable
def d3():
return Dirichlet(tensor([1.0, 1.0, 1.0]))
@bm.functional
def d3_index_0():
return d3()[0]
@bm.random_variable
def flip():
return Bernoulli(0.5)
@bm.functional
def d2a_index_flip():
return d2a()[flip()]
class DirichletTest(unittest.TestCase):
def test_dirichlet_negative(self) -> None:
self.maxDiff = None
g = Graph()
m1 = tensor([1.5, 1.0, 2.0])
cm1 = g.add_constant_pos_matrix(m1)
m2 = tensor([[1.5, 1.0], [2.0, 1.5]])
cm2 = g.add_constant_pos_matrix(m2)
two = g.add_constant(2)
# Input must be a positive real matrix with one column.
with self.assertRaises(ValueError):
g.add_distribution(dirichlet, s3x1, [two])
with self.assertRaises(ValueError):
g.add_distribution(dirichlet, s3x1, [cm2])
# Must be only one input
with self.assertRaises(ValueError):
g.add_distribution(dirichlet, s3x1, [cm1, two])
# Output type must be simplex
with self.assertRaises(ValueError):
g.add_distribution(dirichlet, r3x1, [cm1])
def test_dirichlet_sample(self) -> None:
self.maxDiff = None
g = Graph()
m1 = tensor([1.5, 1.0, 2.0])
cm1 = g.add_constant_pos_matrix(m1)
d = g.add_distribution(dirichlet, s3x1, [cm1])
ds = g.add_operator(sample, [d])
g.query(ds)
samples = g.infer(1, rejection)
# samples has form [[array([[a1],[a2],[a3]])]]
result = tensor(samples[0][0]).reshape([3])
# We get a three-element simplex, so it should sum to 1.0.
self.assertAlmostEqual(1.0, float(sum(result)))
def test_constant_pos_real_matrix(self) -> None:
# To make a BMG graph with a Dirichlet distribution the first thing
# we'll need to do is make a positive real matrix as its input.
# Demonstrate that we can add such a matrix to a graph builder,
# do a type analysis, and generate C++ and Python code that builds
# the graph. Finally, actually build the graph.
self.maxDiff = None
bmg = BMGraphBuilder()
c1 = bmg.add_pos_real_matrix(tensor(1.0))
c2 = bmg.add_pos_real_matrix(tensor([1.0, 1.5]))
c3 = bmg.add_pos_real_matrix(tensor([[1.0, 1.5], [2.0, 2.5]]))
c4 = bmg.add_pos_real_matrix(tensor([1.0, 1.5]))
# These should be deduplicated
self.assertTrue(c4 is c2)
# Verify that we can add these nodes to the graph, do a type analysis,
# and survive the problem-fixing pass without generating an exception.
bmg.add_query(c1, _rv_id())
bmg.add_query(c2, _rv_id())
bmg.add_query(c3, _rv_id())
expected = """
digraph "graph" {
N0[label="1.0:R+"];
N1[label="Query:R+"];
N2[label="[1.0,1.5]:MR+[2,1]"];
N3[label="Query:MR+[2,1]"];
N4[label="[[1.0,1.5],\\\\n[2.0,2.5]]:MR+[2,2]"];
N5[label="Query:MR+[2,2]"];
N0 -> N1;
N2 -> N3;
N4 -> N5;
}"""
observed = to_dot(
bmg,
node_types=True,
label_edges=False,
after_transform=True,
)
self.assertEqual(expected.strip(), observed.strip())
# We should be able to generate correct C++ and Python code to build
# a graph that contains only positive constant matrices. Note that the
# queries are not emitted into the graph because BMG does not allow
# a query on a constant.
#
# NB: m2 is transposed from the source!
expected = """
graph::Graph g;
Eigen::MatrixXd m0(1, 1);
m0 << 1.0;
uint n0 = g.add_constant_pos_matrix(m0);
uint q0 = g.query(n0);
Eigen::MatrixXd m1(2, 1);
m1 << 1.0, 1.5;
uint n1 = g.add_constant_pos_matrix(m1);
uint q1 = g.query(n1);
Eigen::MatrixXd m2(2, 2);
m2 << 1.0, 2.0, 1.5, 2.5;
uint n2 = g.add_constant_pos_matrix(m2);
uint q2 = g.query(n2);
"""
observed = to_bmg_cpp(bmg).code
self.assertEqual(expected.strip(), observed.strip())
# Notice that constant matrices are always expressed as a
# 2-d matrix, and we transpose them so that they are column-major.
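# For example, as the expected code below shows, tensor([1.0, 1.5]) (shape [2])
# is emitted as the 2 x 1 column [[1.0],[1.5]], and the 2 x 2 input
# [[1.0, 1.5], [2.0, 2.5]] is emitted column-major as [[1.0, 2.0], [1.5, 2.5]].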
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_pos_matrix(tensor([[1.0]]))
q0 = g.query(n0)
n1 = g.add_constant_pos_matrix(tensor([[1.0],[1.5]]))
q1 = g.query(n1)
n2 = g.add_constant_pos_matrix(tensor([[1.0,2.0],[1.5,2.5]]))
q2 = g.query(n2)
"""
observed = to_bmg_python(bmg).code
self.assertEqual(expected.strip(), observed.strip())
# Let's actually get the graph.
# Note that what was a row vector in the original code is now a column vector.
expected = """
0: CONSTANT(matrix<positive real> 1) (out nodes: ) queried
1: CONSTANT(matrix<positive real> 1
1.5) (out nodes: ) queried
2: CONSTANT(matrix<positive real> 1 2
1.5 2.5) (out nodes: ) queried
"""
observed = to_bmg_graph(bmg).graph.to_string()
self.assertEqual(tidy(expected), tidy(observed))
def test_dirichlet_type_analysis(self) -> None:
self.maxDiff = None
queries = [d0(), d1b(), d1c(), d1d(), d2a(), d2b(), d2c(), d23()]
bmg = BMGRuntime().accumulate_graph(queries, {})
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
after_transform=False,
label_edges=False,
)
expected = """
digraph "graph" {
N00[label="[]:T"];
N01[label="Dirichlet:S[1,1]"];
N02[label="Sample:S[1,1]"];
N03[label="Query:S[1,1]"];
N04[label="[1.0]:OH"];
N05[label="Dirichlet:S[1,1]"];
N06[label="Sample:S[1,1]"];
N07[label="Query:S[1,1]"];
N08[label="[[1.5]]:R+"];
N09[label="Dirichlet:S[1,1]"];
N10[label="Sample:S[1,1]"];
N11[label="Query:S[1,1]"];
N12[label="[[[2.0]]]:N"];
N13[label="Dirichlet:S[1,1]"];
N14[label="Sample:S[1,1]"];
N15[label="Query:S[1,1]"];
N16[label="[2.5,3.0]:MR+[2,1]"];
N17[label="Dirichlet:S[2,1]"];
N18[label="Sample:S[2,1]"];
N19[label="Query:S[2,1]"];
N20[label="[[3.5,4.0]]:MR+[2,1]"];
N21[label="Dirichlet:S[2,1]"];
N22[label="Sample:S[2,1]"];
N23[label="Query:S[2,1]"];
N24[label="[[[4.5,5.0]]]:T"];
N25[label="Dirichlet:S[1,1]"];
N26[label="Sample:S[1,1]"];
N27[label="Query:S[1,1]"];
N28[label="[[5.5,6.0,6.5],\\\\n[7.0,7.5,8.0]]:MR+[3,2]"];
N29[label="Dirichlet:S[3,1]"];
N30[label="Sample:S[3,1]"];
N31[label="Query:S[3,1]"];
N00 -> N01[label="R+"];
N01 -> N02[label="S[1,1]"];
N02 -> N03[label=any];
N04 -> N05[label="R+"];
N05 -> N06[label="S[1,1]"];
N06 -> N07[label=any];
N08 -> N09[label="R+"];
N09 -> N10[label="S[1,1]"];
N10 -> N11[label=any];
N12 -> N13[label="R+"];
N13 -> N14[label="S[1,1]"];
N14 -> N15[label=any];
N16 -> N17[label="MR+[2,1]"];
N17 -> N18[label="S[2,1]"];
N18 -> N19[label=any];
N20 -> N21[label="MR+[2,1]"];
N21 -> N22[label="S[2,1]"];
N22 -> N23[label=any];
N24 -> N25[label="R+"];
N25 -> N26[label="S[1,1]"];
N26 -> N27[label=any];
N28 -> N29[label="MR+[3,1]"];
N29 -> N30[label="S[3,1]"];
N30 -> N31[label=any];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_dirichlet_errors(self) -> None:
self.maxDiff = None
# If the constant tensor given is not supported at all by BMG because of
# its dimensionality then that is the error we will report. If the tensor
# is supported by BMG but not valid for a Dirichlet then that's what we say.
# TODO: Error message is misleading in that it says that the requirement
# is a 3x1 positive real matrix, when the real requirement is that it be
# ANY 1-d positive real matrix.
expected = (
"The concentration of a Dirichlet is required to be"
+ " a 3 x 1 positive real matrix but is"
+ " a 3 x 2 positive real matrix.\n"
+ "The Dirichlet was created in function call d23()."
)
with self.assertRaises(ValueError) as ex:
BMGInference().infer([d23()], {}, 1)
self.assertEqual(expected.strip(), str(ex.exception).strip())
def test_dirichlet_fix_problems(self) -> None:
# Can we take an input that is a valid tensor and deduce that we must
# replace it with a positive real constant matrix node?
self.maxDiff = None
queries = [d2a()]
observations = {d2a(): tensor([0.5, 0.5])}
bmg = BMGRuntime().accumulate_graph(queries, observations)
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
after_transform=True,
label_edges=False,
)
expected = """
digraph "graph" {
N0[label="[2.5,3.0]:MR+[2,1]"];
N1[label="Dirichlet:S[2,1]"];
N2[label="Sample:S[2,1]"];
N3[label="Observation tensor([0.5000, 0.5000]):S[2,1]"];
N4[label="Query:S[2,1]"];
N0 -> N1[label="MR+[2,1]"];
N1 -> N2[label="S[2,1]"];
N2 -> N3[label=any];
N2 -> N4[label=any];
}
"""
self.assertEqual(expected.strip(), observed.strip())
# This is the tricky case: the degenerate case where we have only
# one value, and we need to make sure that we generate a matrix
# constant rather than a regular positive real constant:
queries = [d1b()]
bmg = BMGRuntime().accumulate_graph(queries, {})
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
after_transform=True,
label_edges=False,
)
# This is subtle, but notice that we have a constant matrix here rather
# than a constant; the value is [1.0], not 1.0.
expected = """
digraph "graph" {
N0[label="[1.0]:R+"];
N1[label="Dirichlet:S[1,1]"];
N2[label="Sample:S[1,1]"];
N3[label="Query:S[1,1]"];
N0 -> N1[label="R+"];
N1 -> N2[label="S[1,1]"];
N2 -> N3[label=any];
}"""
self.assertEqual(expected.strip(), observed.strip())
def test_dirichlet_bmg_inference(self) -> None:
# Get Dirichlet samples; verify that the sample set
# is in rows, not columns.
self.maxDiff = None
# 2-element Dirichlet
queries = [d2a()]
observations = {}
num_samples = 10
results = BMGInference().infer(
queries, observations, num_samples, 1, inference_type=nmc
)
samples = results[d2a()]
self.assertEqual(Size([1, num_samples, 2]), samples.size())
# Make sure we get the same thing when we use Bean Machine proper:
results = SingleSiteNewtonianMonteCarlo().infer(
queries, observations, num_samples, 1
)
samples = results[d2a()]
self.assertEqual(Size([1, num_samples, 2]), samples.size())
# 3-element Dirichlet
queries = [d3()]
observations = {}
num_samples = 20
results = BMGInference().infer(
queries, observations, num_samples, 1, inference_type=rejection
)
samples = results[d3()]
self.assertEqual(Size([1, num_samples, 3]), samples.size())
# If we observe a Dirichlet sample to be a value, we'd better get
# that value when we query.
queries = [d3()]
observations = {d3(): tensor([0.5, 0.25, 0.25])}
num_samples = 1
results = BMGInference().infer(
queries, observations, num_samples, 1, inference_type=nmc
)
samples = results[d3()]
expected = "tensor([[[0.5000, 0.2500, 0.2500]]], dtype=torch.float64)"
self.assertEqual(expected, str(samples))
# Make sure we get the same thing when we use Bean Machine proper:
results = SingleSiteNewtonianMonteCarlo().infer(
queries, observations, num_samples, 1
)
samples = results[d3()]
expected = "tensor([[[0.5000, 0.2500, 0.2500]]])"
self.assertEqual(expected, str(samples))
def test_dirichlet_to_python(self) -> None:
self.maxDiff = None
queries = [d2a()]
observations = {d2a(): tensor([0.5, 0.5])}
observed = BMGInference().to_python(queries, observations)
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_pos_matrix(tensor([[2.5],[3.0]]))
n1 = g.add_distribution(
graph.DistributionType.DIRICHLET,
graph.ValueType(
graph.VariableType.COL_SIMPLEX_MATRIX,
graph.AtomicType.PROBABILITY,
2,
1,
),
[n0],
)
n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
g.observe(n2, tensor([0.5000, 0.5000]))
q0 = g.query(n2)"""
self.assertEqual(expected.strip(), observed.strip())
def test_dirichlet_to_cpp(self) -> None:
self.maxDiff = None
queries = [d2a()]
observations = {d2a(): tensor([0.5, 0.5])}
observed = BMGInference().to_cpp(queries, observations)
expected = """
graph::Graph g;
Eigen::MatrixXd m0(2, 1);
m0 << 2.5, 3.0;
uint n0 = g.add_constant_pos_matrix(m0);
uint n1 = g.add_distribution(
graph::DistributionType::DIRICHLET,
graph::ValueType(
graph::VariableType::COL_SIMPLEX_MATRIX,
graph::AtomicType::PROBABILITY,
2,
1
),
std::vector<uint>({n0}));
uint n2 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
Eigen::MatrixXd o0(2, 1);
o0 << 0.5, 0.5;
g.observe(n2, o0);
uint q0 = g.query(n2);"""
self.assertEqual(expected.strip(), observed.strip())
def test_dirichlet_observation_errors(self) -> None:
self.maxDiff = None
queries = [d2a()]
# Wrong size, wrong sum
observations = {d2a(): tensor(2.0)}
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, observations, 1)
expected = (
"A Dirichlet distribution is observed to have value 2.0 "
+ "but only produces samples of type 2 x 1 simplex matrix."
)
self.assertEqual(expected, str(ex.exception))
# Wrong size, right sum
observations = {d2a(): tensor([0.25, 0.25, 0.25, 0.25])}
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, observations, 1)
expected = (
"A Dirichlet distribution is observed to have value "
+ "tensor([0.2500, 0.2500, 0.2500, 0.2500]) "
+ "but only produces samples of type 2 x 1 simplex matrix."
)
self.assertEqual(expected, str(ex.exception))
# Right size, wrong sum
observations = {d2a(): tensor([0.25, 0.25])}
with self.assertRaises(ValueError) as ex:
BMGInference().infer(queries, observations, 1)
expected = (
"A Dirichlet distribution is observed to have value "
+ "tensor([0.2500, 0.2500]) "
+ "but only produces samples of type 2 x 1 simplex matrix."
)
self.assertEqual(expected, str(ex.exception))
def test_dirichlet_index(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([d3_index_0()], {})
expected = """
digraph "graph" {
N0[label="[1.0,1.0,1.0]"];
N1[label=Dirichlet];
N2[label=Sample];
N3[label=0];
N4[label=index];
N5[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N4;
N3 -> N4;
N4 -> N5;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([d2a_index_flip()], {})
expected = """
digraph "graph" {
N00[label="[2.5,3.0]"];
N01[label=Dirichlet];
N02[label=Sample];
N03[label=0.5];
N04[label=Bernoulli];
N05[label=Sample];
N06[label=1];
N07[label=0];
N08[label=if];
N09[label=index];
N10[label=Query];
N00 -> N01;
N01 -> N02;
N02 -> N09;
N03 -> N04;
N04 -> N05;
N05 -> N08;
N06 -> N08;
N07 -> N08;
N08 -> N09;
N09 -> N10;
}
"""
self.assertEqual(expected.strip(), observed.strip())
queries = [d2a(), d2a_index_flip()]
observations = {flip(): tensor(1.0)}
results = BMGInference().infer(queries, observations, 1)
d2a_sample = results[d2a()][0, 0]
index_sample = results[d2a_index_flip()][0]
# The sample and the indexed sample must be the same value
self.assertEqual(d2a_sample[1], index_sample)
| beanmachine-main | tests/ppl/compiler/dirichlet_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Dirichlet compiler tests
import unittest
import beanmachine.ppl as bm
import torch
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Normal
@bm.random_variable
def norm(x):
return Normal(0.0, 1.0)
@bm.random_variable
def bern():
return Bernoulli(0.5)
@bm.functional
def cholesky1():
n0 = norm(0) * norm(0)
n1 = norm(1) * norm(1)
t = tensor([[n0, 0.0], [0.0, n1]])
return torch.linalg.cholesky(t)
@bm.functional
def cholesky2():
n0 = norm(0) * norm(0)
n1 = norm(1) * norm(1)
t = tensor([[n0, 0.0], [0.0, n1]])
return torch.Tensor.cholesky(t)
@bm.functional
def cholesky3():
n0 = norm(0) * norm(0)
n1 = norm(1) * norm(1)
t = tensor([[n0, 0.0], [0.0, n1]])
return t.cholesky()
@bm.functional
def cholesky4():
# Matrix of bools should convert to reals
t = tensor([[bern(), 0], [0, 1]])
return t.cholesky()
@bm.functional
def cholesky5():
n0 = norm(0) * norm(0)
n1 = norm(1) * norm(1)
t = tensor([[n0, 0.0], [0.0, n1]])
L, _ = torch.linalg.cholesky_ex(t)
return L
# TODO: Test with a non-square matrix, should give an error.
class CholeskyTest(unittest.TestCase):
def test_cholesky(self) -> None:
self.maxDiff = None
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=Sample];
N05[label=2];
N06[label="*"];
N07[label="*"];
N08[label=ToMatrix];
N09[label=Cholesky];
N10[label=Query];
N00 -> N02;
N00 -> N08;
N00 -> N08;
N01 -> N02;
N02 -> N03;
N02 -> N04;
N03 -> N06;
N03 -> N06;
N04 -> N07;
N04 -> N07;
N05 -> N08;
N05 -> N08;
N06 -> N08;
N07 -> N08;
N08 -> N09;
N09 -> N10;
}
"""
observed = BMGInference().to_dot([cholesky1()], {})
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([cholesky2()], {})
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([cholesky3()], {})
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_dot([cholesky5()], {})
self.assertEqual(expected.strip(), observed.strip())
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=2];
N4[label=False];
N5[label=True];
N6[label=ToMatrix];
N7[label=ToRealMatrix];
N8[label=Cholesky];
N9[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N6;
N3 -> N6;
N3 -> N6;
N4 -> N6;
N4 -> N6;
N5 -> N6;
N6 -> N7;
N7 -> N8;
N8 -> N9;
}
"""
observed = BMGInference().to_dot([cholesky4()], {})
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/cholesky_test.py |
beanmachine-main | tests/ppl/compiler/testlib/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict
import beanmachine.ppl as bm
import torch.distributions as dist
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from torch import Tensor
class BetaBernoulliBasicModel(object):
def __init__(self, alpha: Tensor, beta: Tensor):
self.alpha_ = alpha
self.beta_ = beta
@bm.random_variable
def theta(self):
return dist.Beta(self.alpha_, self.beta_)
@bm.random_variable
def y(self, i):
return dist.Bernoulli(self.theta())
def gen_obs(self, num_obs: int) -> Dict[RVIdentifier, Tensor]:
true_theta = 0.75
obs = {}
for i in range(0, num_obs):
obs[self.y(i)] = dist.Bernoulli(true_theta).sample()
return obs
class BetaBernoulliOpsModel(BetaBernoulliBasicModel):
@bm.functional
def sum_y(self):
sum = 0.0
for i in range(0, 5):
sum = sum + self.y(i)
return sum
class BetaBernoulliScaleHyperParameters(BetaBernoulliBasicModel):
def scale_alpha(self):
factor = 2.0
for i in range(0, 3):
factor = factor * i
return factor
@bm.random_variable
def theta(self):
return dist.Beta(self.alpha_ + self.scale_alpha(), self.beta_ + 2.0)
| beanmachine-main | tests/ppl/compiler/testlib/conjugate_models.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Parameterized test to compare samples from original
and conjugate prior transformed models"""
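# Background for these checks (standard Beta-Bernoulli conjugacy): with a
# Beta(alpha, beta) prior on theta and n Bernoulli(theta) observations summing
# to s, the posterior is Beta(alpha + s, beta + n - s). A graph rewritten into
# that closed form should therefore sample the same posterior for theta as the
# original model, which is what the KS two-sample test below verifies.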
import random
import pytest
import scipy
import torch
from beanmachine.ppl.inference.bmg_inference import BMGInference
from .conjugate_models import (
BetaBernoulliBasicModel,
BetaBernoulliOpsModel,
BetaBernoulliScaleHyperParameters,
)
_alpha = 2.0
_beta = 2.0
test_models = [
(BetaBernoulliBasicModel, "beta_bernoulli_conjugate_fixer"),
(BetaBernoulliOpsModel, "beta_bernoulli_conjugate_fixer"),
(BetaBernoulliScaleHyperParameters, "beta_bernoulli_conjugate_fixer"),
]
@pytest.mark.parametrize("model, opt", test_models)
def test_samples_with_ks(model, opt):
seed = 0
torch.manual_seed(seed)
random.seed(seed)
num_samples = 3000
num_obs = 4
bmg = BMGInference()
model = model(_alpha, _beta)
observations = model.gen_obs(num_obs)
queries = [model.theta()]
# Generate samples from model when opt is disabled
skip_optimizations = {opt}
posterior_original = bmg.infer(queries, observations, num_samples)
graph_original = bmg.to_dot(
queries, observations, skip_optimizations=skip_optimizations
)
theta_samples_original = posterior_original[model.theta()][0]
# Generate samples from model when opt is enabled
skip_optimizations = set()
posterior_transformed = bmg.infer(
queries, observations, num_samples, 1, skip_optimizations=skip_optimizations
)
graph_transformed = bmg.to_dot(
queries, observations, skip_optimizations=skip_optimizations
)
theta_samples_transformed = posterior_transformed[model.theta()][0]
assert (
graph_original.strip() != graph_transformed.strip()
), "Original and transformed graph should not be identical."
assert type(theta_samples_original) == type(
theta_samples_transformed
), "Sample type of original and transformed model should be the same."
assert len(theta_samples_original) == len(
theta_samples_transformed
), "Sample size of original and transformed model should be the same."
assert (
scipy.stats.ks_2samp(theta_samples_original, theta_samples_transformed).pvalue
>= 0.05
)
| beanmachine-main | tests/ppl/compiler/testlib/fix_beta_conjugacy_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import unittest
import beanmachine.graph as bmg
import numpy as np
def tidy(s: str) -> str:
return "\n".join(c.strip() for c in s.strip().split("\n")).strip()
class TestOperators(unittest.TestCase):
def test_oper_args(self) -> None:
"""
We will test the number of arguments for each operator: 0, 1, 2, 3, etc.
"""
self.maxDiff = None
g = bmg.Graph()
c1 = g.add_constant_real(2.5)
c2 = g.add_constant_real(-1.5)
c3 = g.add_constant_probability(0.5)
c4 = g.add_constant_probability(0.6)
c5 = g.add_constant_probability(0.7)
c6 = g.add_constant_natural(23)
c7 = g.add_constant_bool(False)
c8 = g.add_constant_neg_real(-1.25)
c9 = g.add_constant_pos_real(1.25)
# add constant matrices; operators on matrices will be added later
g.add_constant_bool_matrix(np.array([[True, False], [False, True]]))
g.add_constant_real_matrix(np.array([[-0.1, 0.0], [2.0, -1.0]]))
g.add_constant_natural_matrix(np.array([[1, 2], [0, 999]]))
g.add_constant_pos_matrix(np.array([[0.1, 0.0], [2.0, 1.0]]))
g.add_constant_neg_matrix(np.array(([-0.3, -0.4])))
g.add_constant_probability_matrix(np.array([0.1, 0.9]))
g.add_constant_col_simplex_matrix(np.array([[0.1, 1.0], [0.9, 0.0]]))
with self.assertRaises(ValueError):
g.add_constant_neg_matrix(np.array([[0.1, 0.0], [2.0, -1.0]]))
with self.assertRaises(ValueError):
g.add_constant_pos_matrix(np.array([[0.1, 0.0], [2.0, -1.0]]))
with self.assertRaises(ValueError):
g.add_constant_col_simplex_matrix(np.array([[0.1, 0.0], [2.0, 1.0]]))
with self.assertRaises(ValueError):
g.add_constant_probability_matrix(np.array([1.1, 0.9]))
# test TO_REAL
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.TO_REAL, [])
g.add_operator(bmg.OperatorType.TO_REAL, [c4])
g.add_operator(bmg.OperatorType.TO_REAL, [c6])
g.add_operator(bmg.OperatorType.TO_REAL, [c8])
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.TO_REAL, [c4, c5])
# test EXP
# Exp needs exactly one operand
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.EXP, [])
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.EXP, [c2, c8])
# That operand must be real, negative real or positive real:
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.EXP, [c3]) # prob throws
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.EXP, [c6]) # natural throws
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.EXP, [c7]) # bool throws
g.add_operator(bmg.OperatorType.EXP, [c2]) # real OK
g.add_operator(bmg.OperatorType.EXP, [c8]) # neg_real OK
g.add_operator(bmg.OperatorType.EXP, [c9]) # pos_real OK
# test LOG
# Log needs exactly one operand:
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.LOG, [])
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.LOG, [c1, c2])
# That operand must be positive real or probability:
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.LOG, [c2])
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.LOG, [c6])
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.LOG, [c7])
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.LOG, [c8])
g.add_operator(bmg.OperatorType.LOG, [c3])
# test NEGATE
# Negate needs exactly one operand
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.NEGATE, [])
g.add_operator(bmg.OperatorType.NEGATE, [c2])
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.NEGATE, [c1, c2])
# Negate can take a real, negative real or positive real.
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.NEGATE, [c3])
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.NEGATE, [c6])
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.NEGATE, [c7])
g.add_operator(bmg.OperatorType.NEGATE, [c1])
g.add_operator(bmg.OperatorType.NEGATE, [c8])
# test COMPLEMENT
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.COMPLEMENT, [])
g.add_operator(bmg.OperatorType.COMPLEMENT, [c4])
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.COMPLEMENT, [c4, c4])
g.add_operator(bmg.OperatorType.COMPLEMENT, [c7])
# test MULTIPLY
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.MULTIPLY, [])
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.MULTIPLY, [c3])
g.add_operator(bmg.OperatorType.MULTIPLY, [c4, c5])
g.add_operator(bmg.OperatorType.MULTIPLY, [c3, c4, c5])
# test ADD
# Add requires two or more operands
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.ADD, [])
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.ADD, [c1])
# All operands must be (1) the same type, and (2)
# real, neg real or pos real.
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.ADD, [c1, c8])
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.ADD, [c3, c3])
g.add_operator(bmg.OperatorType.ADD, [c1, c2])
g.add_operator(bmg.OperatorType.ADD, [c1, c2, c1])
g.add_operator(bmg.OperatorType.ADD, [c8, c8, c8])
# test POW
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.POW, [])
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.POW, [c1])
with self.assertRaises(ValueError):
g.add_operator(bmg.OperatorType.POW, [c1, c1, c1])
g.add_operator(bmg.OperatorType.POW, [c1, c2])
observed = g.to_string()
expected = """
0: CONSTANT(real 2.5) (out nodes: 24, 30, 31, 31, 33)
1: CONSTANT(real -1.5) (out nodes: 19, 23, 30, 31, 33)
2: CONSTANT(probability 0.5) (out nodes: 22, 29)
3: CONSTANT(probability 0.6) (out nodes: 16, 26, 28, 29)
4: CONSTANT(probability 0.7) (out nodes: 28, 29)
5: CONSTANT(natural 23) (out nodes: 17)
6: CONSTANT(boolean 0) (out nodes: 27)
7: CONSTANT(negative real -1.25) (out nodes: 18, 20, 25, 32, 32, 32)
8: CONSTANT(positive real 1.25) (out nodes: 21)
9: CONSTANT(matrix<boolean> 1 0
0 1) (out nodes: )
10: CONSTANT(matrix<real> -0.1 0
2 -1) (out nodes: )
11: CONSTANT(matrix<natural> 1 2
0 999) (out nodes: )
12: CONSTANT(matrix<positive real> 0.1 0
2 1) (out nodes: )
13: CONSTANT(matrix<negative real> -0.3
-0.4) (out nodes: )
14: CONSTANT(matrix<probability> 0.1
0.9) (out nodes: )
15: CONSTANT(col_simplex_matrix<probability> 0.1 1
0.9 0) (out nodes: )
16: TO_REAL(3) (out nodes: )
17: TO_REAL(5) (out nodes: )
18: TO_REAL(7) (out nodes: )
19: EXP(1) (out nodes: )
20: EXP(7) (out nodes: )
21: EXP(8) (out nodes: )
22: LOG(2) (out nodes: )
23: NEGATE(1) (out nodes: )
24: NEGATE(0) (out nodes: )
25: NEGATE(7) (out nodes: )
26: COMPLEMENT(3) (out nodes: )
27: COMPLEMENT(6) (out nodes: )
28: MULTIPLY(3, 4) (out nodes: )
29: MULTIPLY(2, 3, 4) (out nodes: )
30: ADD(0, 1) (out nodes: )
31: ADD(0, 1, 0) (out nodes: )
32: ADD(7, 7, 7) (out nodes: )
33: POW(0, 1) (out nodes: )
"""
self.assertEqual(tidy(expected), tidy(observed))
def test_arithmetic(self) -> None:
g = bmg.Graph()
c1 = g.add_constant_natural(3)
o0 = g.add_operator(bmg.OperatorType.TO_REAL, [c1])
o1 = g.add_operator(bmg.OperatorType.NEGATE, [o0])
o2 = g.add_operator(bmg.OperatorType.EXP, [o1]) # positive real
o2_real = g.add_operator(bmg.OperatorType.TO_REAL, [o2])
o3 = g.add_operator(bmg.OperatorType.MULTIPLY, [o2_real, o0])
o4 = g.add_operator(bmg.OperatorType.EXPM1, [o0])
o5 = g.add_operator(bmg.OperatorType.ADD, [o0, o3, o4])
o6 = g.add_operator(bmg.OperatorType.POW, [o5, o0]) # real
# Verify that EXPM1 on a negative real is legal.
o7 = g.add_operator(bmg.OperatorType.NEGATE, [o2])
o8 = g.add_operator(bmg.OperatorType.EXPM1, [o7])
g.query(o6)
g.query(o8)
samples = g.infer(2)
# both samples should have exactly the same value since we are doing
# deterministic operators only
self.assertEqual(type(samples[0][0]), float)
self.assertEqual(samples[0][0], samples[1][0])
# the result should be identical to doing this math directly
const1 = 3.0
r6 = (const1 + math.exp(-const1) * const1 + math.expm1(const1)) ** const1
self.assertAlmostEqual(samples[0][0], r6, 3)
r8 = math.expm1(-math.exp(-const1))
self.assertAlmostEqual(samples[0][1], r8, 3)
def test_probability(self) -> None:
g = bmg.Graph()
c1 = g.add_constant_probability(0.8)
c2 = g.add_constant_probability(0.7)
o1 = g.add_operator(bmg.OperatorType.COMPLEMENT, [c1])
o2 = g.add_operator(bmg.OperatorType.MULTIPLY, [o1, c2])
g.query(o2)
samples = g.infer(2)
self.assertTrue(type(samples[0][0]), float)
self.assertAlmostEqual(samples[0][0], 0.14, 3)
def test_to_probability(self) -> None:
# We have some situations where we know that a real or positive
# real quantity is a probability but we cannot prove it. For
# example, 0.4 * beta_sample + 0.5 is definitely between 0.0 and
# 1.0, but we assume that the sum of two probabilities is a
# positive real.
#
# The to_probability operator takes a real or positive real and
# constrains it to the range (0.0, 1.0)
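# Concretely, as the assertions below show: 0.25 + 0.5 = 0.75 is already a
# valid probability and passes through unchanged, while 0.5 + 0.75 = 1.25
# falls outside the range and comes back as 1.0.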
g = bmg.Graph()
c0 = g.add_constant_real(0.25)
c1 = g.add_constant_real(0.5)
c2 = g.add_constant_real(0.75)
o0 = g.add_operator(bmg.OperatorType.ADD, [c0, c1])
o1 = g.add_operator(bmg.OperatorType.TO_PROBABILITY, [o0])
o2 = g.add_operator(bmg.OperatorType.ADD, [c1, c2])
o3 = g.add_operator(bmg.OperatorType.TO_PROBABILITY, [o2])
g.query(o0)
g.query(o1)
g.query(o2)
g.query(o3)
samples = g.infer(1)
self.assertAlmostEqual(samples[0][0], 0.75, 3)
self.assertAlmostEqual(samples[0][1], 0.75, 3)
self.assertAlmostEqual(samples[0][2], 1.25, 3)
self.assertAlmostEqual(samples[0][3], 1.0, 3)
def test_to_neg_real(self) -> None:
# We have some situations where we know that a real quantity
# is negative but we cannot prove it. For example,
# log(0.4 * beta() + 0.5) is definitely negative but we
# assume that the sum of two probabilities is a positive real,
# and so the log is a real, not a negative real.
#
# The to_neg_real operator takes a real and constrains it to
# be negative.
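# The chain below also exercises LOG1MEXP, which for a negative argument x
# computes log(1 - exp(x)); applied to log(p) this gives log(1 - p), so after
# the final EXP the last assertion can compare ex against 1 - add.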
g = bmg.Graph()
two = g.add_constant_pos_real(2.0)
beta = g.add_distribution(
bmg.DistributionType.BETA, bmg.AtomicType.PROBABILITY, [two, two]
)
s = g.add_operator(bmg.OperatorType.SAMPLE, [beta])
c4 = g.add_constant_probability(0.4)
c5 = g.add_constant_pos_real(0.5)
mult = g.add_operator(bmg.OperatorType.MULTIPLY, [c4, s])
tr = g.add_operator(bmg.OperatorType.TO_POS_REAL, [mult])
add = g.add_operator(bmg.OperatorType.ADD, [tr, c5]) # Positive real
lg = g.add_operator(bmg.OperatorType.LOG, [add]) # Real
tnr = g.add_operator(bmg.OperatorType.TO_NEG_REAL, [lg])
lme = g.add_operator(bmg.OperatorType.LOG1MEXP, [tnr])
ex = g.add_operator(bmg.OperatorType.EXP, [lme])
g.query(add)
g.query(lg)
g.query(tnr)
g.query(ex)
samples = g.infer(1, bmg.InferenceType.NMC)[0]
add_sample = samples[0]
lg_sample = samples[1]
tnr_sample = samples[2]
ex_sample = samples[3]
self.assertTrue(0.5 <= add_sample <= 0.9)
self.assertTrue(lg_sample <= 0.0)
self.assertEqual(lg_sample, tnr_sample)
self.assertAlmostEqual(ex_sample, 1.0 - add_sample, 3)
def test_sample(self) -> None:
# negative test: we can't exponentiate the sample from a Bernoulli
g = bmg.Graph()
c1 = g.add_constant_probability(0.6)
d1 = g.add_distribution(
bmg.DistributionType.BERNOULLI, bmg.AtomicType.BOOLEAN, [c1]
)
s1 = g.add_operator(bmg.OperatorType.SAMPLE, [d1])
with self.assertRaises(ValueError) as cm:
o1 = g.add_operator(bmg.OperatorType.EXP, [s1])
self.assertTrue(
"operator EXP requires a neg_real, real or pos_real parent"
in str(cm.exception)
)
# the proper way to do it is to convert to floating point first
g = bmg.Graph()
c1 = g.add_constant_probability(0.6)
d1 = g.add_distribution(
bmg.DistributionType.BERNOULLI, bmg.AtomicType.BOOLEAN, [c1]
)
s1 = g.add_operator(bmg.OperatorType.SAMPLE, [d1])
o1 = g.add_operator(bmg.OperatorType.TO_REAL, [s1])
# o2 and o3 both compute the same value
o2 = g.add_operator(bmg.OperatorType.EXP, [o1])
o3 = g.add_operator(bmg.OperatorType.EXP, [o1])
# directly negating o3 results in a NEG_REAL value
g.add_operator(bmg.OperatorType.NEGATE, [o3])
# converting o3 to REAL then applying negate results in a REAL value
o3_real = g.add_operator(bmg.OperatorType.TO_REAL, [o3])
o4 = g.add_operator(bmg.OperatorType.NEGATE, [o3_real])
o2_real = g.add_operator(bmg.OperatorType.TO_REAL, [o2])
o5 = g.add_operator(bmg.OperatorType.ADD, [o2_real, o4])
# o5 should be 0 in all possible worlds
g.query(o5)
samples = g.infer(10)
self.assertEqual(type(samples[0][0]), float)
self.assertEqual(
[s[0] for s in samples], [0.0] * 10, "all samples should be zero"
)
| beanmachine-main | tests/graph/operator_test.py |
beanmachine-main | tests/graph/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import unittest
import numpy as np
from beanmachine import graph
class TestNMC(unittest.TestCase):
# see https://www.jstatsoft.org/article/view/v012i03/v12i03.pdf
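# The hierarchical model built below, written out:
#   overall_mean ~ Normal(0, 1000)
#   overall_std ~ HalfCauchy(1000)
#   school_effect_j ~ Normal(overall_mean, overall_std)          for each school j
#   treatment_mean_j ~ Normal(school_effect_j, treatment_std_j)  (observed)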
def test_eight_schools(self):
# For each school, the average treatment effect and the standard deviation
DATA = [
(28.39, 14.9),
(7.94, 10.2),
(-2.75, 16.3),
(6.82, 11.0),
(-0.64, 9.4),
(0.63, 11.4),
(18.01, 10.4),
(12.16, 17.6),
]
# the expected mean and standard deviation of each random variable
EXPECTED = [
(11.1, 9.1),
(7.6, 6.6),
(5.7, 8.4),
(7.1, 7.0),
(5.1, 6.8),
(5.7, 7.3),
(10.4, 7.3),
(8.3, 8.4),
(7.6, 5.9), # overall mean
(6.7, 5.6), # overall std
]
g = graph.Graph()
zero = g.add_constant_real(0.0)
thousand = g.add_constant_pos_real(1000.0)
# overall_mean ~ Normal(0, 1000)
overall_mean_dist = g.add_distribution(
graph.DistributionType.NORMAL, graph.AtomicType.REAL, [zero, thousand]
)
overall_mean = g.add_operator(graph.OperatorType.SAMPLE, [overall_mean_dist])
# overall_std ~ HalfCauchy(1000)
# [note: the original paper had overall_std ~ Uniform(0, 1000)]
overall_std_dist = g.add_distribution(
graph.DistributionType.HALF_CAUCHY, graph.AtomicType.POS_REAL, [thousand]
)
overall_std = g.add_operator(graph.OperatorType.SAMPLE, [overall_std_dist])
# for each school we will add two random variables,
# but first we need to define a distribution
school_effect_dist = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[overall_mean, overall_std],
)
for treatment_mean_value, treatment_std_value in DATA:
# school_effect ~ Normal(overall_mean, overall_std)
school_effect = g.add_operator(
graph.OperatorType.SAMPLE, [school_effect_dist]
)
g.query(school_effect)
# treatment_mean ~ Normal(school_effect, treatment_std)
treatment_std = g.add_constant_pos_real(treatment_std_value)
treatment_mean_dist = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[school_effect, treatment_std],
)
treatment_mean = g.add_operator(
graph.OperatorType.SAMPLE, [treatment_mean_dist]
)
g.observe(treatment_mean, treatment_mean_value)
g.query(overall_mean)
g.query(overall_std)
means = g.infer_mean(3000, graph.InferenceType.NMC)
for idx, (mean, std) in enumerate(EXPECTED):
self.assertTrue(
abs(means[idx] - mean) < std * 0.5,
f"index {idx} expected {mean} +- {std*0.5} actual {means[idx]}",
)
# see https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Bivariate_case
# we are assuming zero mean here for simplicity
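# For reference, the zero-mean bivariate normal log density is (up to a constant)
#   -1/(2*(1 - rho^2)) * (x^2/sigma_x^2 + y^2/sigma_y^2 - 2*rho*x*y/(sigma_x*sigma_y))
# which is exactly what the three EXP_PRODUCT factors below encode via the
# coefficients on x^2, y^2 and x*y.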
def test_bivariate_gaussian(self):
g = graph.Graph()
flat = g.add_distribution(
graph.DistributionType.FLAT, graph.AtomicType.REAL, []
)
x = g.add_operator(graph.OperatorType.SAMPLE, [flat])
y = g.add_operator(graph.OperatorType.SAMPLE, [flat])
x_sq = g.add_operator(graph.OperatorType.MULTIPLY, [x, x])
y_sq = g.add_operator(graph.OperatorType.MULTIPLY, [y, y])
x_y = g.add_operator(graph.OperatorType.MULTIPLY, [x, y])
SIGMA_X = 5.0
SIGMA_Y = 2.0
RHO = 0.7
x_sq_term = g.add_constant_real(-0.5 / (1 - RHO**2) / SIGMA_X**2)
g.add_factor(graph.FactorType.EXP_PRODUCT, [x_sq, x_sq_term])
y_sq_term = g.add_constant_real(-0.5 / (1 - RHO**2) / SIGMA_Y**2)
g.add_factor(graph.FactorType.EXP_PRODUCT, [y_sq, y_sq_term])
x_y_term = g.add_constant_real(RHO / (1 - RHO**2) / SIGMA_X / SIGMA_Y)
g.add_factor(graph.FactorType.EXP_PRODUCT, [x_y, x_y_term])
g.query(x)
g.query(x_sq)
g.query(y)
g.query(y_sq)
g.query(x_y)
# note: there are no observations, so this next line should have no effect
g.remove_observations()
means = g.infer_mean(10000, graph.InferenceType.NMC)
print("means", means) # only printed on error
self.assertTrue(abs(means[0] - 0.0) < 0.2, "mean of x should be 0")
self.assertTrue(
abs(means[1] - SIGMA_X**2) < 0.5, f"mean of x^2 should be {SIGMA_X**2}"
)
self.assertTrue(abs(means[2] - 0.0) < 0.2, "mean of y should be 0")
self.assertTrue(
abs(means[3] - SIGMA_Y**2) < 0.2, f"mean of y^2 should be {SIGMA_Y**2}"
)
post_cov = means[4] / math.sqrt(means[1]) / math.sqrt(means[3])
self.assertTrue(
abs(post_cov - RHO) < 0.2, f"covariance should be {RHO} is {post_cov}"
)
def test_probit_regression(self):
"""
x ~ Normal(0, 1)
y ~ Bernoulli(Phi(x))
P(Phi(x) | y = true) ~ Beta(2, 1)
P(Phi(x) | y = false) ~ Beta(1, 2)
"""
g = graph.Graph()
zero = g.add_constant_real(0.0)
one = g.add_constant_pos_real(1.0)
prior = g.add_distribution(
graph.DistributionType.NORMAL, graph.AtomicType.REAL, [zero, one]
)
x = g.add_operator(graph.OperatorType.SAMPLE, [prior])
phi_x = g.add_operator(graph.OperatorType.PHI, [x])
likelihood = g.add_distribution(
graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [phi_x]
)
y = g.add_operator(graph.OperatorType.SAMPLE, [likelihood])
g.observe(y, True)
phi_x_sq = g.add_operator(graph.OperatorType.MULTIPLY, [phi_x, phi_x])
g.query(phi_x)
g.query(phi_x_sq)
means = g.infer_mean(10000, graph.InferenceType.NMC)
post_var = means[1] - means[0] ** 2
self.assertAlmostEqual(
means[0],
2 / (2 + 1),
msg=f"posterior mean {means[0]} is not accurate",
delta=0.01,
)
self.assertAlmostEqual(
post_var,
2 * 1 / (2 + 1) ** 2 / (2 + 1 + 1),
2,
f"posterior variance {post_var} is not accurate",
)
# now test P(Phi(x) | y = false) ~ Beta(1, 2)
g.remove_observations()
g.observe(y, False)
means = g.infer_mean(10000, graph.InferenceType.NMC)
post_var = means[1] - means[0] ** 2
self.assertAlmostEqual(
means[0], 1 / (1 + 2), 2, f"posterior mean {means[0]} is not accurate"
)
self.assertAlmostEqual(
post_var,
1 * 2 / (1 + 2) ** 2 / (1 + 2 + 1),
2,
f"posterior variance {post_var} is not accurate",
)
def test_clara_gp(self):
"""
CLARA-GP model
f() ~ GP(0, squared_exp_covar)
for each labeler l:
spec_l ~ Beta(SPEC_ALPHA, SPEC_BETA)
sens_l ~ Beta(SENS_ALPHA, SENS_BETA)
for each item i
violating_i ~ Bernoulli(Phi(f(i)))
for each labeler l
if violating_i
prob_i_l = sens_l
else
prob_i_l = 1 - spec_l
label_i_l ~ Bernoulli(prob_i_l)
"""
ALPHA = 1.0
RHO = 0.1
SENS_ALPHA = 9.0
SENS_BETA = 1.0
SPEC_ALPHA = 9.5
SPEC_BETA = 0.5
NUM_LABELERS = 2
SCORES = np.array([0.1, 0.2, 0.3])
ITEM_LABELS = [[False, False], [False, True], [True, True]]
# see https://mc-stan.org/docs/2_19/functions-reference/covariance.html for
# a reference on this covariance function
covar = ALPHA**2 * np.exp(
-((np.expand_dims(SCORES, 1) - SCORES) ** 2) / 2 / RHO**2
)
tau = np.linalg.inv(covar) # the precision matrix
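# The factors added below implement the GP prior log density up to a constant:
#   log N(f; 0, covar) = -1/2 * f^T * tau * f + const
#                      = sum_i (-0.5 * tau[i,i] * f_i^2) + sum_{i<j} (-tau[i,j] * f_i * f_j) + const
# which is why the diagonal terms get a -0.5 factor and each off-diagonal term
# (counted once for i < j) gets a -1.0 factor.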
g = graph.Graph()
# first we will create f ~ GP
flat = g.add_distribution(
graph.DistributionType.FLAT, graph.AtomicType.REAL, []
)
f = [g.add_operator(graph.OperatorType.SAMPLE, [flat]) for _ in SCORES]
for i in range(len(SCORES)):
tau_i_i = g.add_constant_real(-0.5 * tau[i, i])
g.add_factor(graph.FactorType.EXP_PRODUCT, [tau_i_i, f[i], f[i]])
for j in range(i + 1, len(SCORES)):
tau_i_j = g.add_constant_real(-1.0 * tau[i, j])
g.add_factor(graph.FactorType.EXP_PRODUCT, [tau_i_j, f[i], f[j]])
# for each labeler l:
# spec_l ~ Beta(SPEC_ALPHA, SPEC_BETA)
# sens_l ~ Beta(SENS_ALPHA, SENS_BETA)
spec_alpha = g.add_constant_pos_real(SPEC_ALPHA)
spec_beta = g.add_constant_pos_real(SPEC_BETA)
spec_prior = g.add_distribution(
graph.DistributionType.BETA,
graph.AtomicType.PROBABILITY,
[spec_alpha, spec_beta],
)
sens_alpha = g.add_constant_pos_real(SENS_ALPHA)
sens_beta = g.add_constant_pos_real(SENS_BETA)
sens_prior = g.add_distribution(
graph.DistributionType.BETA,
graph.AtomicType.PROBABILITY,
[sens_alpha, sens_beta],
)
spec, comp_spec, sens = [], [], []
for labeler in range(NUM_LABELERS):
spec.append(g.add_operator(graph.OperatorType.SAMPLE, [spec_prior]))
comp_spec.append(
g.add_operator(graph.OperatorType.COMPLEMENT, [spec[labeler]])
)
sens.append(g.add_operator(graph.OperatorType.SAMPLE, [sens_prior]))
# for each item i
for i, labels in enumerate(ITEM_LABELS):
# violating_i ~ Bernoulli(Phi(f(i)))
dist_i = g.add_distribution(
graph.DistributionType.BERNOULLI,
graph.AtomicType.BOOLEAN,
[g.add_operator(graph.OperatorType.PHI, [f[i]])],
)
violating_i = g.add_operator(graph.OperatorType.SAMPLE, [dist_i])
# for each labeler l
for lidx, label_val in enumerate(labels):
# if violating_i
# prob_i_l = sens_l
# else
# prob_i_l = 1 - spec_l
prob_i_l = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[violating_i, sens[lidx], comp_spec[lidx]],
)
# label_i_l ~ Bernoulli(prob_i_l)
dist_i_l = g.add_distribution(
graph.DistributionType.BERNOULLI,
graph.AtomicType.BOOLEAN,
[prob_i_l],
)
label_i_l = g.add_operator(graph.OperatorType.SAMPLE, [dist_i_l])
g.observe(label_i_l, label_val)
g.query(violating_i)
means = g.infer_mean(1000, graph.InferenceType.NMC)
self.assertLess(means[0], means[1])
self.assertLess(means[1], means[2])
def test_uncoupled_bools(self):
"""
X_1 ~ Bernoulli(0.5)
X_2 ~ Bernoulli(0.5)
P(X_1 == X_2) = 0.5
"""
g = graph.Graph()
half = g.add_constant_probability(0.5)
bernoulli = g.add_distribution(
graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [half]
)
X_1 = g.add_operator(graph.OperatorType.SAMPLE, [bernoulli])
X_2 = g.add_operator(graph.OperatorType.SAMPLE, [bernoulli])
g.query(X_1)
g.query(X_2)
prob_equal = (
sum(x == y for (x, y) in g.infer(100000, graph.InferenceType.NMC)) / 100000
)
self.assertAlmostEqual(prob_equal, 0.5, delta=0.01)
def test_coupled_bools(self):
"""
X_1 ~ Bernoulli(0.5)
X_2 ~ Bernoulli(0.5)
sigma_1 = 1 if X_1 else -1
sigma_2 = 1 if X_2 else -1
target += sigma_1 * sigma_2
P(X_1 == X_2) = e / (e + e^-1)
"""
g = graph.Graph()
half = g.add_constant_probability(0.5)
bernoulli = g.add_distribution(
graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [half]
)
X_1 = g.add_operator(graph.OperatorType.SAMPLE, [bernoulli])
X_2 = g.add_operator(graph.OperatorType.SAMPLE, [bernoulli])
plus_one = g.add_constant_real(1.0)
minus_one = g.add_constant_real(-1.0)
sigma_1 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE, [X_1, plus_one, minus_one]
)
sigma_2 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE, [X_2, plus_one, minus_one]
)
g.add_factor(graph.FactorType.EXP_PRODUCT, [sigma_1, sigma_2])
g.query(X_1)
g.query(X_2)
prob_equal = (
sum(x == y for (x, y) in g.infer(100000, graph.InferenceType.NMC)) / 100000
)
self.assertAlmostEqual(prob_equal, 0.88, delta=0.01)
@classmethod
def create_GPfactor(cls, bmg, alpha, rho, scores, mu=0.0):
# see https://mc-stan.org/docs/2_19/functions-reference/covariance.html for
# a reference on this covariance function
covar = alpha**2 * np.exp(
-((np.expand_dims(scores, 1) - scores) ** 2) / 2 / rho**2
)
tau = np.linalg.inv(covar) # the precision matrix
neg_mu = bmg.add_constant_real(-mu)
# f ~ GP
flat = bmg.add_distribution(
graph.DistributionType.FLAT, graph.AtomicType.REAL, []
)
f = [bmg.add_operator(graph.OperatorType.SAMPLE, [flat]) for _ in scores]
if mu == 0.0:
f_centered = f
else:
f_centered = [
bmg.add_operator(graph.OperatorType.ADD, [fi, neg_mu]) for fi in f
]
for i in range(len(scores)):
tau_i_i = bmg.add_constant_real(-0.5 * tau[i, i])
bmg.add_factor(
graph.FactorType.EXP_PRODUCT, [tau_i_i, f_centered[i], f_centered[i]]
)
for j in range(i + 1, len(scores)):
tau_i_j = bmg.add_constant_real(-1.0 * tau[i, j])
bmg.add_factor(
graph.FactorType.EXP_PRODUCT,
[tau_i_j, f_centered[i], f_centered[j]],
)
return f
@classmethod
def sum_negate_nodes(cls, bmg, in_nodes):
result = bmg.add_operator(
graph.OperatorType.NEGATE,
[
bmg.add_operator(
graph.OperatorType.TO_REAL,
[bmg.add_operator(graph.OperatorType.ADD, in_nodes)],
)
],
)
return result
def test_clara_gp_logit(self):
"""
CLARA-GP model with prev, sens, spec in logit space
f_prev() ~ GP(0, squared_exp_covar)
f_sens() ~ GP(logit(0.9), squared_exp_covar)
f_spec() ~ GP(logit(0.95), squared_exp_covar)
for each item i
log_prev_i = -log1pexp(-f_prev(i)) # log(prev_i)
log_comp_prev_i = -log1pexp(f_prev(i)) # log(1 - prev_i)
# assume all labelers share the same sens and spec
# so sens and spec depend only on the score, indexed by i
log_spec_i = -log1pexp(-f_spec(i))
log_comp_spec_i = -log1pexp(f_spec(i))
log_sens_i = -log1pexp(-f_sens(i))
log_comp_sens_i = -log1pexp(f_sens(i))
loglik1, loglik2 = log_prev_i, log_comp_prev_i
for each label
loglik1 += label_i_l ? log_sens_i : log_comp_sens_i
loglik2 += label_i_l ? log_comp_spec_i : log_spec_i
add factor:
logsumexp(loglik1, loglik2)
"""
ALPHA = 1.0
RHO = 0.1
SPEC_MU = 2.9 # logit(0.95)
SENS_MU = 2.2 # logit(0.9)
# NUM_LABELERS = 2
SCORES = np.array([0.1, 0.2, 0.3])
ITEM_LABELS = [[False, False], [False, True], [True, True]]
# create f ~ GP
g = graph.Graph()
f_prev = self.create_GPfactor(g, ALPHA, RHO, SCORES)
f_spec = self.create_GPfactor(g, ALPHA, RHO, SCORES, SPEC_MU)
f_sens = self.create_GPfactor(g, ALPHA, RHO, SCORES, SENS_MU)
# for each factor:
# -log(p) = log1pexp(-f)
# -log(1-p) = log1pexp(f)
# note: the following log_* are negative log probabilities,
# negate right before LOGSUMEXP
# for each item i
for i, labels in enumerate(ITEM_LABELS):
# in this test case, we assume labelers share the same spec and sens
log_spec = g.add_operator(
graph.OperatorType.LOG1PEXP,
[g.add_operator(graph.OperatorType.NEGATE, [f_spec[i]])],
)
log_comp_spec = g.add_operator(graph.OperatorType.LOG1PEXP, [f_spec[i]])
log_sens = g.add_operator(
graph.OperatorType.LOG1PEXP,
[g.add_operator(graph.OperatorType.NEGATE, [f_sens[i]])],
)
log_comp_sens = g.add_operator(graph.OperatorType.LOG1PEXP, [f_sens[i]])
log_prev = g.add_operator(
graph.OperatorType.LOG1PEXP,
[g.add_operator(graph.OperatorType.NEGATE, [f_prev[i]])],
)
log_comp_prev = g.add_operator(graph.OperatorType.LOG1PEXP, [f_prev[i]])
loglik1, loglik2 = [log_prev], [log_comp_prev]
# for each labeler l
for label_val in labels:
if label_val:
loglik1.append(log_sens)
loglik2.append(log_comp_spec)
else:
loglik1.append(log_comp_sens)
loglik2.append(log_spec)
loglik1 = self.sum_negate_nodes(g, loglik1)
loglik2 = self.sum_negate_nodes(g, loglik2)
g.add_factor(
graph.FactorType.EXP_PRODUCT,
[g.add_operator(graph.OperatorType.LOGSUMEXP, [loglik1, loglik2])],
)
g.query(f_prev[i])
means = g.infer_mean(1000, graph.InferenceType.NMC)
self.assertLess(means[0], means[1])
self.assertLess(means[1], means[2])
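# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original test suite): a small numerical check
# of the factorization used in create_GPfactor above. Up to an additive
# constant, the GP log prior is
#     log N(f; mu, Sigma) ~ -0.5 * (f - mu)^T Tau (f - mu)
#                         = sum_i -0.5 * Tau[i, i] * (f_i - mu)^2
#                           + sum_{i < j} -Tau[i, j] * (f_i - mu) * (f_j - mu),
# which matches the exponents of the EXP_PRODUCT factors built above (each
# factor is assumed to contribute exp(product of its inputs) to the density).
# All names below are local to this sketch.
def _gp_factorization_sketch(alpha=1.0, rho=0.1, mu=0.0):
    import numpy as np  # re-imported locally so the sketch stands alone

    scores = np.array([0.1, 0.2, 0.3])
    covar = alpha**2 * np.exp(
        -((np.expand_dims(scores, 1) - scores) ** 2) / 2 / rho**2
    )
    tau = np.linalg.inv(covar)  # the precision matrix
    f = np.array([0.3, -0.2, 0.5])  # an arbitrary point at which to compare
    centered = f - mu
    quadratic_form = -0.5 * centered @ tau @ centered
    factor_sum = sum(
        -0.5 * tau[i, i] * centered[i] ** 2 for i in range(len(scores))
    ) + sum(
        -tau[i, j] * centered[i] * centered[j]
        for i in range(len(scores))
        for j in range(i + 1, len(scores))
    )
    assert abs(quadratic_form - factor_sum) < 1e-8
    return quadratic_form, factor_sum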
| beanmachine-main | tests/graph/nmc_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
from beanmachine import graph
class TestBayesNet(unittest.TestCase):
def test_simple_dep(self):
g = graph.Graph()
c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2]))
d1 = g.add_distribution(
graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1]
)
g.add_operator(graph.OperatorType.SAMPLE, [d1])
def test_tabular(self):
g = graph.Graph()
c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2]))
# negative test
with self.assertRaises(ValueError) as cm:
g.add_distribution(
graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, []
)
self.assertTrue("must be COL_SIMPLEX" in str(cm.exception))
g = graph.Graph()
c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2]))
var1 = g.add_operator(
graph.OperatorType.SAMPLE,
[
g.add_distribution(
graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1]
)
],
)
var2 = g.add_operator(
graph.OperatorType.SAMPLE,
[
g.add_distribution(
graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1]
)
],
)
# since the following has two parents it must have a tabular dist with
# 3 dimensions in the tensor
with self.assertRaises(ValueError) as cm:
g.add_operator(
graph.OperatorType.SAMPLE,
[
g.add_distribution(
graph.DistributionType.TABULAR,
graph.AtomicType.BOOLEAN,
[c1, var1, var2],
)
],
)
self.assertTrue("expected 4 dims got 1" in str(cm.exception))
c2 = g.add_constant_col_simplex_matrix(np.array([[0.6, 0.99], [0.4, 0.01]]))
g.add_distribution(
graph.DistributionType.TABULAR,
graph.AtomicType.BOOLEAN,
[c2, g.add_constant_bool(True)],
)
with self.assertRaises(ValueError) as cm:
g.add_distribution(
graph.DistributionType.TABULAR,
graph.AtomicType.BOOLEAN,
[c2, g.add_constant_natural(1)],
)
self.assertTrue("only supports boolean parents" in str(cm.exception))
c3 = g.add_constant_real_matrix(np.array([1.1, -0.1]))
with self.assertRaises(ValueError) as cm:
g.add_distribution(
graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c3]
)
self.assertTrue("must be COL_SIMPLEX" in str(cm.exception))
c4 = g.add_constant_col_simplex_matrix(np.array([0.6, 0.3, 0.1]))
with self.assertRaises(ValueError) as cm:
g.add_distribution(
graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c4]
)
self.assertTrue("must have two rows" in str(cm.exception))
def test_bernoulli(self):
g = graph.Graph()
c1 = g.add_constant_probability(1.0)
c2 = g.add_constant_probability(0.8)
# negative tests on number of parents
# 0 parents not allowed
with self.assertRaises(ValueError) as cm:
g.add_distribution(
graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, []
)
self.assertTrue(
"Bernoulli distribution must have exactly one parent" in str(cm.exception)
)
# 2 parents not allowed
with self.assertRaises(ValueError) as cm:
g.add_distribution(
graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c1, c2]
)
self.assertTrue(
"Bernoulli distribution must have exactly one parent" in str(cm.exception)
)
# 1 parent is OK
d1 = g.add_distribution(
graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c1]
)
# negative test on type of parent
c3 = g.add_constant_natural(1)
with self.assertRaises(ValueError) as cm:
g.add_distribution(
graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c3]
)
self.assertTrue("must be a probability" in str(cm.exception))
# negative test on value of parent
with self.assertRaises(ValueError) as cm:
g.add_constant_probability(1.1)
self.assertTrue("must be between 0 and 1" in str(cm.exception))
v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1])
g.query(v1)
samples = g.infer(1)
self.assertEqual(type(samples[0][0]), bool)
self.assertTrue(samples[0][0])
means = g.infer_mean(1)
self.assertEqual(len(means), 1, "exactly one node queried")
def test_beta(self):
g = graph.Graph()
c1 = g.add_constant_pos_real(1.1)
c2 = g.add_constant_pos_real(5.0)
# negative tests on number of parents
# 0 parents not allowed
with self.assertRaises(ValueError) as cm:
g.add_distribution(
graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, []
)
self.assertTrue(
"Beta distribution must have exactly two parents" in str(cm.exception)
)
# 1 parent not allowed
with self.assertRaises(ValueError) as cm:
g.add_distribution(
graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [c1]
)
self.assertTrue(
"Beta distribution must have exactly two parents" in str(cm.exception)
)
# negative test on type of parent
c3 = g.add_constant_bool(True)
with self.assertRaises(ValueError) as cm:
g.add_distribution(
graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [c3, c3]
)
self.assertTrue("must be positive real-valued" in str(cm.exception))
# negative test on sample type
with self.assertRaises(ValueError) as cm:
g.add_distribution(
graph.DistributionType.BETA, graph.AtomicType.REAL, [c1, c2]
)
self.assertTrue("Beta produces probability samples" in str(cm.exception))
# 2 real-valued parents with probability sample type are OK
d1 = g.add_distribution(
graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [c1, c2]
)
# now let's draw some samples from the Beta distribution
v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1])
g.query(v1)
samples = g.infer(1, graph.InferenceType.REJECTION)
self.assertEqual(type(samples[0][0]), float)
self.assertTrue(samples[0][0] > 0 and samples[0][0] < 1)
means = g.infer_mean(10000, graph.InferenceType.REJECTION)
self.assertAlmostEqual(means[0], 1.1 / (1.1 + 5.0), 2, "beta mean")
def test_binomial(self):
g = graph.Graph()
c1 = g.add_constant_natural(10)
c2 = g.add_constant_probability(0.55)
d1 = g.add_distribution(
graph.DistributionType.BINOMIAL, graph.AtomicType.NATURAL, [c1, c2]
)
v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1])
g.query(v1)
samples = g.infer(1, graph.InferenceType.REJECTION)
self.assertEqual(type(samples[0][0]), int)
self.assertTrue(samples[0][0] <= 10)
means = g.infer_mean(10000, graph.InferenceType.REJECTION)
self.assertTrue(means[0] > 5 and means[0] < 6)
def test_categorical(self):
g = graph.Graph()
simplex = [0.5, 0.25, 0.125, 0.125]
c1 = g.add_constant_col_simplex_matrix(np.array(simplex))
# Negative test: Number of parents must be exactly one:
with self.assertRaises(ValueError) as cm:
g.add_distribution(
graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, []
)
self.assertTrue(
"Categorical distribution must have exactly one parent" in str(cm.exception)
)
with self.assertRaises(ValueError) as cm:
g.add_distribution(
graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, [c1, c1]
)
self.assertEqual(
"Categorical distribution must have exactly one parent", str(cm.exception)
)
# Negative test: parent must be simplex:
c3 = g.add_constant_natural(1)
with self.assertRaises(ValueError) as cm:
g.add_distribution(
graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, [c3]
)
self.assertEqual(
"Categorical parent must be a one-column simplex", str(cm.exception)
)
# Negative test: type must be natural
with self.assertRaises(ValueError) as cm:
g.add_distribution(
graph.DistributionType.CATEGORICAL, graph.AtomicType.REAL, [c1]
)
self.assertEqual(
"Categorical produces natural valued samples", str(cm.exception)
)
# Positive test:
d1 = g.add_distribution(
graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, [c1]
)
v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1])
g.query(v1)
num_samples = 10000
# TODO: We use rejection sampling in this test because at present NMC
# does not support inference over naturals. If inference over discrete
# variables is important for BMG, we should create a Uniform Proposer
# similar to how it's done in Bean Machine proper.
samples = g.infer(
num_samples=num_samples,
algorithm=graph.InferenceType.REJECTION,
seed=123,
n_chains=1,
)[0]
# The distribution of the samples should closely match the simplex used to
# generate them.
histogram = [0, 0, 0, 0]
for sample in samples:
histogram[sample[0]] += 1
self.assertAlmostEqual(simplex[0], histogram[0] / num_samples, delta=0.01)
self.assertAlmostEqual(simplex[1], histogram[1] / num_samples, delta=0.01)
self.assertAlmostEqual(simplex[2], histogram[2] / num_samples, delta=0.01)
self.assertAlmostEqual(simplex[3], histogram[3] / num_samples, delta=0.01)
def _create_graph(self):
g = graph.Graph()
c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2]))
c2 = g.add_constant_col_simplex_matrix(np.array([[0.6, 0.99], [0.4, 0.01]]))
c3 = g.add_constant_col_simplex_matrix(
np.transpose(np.array([[1, 0], [0.2, 0.8], [0.1, 0.9], [0.01, 0.99]]))
)
Rain = g.add_operator(
graph.OperatorType.SAMPLE,
[
g.add_distribution(
graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1]
)
],
)
Sprinkler = g.add_operator(
graph.OperatorType.SAMPLE,
[
g.add_distribution(
graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c2, Rain]
)
],
)
GrassWet = g.add_operator(
graph.OperatorType.SAMPLE,
[
g.add_distribution(
graph.DistributionType.TABULAR,
graph.AtomicType.BOOLEAN,
[c3, Sprinkler, Rain],
)
],
)
return g, Rain, Sprinkler, GrassWet
def test_query(self):
g, Rain, Sprinkler, GrassWet = self._create_graph()
g.query(Rain)
g.query(Sprinkler)
g.query(GrassWet)
g.infer(1)
p = g.add_constant_probability(0.8)
b = g.add_distribution(
graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [p]
)
# Querying a constant is weird but allowed
g.query(p)
# But querying a distribution directly rather than a sample is
# illegal:
with self.assertRaises(ValueError) as cm:
g.query(b)
self.assertEqual(
f"Query of node_id {b} expected a node of type 1 or 3 but is 2",
str(cm.exception),
)
def test_to_dot(self):
self.maxDiff = None
g, Rain, Sprinkler, GrassWet = self._create_graph()
g.query(Rain)
g.query(Sprinkler)
g.query(GrassWet)
g.observe(GrassWet, True)
observed = g.to_dot()
expected = """
digraph "graph" {
N0[label="simplex"];
N1[label="simplex"];
N2[label="simplex"];
N3[label="Tabular"];
N4[label="~"];
N5[label="Tabular"];
N6[label="~"];
N7[label="Tabular"];
N8[label="~"];
N0 -> N3;
N1 -> N5;
N2 -> N7;
N3 -> N4;
N4 -> N5;
N4 -> N7;
N5 -> N6;
N6 -> N7;
N7 -> N8;
O0[label="Observation"];
N8 -> O0;
Q0[label="Query"];
N4 -> Q0;
Q1[label="Query"];
N6 -> Q1;
Q2[label="Query"];
N8 -> Q2;
}"""
self.assertEqual(expected.strip(), observed.strip())
def test_observe(self):
g, Rain, Sprinkler, GrassWet = self._create_graph()
g.observe(GrassWet, True)
with self.assertRaises(ValueError) as cm:
g.observe(GrassWet, True)
self.assertTrue("duplicate observe for node" in str(cm.exception))
g = graph.Graph()
c1 = g.add_constant_probability(1.0)
c2 = g.add_constant_probability(0.5)
o1 = g.add_operator(graph.OperatorType.MULTIPLY, [c1, c2])
d1 = g.add_distribution(
graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [o1]
)
o2 = g.add_operator(graph.OperatorType.SAMPLE, [d1])
with self.assertRaises(ValueError) as cm:
g.observe(o1, True)
self.assertTrue(
"only SAMPLE and IID_SAMPLE nodes may be observed" in str(cm.exception)
)
g.observe(o2, True) # ok to observe this node
with self.assertRaises(ValueError) as cm:
g.observe(o2, False)
self.assertTrue("duplicate observe" in str(cm.exception))
g.remove_observations()
g.observe(o2, False)
def test_inference(self):
g, Rain, Sprinkler, GrassWet = self._create_graph()
g.observe(GrassWet, True)
qr = g.query(Rain)
g.query(GrassWet)
# Querying the same node twice is idempotent.
self.assertEqual(g.query(Rain), qr)
samples = g.infer(1)
self.assertTrue(len(samples) == 1)
# since we have observed grass wet is true the query should be true
self.assertEqual(type(samples[0][1]), bool)
self.assertTrue(samples[0][1])
# test parallel inference
samples_all = g.infer(num_samples=1, n_chains=2)
self.assertTrue(len(samples_all) == 2)
self.assertTrue(len(samples_all[0]) == 1)
self.assertTrue(len(samples_all[1]) == 1)
self.assertEqual(samples[0][0], samples_all[0][0][0])
self.assertEqual(samples[0][1], samples_all[0][0][1])
self.assertEqual(type(samples_all[1][0][0]), bool)
self.assertEqual(type(samples_all[1][0][1]), bool)
self.assertTrue(samples_all[1][0][1])
def test_infer_mean(self):
g = graph.Graph()
c1 = g.add_constant_probability(1.0)
op1 = g.add_operator(graph.OperatorType.MULTIPLY, [c1, c1])
d1 = g.add_distribution(
graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [op1]
)
op2 = g.add_operator(graph.OperatorType.SAMPLE, [d1])
g.query(op1)
g.query(op2)
means = g.infer_mean(100)
self.assertAlmostEqual(means[0], 1.0)
self.assertAlmostEqual(means[1], 1.0)
# test parallel inference
means_all = g.infer_mean(num_samples=100, n_chains=2)
self.assertTrue(len(means_all) == 2)
self.assertAlmostEqual(means_all[0][0], 1.0)
self.assertAlmostEqual(means_all[0][1], 1.0)
self.assertAlmostEqual(means_all[1][0], 1.0)
self.assertAlmostEqual(means_all[1][1], 1.0)
def test_neg_real(self):
g = graph.Graph()
with self.assertRaises(ValueError) as cm:
g.add_constant_neg_real(1.25)
self.assertTrue("neg_real must be <=0" in str(cm.exception))
neg1 = g.add_constant_neg_real(-1.25)
expected = """
0: CONSTANT(negative real -1.25) (out nodes: )
"""
self.assertEqual(g.to_string().strip(), expected.strip())
add_negs = g.add_operator(graph.OperatorType.ADD, [neg1, neg1])
g.query(add_negs)
means = g.infer_mean(10)
self.assertAlmostEqual(means[0], -2.5)
samples = g.infer(10)
self.assertAlmostEqual(samples[0][0], -2.5)
def test_get_log_prob(self):
g, Rain, Sprinkler, GrassWet = self._create_graph()
g.observe(GrassWet, True)
g.query(Rain)
g.query(GrassWet)
conf = graph.InferConfig()
conf.keep_log_prob = True
g.infer(
num_samples=10,
algorithm=graph.InferenceType.GIBBS,
seed=123,
n_chains=2,
infer_config=conf,
)
log_probs = g.get_log_prob()
self.assertEqual(len(log_probs), 2)
self.assertEqual(len(log_probs[0]), 10)
def test_graph_stats(self):
g = graph.Graph()
c1 = g.add_constant_natural(10)
c2 = g.add_constant_probability(0.55)
d1 = g.add_distribution(
graph.DistributionType.BINOMIAL, graph.AtomicType.NATURAL, [c1, c2]
)
g.add_operator(graph.OperatorType.SAMPLE, [d1])
stats = g.collect_statistics()
self.maxDiff = None
expected = """
Graph Statistics Report
#######################
Number of nodes: 4
Number of edges: 3
Graph density: 0.25
Number of root nodes: 2
Number of terminal nodes: 1
Maximum no. of incoming edges into a node: 2
Maximum no. of outgoing edges from a node: 1
Node statistics:
################
CONSTANT: 2
\tRoot nodes: 2
\tConstant node statistics:
\t-------------------------
\t\tPROBABILITY and SCALAR: 1
\t\tNATURAL and SCALAR: 1
\t\tDistribution of incoming edges:
\t\t-------------------------------
\t\tNodes with 0 edges: 2
\t\tDistribution of outgoing edges:
\t\t-------------------------------
\t\tNodes with 1 edges: 2
DISTRIBUTION: 1
\tNo root or terminal nodes
\tDistribution node statistics:
\t-----------------------------
\t\tBINOMIAL: 1
\t\tDistribution of incoming edges:
\t\t-------------------------------
\t\tNodes with 2 edges: 1
\t\tDistribution of outgoing edges:
\t\t-------------------------------
\t\tNodes with 1 edges: 1
OPERATOR: 1
\tTerminal nodes: 1
\tOperator node statistics:
\t-------------------------
\t\tSAMPLE: 1
\t\tDistribution of incoming edges:
\t\t-------------------------------
\t\tNodes with 1 edges: 1
\t\tDistribution of outgoing edges:
\t\t-------------------------------
\t\tNodes with 0 edges: 1
Edge statistics:
################
\tDistribution of incoming edges:
\t-------------------------------
\tNodes with 0 edges: 2
\tNodes with 1 edges: 1
\tNodes with 2 edges: 1
\tDistribution of outgoing edges:
\t-------------------------------
\tNodes with 0 edges: 1
\tNodes with 1 edges: 3
"""
self.assertEqual(stats.strip(), expected.strip())
class TestContinuousModels(unittest.TestCase):
def test_product_distribution(self):
g = graph.Graph()
MEAN0 = -5.0
STD0 = 1.0
real0 = g.add_constant_real(MEAN0)
pos0 = g.add_constant_pos_real(STD0)
normal_dist0 = g.add_distribution(
graph.DistributionType.NORMAL, graph.AtomicType.REAL, [real0, pos0]
)
real1 = g.add_operator(graph.OperatorType.SAMPLE, [normal_dist0])
STD1 = 2.0
pos1 = g.add_constant_pos_real(STD1)
normal_dist1 = g.add_distribution(
graph.DistributionType.NORMAL, graph.AtomicType.REAL, [real1, pos1]
)
MEAN2 = 5.0
STD2 = 2.0
real2 = g.add_constant_real(MEAN2)
pos2 = g.add_constant_pos_real(STD2)
normal_dist2 = g.add_distribution(
graph.DistributionType.NORMAL, graph.AtomicType.REAL, [real2, pos2]
)
product_dist1 = g.add_distribution(
graph.DistributionType.PRODUCT,
graph.AtomicType.REAL,
[normal_dist1, normal_dist2],
)
product_sample1 = g.add_operator(graph.OperatorType.SAMPLE, [product_dist1])
product_sample2 = g.add_operator(graph.OperatorType.SAMPLE, [product_dist1])
product_sample3 = g.add_operator(graph.OperatorType.SAMPLE, [product_dist1])
g.observe(product_sample1, -1.0)
g.observe(product_sample2, 0.0)
g.observe(product_sample3, 1.0)
g.query(real1)
default_config = graph.InferConfig()
samples = g.infer(
num_samples=10000,
algorithm=graph.InferenceType.NMC,
seed=5123401,
n_chains=1,
infer_config=default_config,
)
chain = 0
variable = 0
values = [sample_tuple[variable] for sample_tuple in samples[chain]]
mean = sum(values) / len(values)
print(mean)
        expected = -2.848 # obtained from the same test run in C++
self.assertAlmostEqual(mean, expected, delta=0.1)
| beanmachine-main | tests/graph/graph_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import unittest
import numpy as np
from beanmachine import graph
class TestCAVI(unittest.TestCase):
def test_interface(self):
g = graph.Graph()
c1 = g.add_constant_probability(0.1)
d1 = g.add_distribution(
graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c1]
)
o1 = g.add_operator(graph.OperatorType.SAMPLE, [d1])
g.query(o1)
mean_vars = g.variational(100, 10, elbo_samples=100)
self.assertEqual(len(mean_vars), 1, "number of queries")
self.assertEqual(len(mean_vars[0]), 1, "each parameter must be mean")
elbo = g.get_elbo()
self.assertEqual(len(elbo), 100, "one ELBO value per iteration")
mean_vars = g.variational(100, 10) # elbo_samples=0 by default
elbo = g.get_elbo()
self.assertEqual(len(elbo), 0, "ELBO not computed unless requested")
def build_graph1(self):
"""
o1 ~ Bernoulli( 0.1 )
o2 ~ Bernoulli( exp( - o1 ) )
infer P(o1 | o2 = True)
now, P(o1 = T, o2 = T) = 0.1 * exp(-1) = 0.036787944117144235
and, P(o1 = F, o2 = T) = 0.9 * exp(0) = 0.9
=> P(o1 = True | o2 = True) = 0.03927030055005057
also P(o2 = True) = 0.9367879441171443 >= ELBO
"""
g = graph.Graph()
c1 = g.add_constant_probability(0.1)
d1 = g.add_distribution(
graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c1]
)
o1 = g.add_operator(graph.OperatorType.SAMPLE, [d1])
c2 = g.add_constant_col_simplex_matrix(
np.array([[0.0, 1 - math.exp(-1)], [1.0, math.exp(-1)]])
)
d2 = g.add_distribution(
graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c2, o1]
)
o2 = g.add_operator(graph.OperatorType.SAMPLE, [d2])
g.observe(o2, True)
g.query(o1)
return g
def test_cavi1(self):
g = self.build_graph1()
means = g.variational(1, 1, elbo_samples=100)
self.assertAlmostEqual(0.039, means[0][0], 2, "posterior mean")
elbo = g.get_elbo()
self.assertGreater(math.log(0.94), elbo[0], "ELBO")
def test_gibbs1(self):
g = self.build_graph1()
samples = g.infer(1000, graph.InferenceType.GIBBS)
means = np.array(samples, dtype=float).mean(axis=0)
self.assertGreater(means[0].item(), 0.03)
self.assertLess(means[0].item(), 0.05)
def build_graph2(self):
"""
This is a simplified noisy-or model
X ~ Bernoulli(0.01)
Y ~ Bernoulli(0.01)
Z ~ Bernoulli(1 - exp( log(0.99) + log(0.01)*X + log(0.01)*Y ))
Note: the last line is equivalent to:
Z ~ BernoulliNoisyOr( - ( log(0.99) + log(0.01)*X + log(0.01)*Y ) )
OR
Z ~ BernoulliNoisyOr( -log(0.99) + (-log(0.01))*X + (-log(0.01))*Y ) )
query (X, Y) observe Z = True
X Y P(X, Y, Z=T) P(X, Y | Z=T)
---------------------------------
F F 0.009801 0.3322
F T 0.009802 0.3322
T F 0.009802 0.3322
T T 0.0000999901 0.0034
P(Z=T) = 0.029505, thus ELBO <= log(.029505) = -3.5232
Let Q(X) = Q(Y) = Bernoulli(q); The KL-Divergence as a function of q is:
kl = lambda q: (1-q)**2 * (2*log(1-q)-log(.3322))
+ 2*q*(1-q)*(log(q)+log(1-q)-log(.3322)) + q**2 * (2*log(q)-log(.0034))
KL Divergence is minimized at q=0.245, and kl(.245) = .2635
And max ELBO = log P(Z=T) - kl(.245) = -3.7867
"""
g = graph.Graph()
c_prior = g.add_constant_probability(0.01)
d_prior = g.add_distribution(
graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c_prior]
)
x = g.add_operator(graph.OperatorType.SAMPLE, [d_prior])
y = g.add_operator(graph.OperatorType.SAMPLE, [d_prior])
pos_x = g.add_operator(graph.OperatorType.TO_POS_REAL, [x])
pos_y = g.add_operator(graph.OperatorType.TO_POS_REAL, [y])
c_m_log_pt01 = g.add_constant_pos_real(-(math.log(0.01)))
c_m_log_pt99 = g.add_constant_pos_real(-(math.log(0.99)))
param = g.add_operator(
graph.OperatorType.ADD,
[
c_m_log_pt99,
g.add_operator(graph.OperatorType.MULTIPLY, [c_m_log_pt01, pos_x]),
g.add_operator(graph.OperatorType.MULTIPLY, [c_m_log_pt01, pos_y]),
],
)
d_like = g.add_distribution(
graph.DistributionType.BERNOULLI_NOISY_OR, graph.AtomicType.BOOLEAN, [param]
)
z = g.add_operator(graph.OperatorType.SAMPLE, [d_like])
g.observe(z, True)
g.query(x)
g.query(y)
return g
def test_gibbs2(self):
g = self.build_graph2()
samples = np.array(g.infer(10000, graph.InferenceType.GIBBS), dtype=float)
x_marginal = samples.mean(axis=0)[0]
y_marginal = samples.mean(axis=0)[1]
x_y_joint = (samples[:, 0] * samples[:, 1]).mean()
self.assertAlmostEqual(
x_marginal, y_marginal, 1, "posterior marginal of x and y are nearly equal"
)
self.assertAlmostEqual(x_marginal, 0.33, 1, "posterior x is 0.33")
self.assertLess(x_y_joint, 0.01, "joint posterior of x and y < 0.01")
def test_cavi2(self):
g = self.build_graph2()
means = g.variational(100, 1000, elbo_samples=1000)
self.assertAlmostEqual(
means[0][0], means[1][0], 1, "X and Y have same variational posterior"
)
self.assertAlmostEqual(means[0][0], 0.245, 1, "X posterior is ?")
elbo = g.get_elbo()
self.assertAlmostEqual(elbo[-1], -3.7867, 1, "ELBO converged")
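# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original test suite): a direct numerical check
# of the hand-derived numbers quoted in build_graph2's docstring above. With
# Q(X) = Q(Y) = Bernoulli(q), the KL divergence from the true posterior is the
# kl(q) expression in that docstring, and the best achievable ELBO is
# log P(Z=T) - min_q kl(q). `math` is already imported at the top of this file.
def _cavi_elbo_bound_sketch():
    def kl(q):
        return (
            (1 - q) ** 2 * (2 * math.log(1 - q) - math.log(0.3322))
            + 2 * q * (1 - q) * (math.log(q) + math.log(1 - q) - math.log(0.3322))
            + q**2 * (2 * math.log(q) - math.log(0.0034))
        )

    # crude grid search for the minimizing q; should land near 0.245
    qs = [i / 1000 for i in range(1, 1000)]
    q_star = min(qs, key=kl)
    best_elbo = math.log(0.029505) - kl(q_star)  # should be close to -3.7867
    return q_star, best_elbo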
| beanmachine-main | tests/graph/cavi_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__version__ = "0.2.0"
| beanmachine-main | src/beanmachine/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
The file contains MiniBM, a minimal implementation of Bean Machine PPL with a Metropolis
Hastings implementation and a coin flipping model at the end. It is standalone, in that
MiniBM does not depend on the Bean Machine framework at all. The only two dependencies
for MiniBM are the PyTorch library and tqdm (for progress bar).
"""
from __future__ import annotations
import itertools
import random
from collections import defaultdict
from functools import wraps
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple
import torch
import torch.distributions as dist
from tqdm.auto import tqdm
class RVIdentifier(NamedTuple):
"""
A struct whose attributes uniquely identifies a random variable in Bean Machine.
Args:
wrapper: A reference to the decorated random variable function
        args: Arguments taken by the random variable function.
"""
wrapper: Callable
args: Tuple
@property
def function(self):
"""A pointer to the original function that returns the distribution object"""
        return self.wrapper.__wrapped__ # the original function, preserved by functools.wraps
def random_variable(f: Callable[Any, dist.Distribution]):
"""A decorator that convert a Python function that returns a distribution into a
function that evaluates to a Bean Machine random variable.
In Bean Machine, a @random_variable function can be used in two ways:
1. When being invoked outside of an inference scope, it returns an RVIdentifier
without evaluating the original function.
2. During inference, or when being invoked from another random variable, the
function will update the graph (if needed) and return its value at state of
inference.
For example::
@random_variable
def foo():
return dist.Normal(0., 1.0)
print(foo()) # RVIdentifier(wrapper=foo, args=())
@random_variable
def bar():
            mean = foo() # evaluates to value of foo() during inference
return dist.Normal(mean, 1.0)
Args:
f: A function that returns a PyTorch Distribution object
"""
@wraps(f)
def wrapper(*args):
rvid = RVIdentifier(wrapper, args)
# Bean Machine inference methods use the World class to store and control the
# state of inference
world = get_world_context()
if world is None:
# We're not in an active inference. Return an ID for the random variable
return rvid
else:
# Update the graph and return current value of the random variable in world
return world.update_graph(rvid)
return wrapper
RVDict = Dict[RVIdentifier, torch.Tensor] # alias for typing
WORLD_STACK: List[World] = []
def get_world_context() -> Optional[World]:
"""Returns the active World (if any) or None"""
return WORLD_STACK[-1] if WORLD_STACK else None
class World:
"""
A World is Bean Machine's internal representation of a state of the model. At the
high level, it stores a value for each of the random variables. It can also be used
as a context manager to control the behavior of random variables. For example::
@random_variable
def foo():
return dist.Normal(0., 1.0)
@random_variable
def bar():
return dist.Normal(foo(), 1.0)
        # initialize world and add bar() and its ancestors to it
world = World.initialize_world([bar()])
world[bar()] # returns the value of bar() in world
world[foo()] # since foo() is bar()'s parent, it is also initialized in world
# World is also used within inference as a context manager to control the
# behavior of random variable
with world:
foo() # returns the value of foo() in world, which equals to world[foo()]
"""
def __init__(self, observations: Optional[RVDict] = None):
self.observations: RVDict = observations or {}
self.variables: RVDict = {}
def __getitem__(self, node: RVIdentifier) -> torch.Tensor:
return self.variables[node]
def __enter__(self) -> World:
WORLD_STACK.append(self)
return self
def __exit__(self, *args) -> None:
WORLD_STACK.pop()
def update_graph(self, node: RVIdentifier) -> torch.Tensor:
"""Update the graphy by adding node to self (if needed) and retuurn the value
of node in self."""
if node not in self.variables:
# parent nodes will be invoked when calling node.get_distribution
distribution = self.get_distribution(node)
if node in self.observations:
self.variables[node] = self.observations[node]
else:
self.variables[node] = distribution.sample()
return self.variables[node]
def replace(self, values: RVDict) -> World:
"""Return a new world where the values of the random variables are replaced by
the provided values"""
new_world = World(self.observations)
new_world.variables = {**self.variables, **values}
return new_world
def log_prob(self) -> torch.Tensor:
"""Return the joint log prob on all random variables in the world"""
log_prob = torch.tensor(0.0)
for node, value in self.variables.items():
distribution = self.get_distribution(node)
log_prob += distribution.log_prob(value).sum()
return log_prob
def get_distribution(self, node: RVIdentifier) -> dist.Distribution:
"""A utility method that activate the current world and invoke the function
associated with node. Bean Machine requires random variable functions to return
a distribution object, so this method will also return a distribution object."""
with self:
return node.function(*node.args)
@staticmethod
def initialize_world(
queries: List[RVIdentifier], observations: Optional[RVDict] = None
) -> World:
"""Initializes and returns a new world. Starting from the queries and
observations, the parent nodes will be added recursively to the world."""
observations = observations or {}
world = World(observations)
for node in itertools.chain(queries, observations):
world.update_graph(node)
return world
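# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original MiniBM): a minimal demonstration of
# the two behaviors of @random_variable described above. Outside of an active
# World the call simply returns an RVIdentifier; after World.initialize_world
# the node has been sampled and its value can be read back from the world.
def _random_variable_usage_sketch():
    @random_variable
    def loc():
        return dist.Normal(0.0, 1.0)

    rvid = loc()  # no active World, so this is just an identifier
    assert isinstance(rvid, RVIdentifier)

    world = World.initialize_world([loc()])
    value = world[loc()]  # the value sampled for loc() when the world was built
    return rvid, value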
class MetropolisHastings:
"""A naive implementation of the `Metropolis-Hastings algorithm
<https://en.wikipedia.org/wiki/Metropolis%E2%80%93Hastings_algorithm>`_"""
def infer(
self,
queries: List[RVIdentifier],
observations: Optional[RVDict],
num_samples: int,
) -> RVDict:
world = World.initialize_world(queries, observations)
samples = defaultdict(list)
# the main inference loop
for _ in tqdm(range(num_samples)):
            latent_nodes = list(world.variables.keys() - world.observations.keys())
            random.shuffle(latent_nodes)
# randomly select a node to be updated at a time
for node in latent_nodes:
proposer_distribution = world.get_distribution(node)
new_value = proposer_distribution.sample()
new_world = world.replace({node: new_value})
backward_distribution = new_world.get_distribution(node)
# log P(x, y)
old_log_prob = world.log_prob()
# log P(x', y)
new_log_prob = new_world.log_prob()
# log g(x'|x)
forward_log_prob = proposer_distribution.log_prob(new_value).sum()
# log g(x|x')
backward_log_prob = backward_distribution.log_prob(world[node]).sum()
accept_log_prob = (
new_log_prob + backward_log_prob - old_log_prob - forward_log_prob
)
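                # Metropolis-Hastings acceptance: the proposal is accepted with
                # probability min(1, exp(accept_log_prob)), i.e.
                # min(1, P(x', y) g(x|x') / (P(x, y) g(x'|x))).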
if torch.bernoulli(accept_log_prob.exp().clamp(max=1)):
# accept the new state
world = new_world
# collect the samples before moving to the next iteration
for node in queries:
samples[node].append(world[node])
# stack the list of tensors into a single tensor
samples = {node: torch.stack(samples[node]) for node in samples}
return samples
def main():
    # coin flipping model adapted from our tutorial
# (https://beanmachine.org/docs/overview/tutorials/Coin_flipping/CoinFlipping/)
@random_variable
def weight():
return dist.Beta(2, 2)
@random_variable
def y():
return dist.Bernoulli(weight()).expand((N,))
# data generation
true_weight = 0.75
true_y = dist.Bernoulli(true_weight)
N = 100
y_obs = true_y.sample((N,))
print("Head rate:", y_obs.mean())
# running inference
samples = MetropolisHastings().infer([weight()], {y(): y_obs}, num_samples=500)
print("Estimated weight of the coin:", samples[weight()].mean())
if __name__ == "__main__":
main()
| beanmachine-main | src/beanmachine/minibm.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch.distributions import Distribution
from . import experimental
from .diagnostics import Diagnostics
from .diagnostics.common_statistics import effective_sample_size, r_hat, split_r_hat
from .diagnostics.tools import viz
from .inference import (
CompositionalInference,
empirical,
GlobalHamiltonianMonteCarlo,
GlobalNoUTurnSampler,
seed,
simulate,
SingleSiteAncestralMetropolisHastings,
SingleSiteHamiltonianMonteCarlo,
SingleSiteNewtonianMonteCarlo,
SingleSiteNoUTurnSampler,
SingleSiteRandomWalk,
SingleSiteUniformMetropolisHastings,
)
from .model import (
functional,
get_beanmachine_logger,
param,
random_variable,
RVIdentifier,
)
LOGGER = get_beanmachine_logger()
# TODO(@neerajprad): Remove once T81756389 is fixed.
Distribution.set_default_validate_args(False)
__all__ = [
"CompositionalInference",
"Diagnostics",
"GlobalHamiltonianMonteCarlo",
"GlobalNoUTurnSampler",
"Predictive",
"RVIdentifier",
"SingleSiteAncestralMetropolisHastings",
"SingleSiteHamiltonianMonteCarlo",
"SingleSiteNewtonianMonteCarlo",
"SingleSiteNoUTurnSampler",
"SingleSiteRandomWalk",
"SingleSiteUniformMetropolisHastings",
"effective_sample_size",
"empirical",
"experimental",
"functional",
"seed",
"param",
"r_hat",
"random_variable",
"simulate",
"split_r_hat",
"viz",
]
| beanmachine-main | src/beanmachine/ppl/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| beanmachine-main | src/beanmachine/ppl/experimental/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from enum import Enum
from typing import Callable
from beanmachine.ppl.inference.proposer.nnc import nnc_jit
class TorchJITBackend(Enum):
NONE = "none"
NNC = "nnc"
INDUCTOR = "inductor"
# TODO (T135789755): update the API to select between backends when we move this
# integration out of experimental.
def get_backend(
nnc_compile: bool, experimental_inductor_compile: bool
) -> TorchJITBackend:
"""A helper function to select between the Torch JIT backends based on the
flags"""
if experimental_inductor_compile:
if nnc_compile:
warnings.warn(
"Overriding nnc_compile option with experimental_inductor_compile",
stacklevel=3,
)
warnings.warn(
"The support of TorchInductor is experimental and the API is "
"subject to change in the future releases of Bean Machine. For "
"questions regarding TorchInductor, please see "
"https://github.com/pytorch/torchdynamo.",
stacklevel=3,
)
return TorchJITBackend.INDUCTOR
elif nnc_compile:
return TorchJITBackend.NNC
else:
return TorchJITBackend.NONE
def inductor_jit(f: Callable) -> Callable:
"""
A helper function that lazily imports the TorchInductor utils and the
related libraries, then invoke functorch to JIT compile the provided
function.
"""
    # Lazily import related libraries so that users who don't have them (e.g. from
    # using an older version of PyTorch) won't run into a ModuleNotFoundError when
    # importing Bean Machine
from functorch.compile import aot_function
from torch._inductor.compile_fx import compile_fx_inner
from torch._inductor.decomposition import select_decomp_table
return aot_function(f, compile_fx_inner, decompositions=select_decomp_table())
def jit_compile(f: Callable, backend: TorchJITBackend) -> Callable:
if backend is TorchJITBackend.NNC:
return nnc_jit(f)
elif backend is TorchJITBackend.INDUCTOR:
return inductor_jit(f)
else:
# Fall back to use PyTorch
return f
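# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): how the two helpers above
# are typically combined. With both flags off, get_backend returns
# TorchJITBackend.NONE and jit_compile hands the function back unchanged, so
# no NNC/Inductor machinery is touched.
def _jit_backend_usage_sketch():
    import torch  # local import; this module does not otherwise need torch

    backend = get_backend(nnc_compile=False, experimental_inductor_compile=False)
    assert backend is TorchJITBackend.NONE

    compiled = jit_compile(lambda x: x + 1.0, backend)
    return compiled(torch.tensor(2.0))  # tensor(3.)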
| beanmachine-main | src/beanmachine/ppl/experimental/torch_jit_backend.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import gpytorch as gpt
import torch
from botorch.models.gpytorch import GPyTorchModel
from botorch.posteriors.gpytorch import GPyTorchPosterior
class SimpleGP(gpt.models.ExactGP, GPyTorchModel):
"""
    GPyTorch model that supports Bean Machine sampling and broadcasting semantics.
    In train mode, BM priors may be specified over GP parameters. In eval mode,
    this object acts as a GPyTorch model and generates predictions using GPyTorch's
    prediction strategies. For an example, see the [tutorial](link:TODO)
"""
def __init__(self, x_train, y_train, mean, kernel, likelihood, *args, **kwargs):
super().__init__(x_train, y_train, likelihood)
self.mean = mean
self.kernel = kernel
def forward(self, data, *args, **kwargs):
"""
        Default forward defining a GP prior. Should be overridden by a child class.
"""
mean = self.mean(data)
cov = self.kernel(data)
return gpt.distributions.MultivariateNormal(mean, cov)
def bm_load_samples(self, rv_dict):
"""
Loads tensors from a dict keyed on module name and valued by tensor
whose shape is (num_samples, sample_shape). See `~gpytorch.Module.initialize`.
:param rv_dict: Python dict keyed on module name and valued by tensor
whose shape is (num_samples, sample_shape)
"""
self.pyro_load_from_samples(rv_dict)
class BoTorchGP(SimpleGP, GPyTorchModel):
"""
    Experimental module that is compatible with BoTorch.

    Illustrative usage (names such as `nuts`, `kernel`, `lengthscale_prior`,
    `get_acquisition_function`, `x_train` and `new_input` are assumed to be
    defined elsewhere)::

        samples = nuts.infer(queries, obs, num_samples).get_chain(0)
        gp.eval()
        gp.bm_load_samples({"kernel.lengthscale": samples[lengthscale_prior()]})
        from botorch.acquisition.objective import IdentityMCObjective
        acqf = get_acquisition_function("qEI", gp, IdentityMCObjective(), x_train)
        new_point = acqf(new_input).mean()
"""
def __init__(self, x_train, y_train, *args, **kwargs):
super().__init__(x_train, y_train, *args, **kwargs)
if y_train.dim() > 1:
self._num_outputs = y_train.shape[-1]
else:
self._num_outputs = 1
def posterior(self, data, observation_noise=False, **kwargs):
"""
Returns the posterior conditioned on new data. Used in BoTorch.
See `~botorch.models.model.Model.posterior`.
:param data: a `torch.Tensor` containing test data of shape `(batch, data_dim)`.
:returns: `~botorch.posteriors.gpytorch.GPytorchPosterior` MultivariateNormal
distribution.
"""
self.eval()
try:
mvn = self(data, batch_shape=(data.shape[0],))
except AttributeError as e:
raise AttributeError(
"Running in eval mode but one of the parameters is still"
"a BM random variable. Did you `bm_load_samples`? \n" + str(e)
)
if observation_noise is not False:
if torch.is_tensor(observation_noise):
# TODO: Make sure observation noise is transformed correctly
self._validate_tensor_args(X=data, Y=observation_noise)
if observation_noise.shape[-1] == 1:
observation_noise = observation_noise.squeeze(-1)
mvn = self.likelihood(mvn, data, noise=observation_noise)
else:
mvn = self.likelihood(mvn, data)
return GPyTorchPosterior(mvn=mvn)
| beanmachine-main | src/beanmachine/ppl/experimental/gp/models.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict
import beanmachine.ppl as bm
import gpytorch
from beanmachine.ppl import RVIdentifier
from beanmachine.ppl.world import get_world_context
def _trace_bm(module, name_to_rv=None, is_tracing=True, memo=None, prefix=""):
"Adapted from https://github.com/cornellius-gp/gpytorch/blob/master/gpytorch/module.py#L470"
if memo is None:
memo = set()
if name_to_rv is None:
name_to_rv = {}
if hasattr(module, "_priors"):
for prior_name, (prior, closure, setting_closure) in module._priors.items():
if prior is not None and prior not in memo:
if setting_closure is None:
raise RuntimeError(
"Cannot perform fully Bayesian inference without a setting_closure for each prior,"
f" but the following prior had none: {prior_name}, {prior}."
)
memo.add(prior_name)
prior = prior.expand(closure(module).shape)
rv_name = prefix + ("." if prefix else "") + prior_name
if is_tracing:
# tracing pass, no enclosing World
def f():
return prior
f.__name__ = rv_name
rv = bm.random_variable(f)
name_to_rv[rv_name] = rv()
else:
# sampling pass, must be enclosed by World
world = get_world_context()
assert (
world is not None
), "Expected enclosing World context for bm.random_variable priors"
value = world.update_graph(name_to_rv[rv_name])
setting_closure(module, value)
for mname, module_ in module.named_children():
submodule_prefix = prefix + ("." if prefix else "") + mname
_, child_name_to_rv = _trace_bm(
module=module_,
name_to_rv=name_to_rv,
is_tracing=is_tracing,
memo=memo,
prefix=submodule_prefix,
)
name_to_rv.update(child_name_to_rv)
return module, name_to_rv
def make_prior_random_variables(
module: gpytorch.module.Module,
) -> Dict[str, RVIdentifier]:
"""
    Recurses through `module` and its children's `._priors`, creating `bm.random_variable`s
for each prior. Returns a map from prior names to `random_variable`s.
"""
return _trace_bm(module, name_to_rv=None, is_tracing=True)[1]
def bm_sample_from_prior(
model: gpytorch.module.Module,
name_to_rv: Dict[str, RVIdentifier],
) -> gpytorch.module.Module:
"""
    Samples from `model` with parameters drawn by invoking the
    `random_variable`s in `name_to_rv` that correspond to the module's priors.
"""
return _trace_bm(model, name_to_rv, is_tracing=False)[0]
| beanmachine-main | src/beanmachine/ppl/experimental/gp/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| beanmachine-main | src/beanmachine/ppl/experimental/causal_inference/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| beanmachine-main | src/beanmachine/ppl/experimental/causal_inference/models/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional, Union
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import (
TreeStructureError,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.mutation import (
GrowMutation,
PruneMutation,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.node import (
LeafNode,
SplitNode,
)
class Tree:
"""
Encapsulates a tree structure where each node is either a nonterminal `SplitNode` or a terminal `LeafNode`.
This class consists of methods to track and modify overall tree structure.
Args:
nodes: List of nodes comprising the tree.
"""
def __init__(self, nodes: List[Union[LeafNode, SplitNode]]):
self._nodes = nodes
def num_nodes(self) -> int:
"""
Returns the total number of nodes in the tree.
"""
return len(self._nodes)
def leaf_nodes(self) -> List[LeafNode]:
"""
Returns a list of all of the leaf nodes in the tree.
"""
return [node for node in self._nodes if isinstance(node, LeafNode)]
def growable_leaf_nodes(self, X: torch.Tensor) -> List[LeafNode]:
"""
List of all leaf nodes in the tree which can be grown in a non-degenerate way
i.e. such that not all values in the column of the covariate matrix are duplicates
conditioned on the rules of that node.
Args:
X: Input / covariate matrix.
"""
return [node for node in self.leaf_nodes() if node.is_growable(X)]
def num_growable_leaf_nodes(self, X: torch.Tensor) -> int:
"""
Returns the number of nodes which can be grown in the tree.
"""
return len(self.growable_leaf_nodes(X))
def split_nodes(self) -> List[SplitNode]:
"""
List of internal `SplitNode`s in the tree.
"""
return [node for node in self._nodes if isinstance(node, SplitNode)]
def prunable_split_nodes(self) -> List[SplitNode]:
"""
List of decision nodes in the tree that are suitable for pruning
        i.e., `SplitNode`s that have two terminal `LeafNode` children.
"""
return [node for node in self.split_nodes() if node.is_prunable()]
def num_prunable_split_nodes(self) -> int:
"""
Number of prunable split nodes in tree.
"""
return len(self.prunable_split_nodes())
def predict(self, X: torch.Tensor) -> torch.Tensor:
"""
        Generate a set of predictions with the same dimensionality as the target array.
        Note that the prediction is from one tree, so it represents only (1 / number_of_trees) of the target.
"""
prediction = torch.zeros((len(X), 1), dtype=torch.float)
for leaf in self.leaf_nodes():
prediction[leaf.composite_rules.condition_on_rules(X)] = leaf.predict()
return prediction
def mutate(self, mutation: Union[GrowMutation, PruneMutation]) -> None:
"""
Apply a change to the structure of the tree.
Args:
mutation: The mutation to apply to the tree.
Only grow and prune mutations are accepted.
"""
if isinstance(mutation, PruneMutation):
self._remove_node(mutation.old_node.left_child)
self._remove_node(mutation.old_node.right_child)
self._remove_node(mutation.old_node)
self._add_node(mutation.new_node)
elif isinstance(mutation, GrowMutation):
self._remove_node(mutation.old_node)
self._add_node(mutation.new_node)
self._add_node(mutation.new_node.left_child)
self._add_node(mutation.new_node.right_child)
else:
raise TreeStructureError("Only Grow and Prune mutations are valid.")
for node in self._nodes:
if node.right_child == mutation.old_node:
node._right_child = mutation.new_node
if node.left_child == mutation.old_node:
node._left_child = mutation.new_node
def _remove_node(self, node: Optional[Union[LeafNode, SplitNode]] = None) -> None:
"""
Remove a single node from the tree non-recursively.
Only drops the node and not any children.
"""
if node is not None:
self._nodes.remove(node)
def _add_node(self, node: Optional[Union[LeafNode, SplitNode]] = None) -> None:
"""
Add a node to the tree non-recursively.
Only adds the node and does not link it to any node.
"""
if node is not None:
self._nodes.append(node)
| beanmachine-main | src/beanmachine/ppl/experimental/causal_inference/models/bart/tree.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC
from dataclasses import dataclass
from typing import Union
from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import (
GrowError,
PruneError,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.node import (
LeafNode,
SplitNode,
)
@dataclass
class Mutation(ABC):
"""
A data class for storing the nodes before and after a mutation to a tree.
These mutations are applied to traverse the space of tree structures. The possible mutations considered here are:
- **Grow**: Where a `LeafNode` of the tree is split based on a decision rule, turning it into an internal `SplitNode`.
- **Prune**: Where an internal `SplitNode` with only terminal children is converted into a `LeafNode`.
These steps constitute the Grow-Prune approach of Pratola [1] where the additional steps of
BART (Change and Swap) are eliminated.
Reference:
[1] Pratola MT, Chipman H, Higdon D, McCulloch R, Rust W (2013). “Parallel Bayesian Additive Regression Trees.”
Technical report, University of Chicago.
https://arxiv.org/pdf/1309.1906.pdf
Args:
old_node: The node before mutation.
new_node: The node after mutation.
"""
__slots__ = ["old_node", "new_node"]
def __init__(
self,
old_node: Union[SplitNode, LeafNode],
new_node: Union[SplitNode, LeafNode],
):
self.old_node = old_node
self.new_node = new_node
@dataclass
class PruneMutation(Mutation):
"""Encapsulates the prune action where an internal `SplitNode` with only terminal children
is converted into a `LeafNode`.
Args:
old_node: The node before mutation.
new_node: The node after mutation.
"""
def __init__(self, old_node: SplitNode, new_node: LeafNode):
"""
Raises:
PruneError: if the prune mutation is invalid.
"""
if not isinstance(old_node, SplitNode) or not old_node.is_prunable():
raise PruneError("Pruning only valid on prunable SplitNodes")
if not isinstance(new_node, LeafNode):
raise PruneError("Pruning can only create a LeafNode")
super().__init__(old_node, new_node)
@dataclass
class GrowMutation(Mutation):
"""Encapsulates the grow action where a `LeafNode` of the tree is split based on a decision rule,
turning it into an internal `SplitNode`.
Args:
old_node: The node before mutation.
new_node: The node after mutation.
"""
def __init__(self, old_node: LeafNode, new_node: SplitNode):
"""
Raises:
GrowError: if the grow mutation is invalid.
"""
if not isinstance(old_node, LeafNode):
raise GrowError("Can only grow LeafNodes")
if not isinstance(new_node, SplitNode):
raise GrowError("Growing a LeafNode turns it into a SplitNode")
super().__init__(old_node, new_node)
| beanmachine-main | src/beanmachine/ppl/experimental/causal_inference/models/bart/mutation.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABCMeta, abstractmethod
import torch
from .tree import Tree
class TreeProposer(metaclass=ABCMeta):
@abstractmethod
def propose(self, tree: Tree, X: torch.Tensor) -> Tree:
raise NotImplementedError
| beanmachine-main | src/beanmachine/ppl/experimental/causal_inference/models/bart/tree_proposer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| beanmachine-main | src/beanmachine/ppl/experimental/causal_inference/models/bart/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections import Counter
from math import log
from typing import cast, List, NamedTuple, Optional, Tuple
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.node import LeafNode
from beanmachine.ppl.experimental.causal_inference.models.bart.scalar_samplers import (
LeafMean,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
Operator,
SplitRule,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.tree import Tree
from torch import multinomial
class CutPoint(NamedTuple):
dim: int
cut_val: float
class SortedInvariants(NamedTuple):
O_: torch.Tensor
uniq_vals: List[List[float]]
val_counts: List[Counter]
class GrowFromRootTreeProposer:
"""
Implements the "Grow-from-root" backfitting algorithm as described in [1].
Reference:
[1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees"
https://arxiv.org/abs/1810.02215
"""
def __init__(self):
self.num_cuts = None
self.num_null_cuts = None
def propose(
self,
X: torch.Tensor,
partial_residual: torch.Tensor,
m: int,
w: torch.Tensor,
sigma_val: float,
leaf_sampler: LeafMean,
alpha: float,
beta: float,
root_node: LeafNode,
num_cuts: int,
num_null_cuts: int,
) -> Tuple[Tree, torch.Tensor]:
"""
Propose a new tree and modified Dirichlet weights based on the grow-from-root algorithm [1].
Reference:
[1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees"
https://arxiv.org/abs/1810.02215
Args:
X: Training data / covariate matrix of shape (num_observations, input_dimensions).
partial_residual: Residual vector of shape (num_observations, 1).
m: Number of input dimensions / variables to sample. This is usually a subset of the total number of input dimensions in the input data.
w: Vector of weights or probabilities of picking an input dimension.
            sigma_val: Current value of noise standard deviation.
leaf_sampler: A sampler to sample the posterior distribution of leaf means.
alpha: Hyperparameter controlling the prior probability of a node being terminal as discussed in [1].
beta: Hyperparameter controlling the prior probability of a node being terminal as discussed in [1].
root_node: Root of the tree to grow.
num_cuts: Number of cuts to make along each dimensions.
num_null_cuts: Weighting given to the no-split cut along each dimension as discussed in [1].
"""
if num_cuts <= 0:
raise ValueError("num_cuts has to be nonnegative")
self.num_cuts = num_cuts
if num_null_cuts <= 0 or num_null_cuts >= num_cuts:
raise ValueError(
"num_null_cuts has to be greater than or equal to 1 and lesser than total number of cuts"
)
self.num_null_cuts = num_null_cuts
O_ = self._presort(X)
uniq_vals, val_counts = self._get_uniq_elems(X, O_)
root_invariants = SortedInvariants(
O_=O_, uniq_vals=uniq_vals, val_counts=val_counts
)
all_leaf_nodes = []
variable_counts = [0 for _ in range(X.shape[-1])]
self._grow_from_root(
current_node=root_node,
X=X,
partial_residual=partial_residual,
invariants=root_invariants,
m=m,
w=w,
sigma_val=sigma_val,
leaf_sampler=leaf_sampler,
alpha=alpha,
beta=beta,
all_leaf_nodes=all_leaf_nodes,
variable_counts=variable_counts,
)
out_tree = Tree(nodes=all_leaf_nodes)
return out_tree, torch.Tensor(variable_counts)
def _presort(self, X: torch.Tensor) -> torch.Tensor:
"""
        Presort the input data to generate the O matrix as discussed in section 3.2 of [1].
Reference:
[1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees"
https://arxiv.org/abs/1810.02215
Args:
X: Training data / covariate matrix of shape (num_observations, input_dimensions).
"""
num_observations, num_dims = X.shape
O_ = torch.sort(X, 0)[-1]
return torch.transpose(O_, dim0=0, dim1=1)
def _get_uniq_elems(self, X: torch.Tensor, O_: torch.Tensor) -> Tuple[list, list]:
"""
Get the unique values along every input dimension and the counts for each unique value.
Args:
X: Training data / covariate matrix of shape (num_observations, input_dimensions).
            O_: Index matrix of shape (input_dimensions, num_observations) containing the indices
                of input data sorted along each dimension.
"""
num_dims, num_observations = O_.shape
uniq_vals = []
val_counts = []
for inp_dim in range(num_dims):
dim_uniq_vals = []
value_counter = Counter()
for obs in range(num_observations):
current_val = X[O_[inp_dim, obs], inp_dim].item()
if obs == 0 or (current_val > X[O_[inp_dim, obs - 1], inp_dim]):
dim_uniq_vals.append(current_val)
value_counter[current_val] += 1
uniq_vals.append(dim_uniq_vals)
val_counts.append(value_counter)
return uniq_vals, val_counts
def _grow_from_root(
self,
current_node: LeafNode,
X: torch.Tensor,
partial_residual: torch.Tensor,
invariants: SortedInvariants,
m: int,
w: torch.Tensor,
sigma_val: float,
leaf_sampler: LeafMean,
alpha: float,
beta: float,
all_leaf_nodes: List[LeafNode],
variable_counts: List[int],
):
"""
Implement the recursive grow-from-root strategy proposed in [1].
Reference:
[1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees"
https://arxiv.org/abs/1810.02215
Args:
current_node: The node being mutated.
X: Training data / covariate matrix of shape (num_observations, input_dimensions).
partial_residual: Residual vector of shape (num_observations, 1).
invariants: The sorted index matrix and unique values and unique counts used to maintain sorted order.
m: Number of input dimensions / variables to sample. This is usually a subset of the total number of input dimensions in the input data.
w: Vector of weights or probabilities of picking an input dimension.
            sigma_val: Current value of noise standard deviation.
leaf_sampler: A sampler to sample the posterior distribution of leaf means.
alpha: Hyperparameter controlling the prior probability of a node being terminal as discussed in [1].
beta: Hyperparameter controlling the prior probability of a node being terminal as discussed in [1].
            all_leaf_nodes: All the leaf nodes of the grown tree.
            variable_counts: The number of times each input dimension / variable has been split while growing this tree.
"""
dims_to_sample = self._sample_variables(m=m, w=w)
cut_points = self._select_cutpoints(
candidate_dims=dims_to_sample, uniq_vals=invariants.uniq_vals
)
sampled_cut_point = self._sample_cut_point(
candidate_cut_points=cut_points,
invariants=invariants,
partial_residual=partial_residual,
sigma_val=sigma_val,
leaf_sampler=leaf_sampler,
current_node=current_node,
alpha=alpha,
beta=beta,
)
if sampled_cut_point is None:
current_node.val = leaf_sampler.sample_posterior(
X=X, y=partial_residual, current_sigma_val=sigma_val, node=current_node
)
all_leaf_nodes.append(current_node)
return
variable_counts[sampled_cut_point.dim] += 1
left_rule, right_rule = SplitRule(
grow_dim=sampled_cut_point.dim,
grow_val=sampled_cut_point.cut_val,
operator=Operator.le,
), SplitRule(
grow_dim=sampled_cut_point.dim,
grow_val=sampled_cut_point.cut_val,
operator=Operator.gt,
)
new_node = LeafNode.grow_node(
current_node, left_rule=left_rule, right_rule=right_rule
)
left_invariants, right_invariants = self._sift(
X=X, cut_point=sampled_cut_point, invariants=invariants
)
self._grow_from_root(
current_node=cast(LeafNode, new_node.left_child),
X=X,
partial_residual=partial_residual,
invariants=left_invariants,
m=m,
w=w,
sigma_val=sigma_val,
leaf_sampler=leaf_sampler,
alpha=alpha,
beta=beta,
all_leaf_nodes=all_leaf_nodes,
variable_counts=variable_counts,
)
self._grow_from_root(
current_node=cast(LeafNode, new_node.right_child),
X=X,
partial_residual=partial_residual,
invariants=right_invariants,
m=m,
w=w,
sigma_val=sigma_val,
leaf_sampler=leaf_sampler,
alpha=alpha,
beta=beta,
all_leaf_nodes=all_leaf_nodes,
variable_counts=variable_counts,
)
def _sample_variables(self, m: int, w: torch.Tensor) -> List[int]:
"""
Sample a subset of input dimensions to split on as discussed in section 3.4 of [1].
Reference:
[1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees"
https://arxiv.org/abs/1810.02215.
Note:
            The number of sampled variables is set to min(m, count_nonzero(w)).
Args:
m: number of dimensions to sample, corresponding to 'm' in [1].
w: Vector of weights of picking an input dimension.
"""
m = cast(int, min(m, torch.count_nonzero(w).item()))
return [
_.item() for _ in multinomial(input=w, num_samples=m, replacement=False)
]
def _select_cutpoints(
self,
candidate_dims: List[int],
uniq_vals: List[List[float]],
) -> List[CutPoint]:
"""
Select cutpoints along every dimension.
Args:
candidate_dims: Dimensions that are being split along.
uniq_vals: Unique values along every dimension.
"""
candidate_cuts = []
for inp_dim in candidate_dims:
# check for degeneracy
if len(uniq_vals[inp_dim]) < 2:
continue
if len(uniq_vals[inp_dim]) <= self.num_cuts:
skip_val_freq = 1
elif self.num_cuts == 1:
skip_val_freq = len(
uniq_vals[inp_dim]
) # just select the first val if only 1 cut required
else:
skip_val_freq = math.floor(
(len(uniq_vals[inp_dim]) - 2) / (self.num_cuts - 1)
)
curr_id = 0
# every skip_val_freq-th unique value (never the last one) is added as a candidate cut
while curr_id < (len(uniq_vals[inp_dim]) - 1):
candidate_cuts.append(
CutPoint(dim=inp_dim, cut_val=uniq_vals[inp_dim][curr_id])
)
curr_id += skip_val_freq
return candidate_cuts
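# Hypothetical worked example (values chosen only for illustration): with
# self.num_cuts = 3 and uniq_vals[inp_dim] = [1., 2., 3., 4., 5., 6., 7.],
# skip_val_freq = floor((7 - 2) / (3 - 1)) = 2, so the candidate cut values are
# 1., 3. and 5.; the largest unique value is never proposed as a cut.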
def _sample_cut_point(
self,
candidate_cut_points: List[CutPoint],
partial_residual: torch.Tensor,
invariants: SortedInvariants,
sigma_val: float,
leaf_sampler: LeafMean,
current_node: LeafNode,
alpha: float,
beta: float,
) -> Optional[CutPoint]:
"""
Sample a cut point using the sampling probabilities calculated in eq. (4) of [1].
Args:
candidate_cut_points: Cut points to sample from.
partial_residual: Residual vector of shape (num_observations, 1).
invariants: The sorted index matrix, unique values, and unique counts used to maintain sorted order.
sigma_val: Current value of noise standard deviation.
leaf_sampler: A sampler to sample the posterior distribution of leaf means.
current_node: The node being mutated.
alpha: Hyperparameter controlling the prior probability of a node being terminal as discussed in [1].
beta: Hyperparameter controlling the prior probability of a node being terminal as discussed in [1].
"""
if len(candidate_cut_points) == 0:
return None
selection_log_likelihoods = []
selection_probabs = []
total_num_observations = invariants.O_.shape[-1]
total_residual = torch.sum(partial_residual[invariants.O_[0]]).item()
tau = leaf_sampler.prior_scale**2
sigma2 = sigma_val**2
MAX_LOG_LIKELIHOOD = -float("inf")
def _integrated_log_likelihood(
num_observations: int,
residual: float,
) -> float:
log_likelihood = +0.5 * log(
(sigma2) / (sigma2 + tau * num_observations)
) + 0.5 * (tau * (residual**2)) / (
(sigma2) * (sigma2 + tau * num_observations)
)
return log_likelihood
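# The helper above evaluates, up to additive constants shared by all candidates, the
# integrated log marginal likelihood for a partition with n observations and residual
# sum s: 0.5 * log(sigma2 / (sigma2 + tau * n)) + tau * s**2 / (2 * sigma2 * (sigma2 + tau * n)).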
kappa = self.num_null_cuts * (
(math.pow((1 + current_node.depth), beta) / alpha) - 1
)
null_log_likelihood = (
_integrated_log_likelihood(
num_observations=total_num_observations, residual=total_residual
)
+ log(kappa)
+ log(len(candidate_cut_points))
)
if null_log_likelihood > MAX_LOG_LIKELIHOOD:
MAX_LOG_LIKELIHOOD = null_log_likelihood
selection_log_likelihoods.append(null_log_likelihood)
current_O_id_, current_uniq_val_id_ = 0, 0
residuals_le_cutpoint, num_obs_le_cutpoint = [], []
for cut_id, cut_point in enumerate(candidate_cut_points):
current_residual = 0.0
current_num_obs = 0
if cut_id == 0 or cut_point.dim != candidate_cut_points[cut_id - 1].dim:
residuals_le_cutpoint = []
num_obs_le_cutpoint = []
current_O_id_ = 0
current_uniq_val_id_ = 0
else:
current_residual += residuals_le_cutpoint[-1]
current_num_obs += num_obs_le_cutpoint[-1]
while (
invariants.uniq_vals[cut_point.dim][current_uniq_val_id_]
<= cut_point.cut_val
):
num_ties = invariants.val_counts[cut_point.dim][
invariants.uniq_vals[cut_point.dim][current_uniq_val_id_]
]
current_num_obs += num_ties
for _ in range(num_ties):
current_residual += partial_residual[
invariants.O_[cut_point.dim, current_O_id_]
].item()
current_O_id_ += 1
current_uniq_val_id_ += 1
residuals_le_cutpoint.append(current_residual)
num_obs_le_cutpoint.append(current_num_obs)
cut_point_log_likelihood = _integrated_log_likelihood(
num_observations=current_num_obs,
residual=current_residual,
) + _integrated_log_likelihood(
num_observations=(total_num_observations - current_num_obs),
residual=(total_residual - current_residual),
)
if cut_point_log_likelihood > MAX_LOG_LIKELIHOOD:
MAX_LOG_LIKELIHOOD = cut_point_log_likelihood
selection_log_likelihoods.append(cut_point_log_likelihood)
# turn it into likelihoods
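# Shifting by MAX_LOG_LIKELIHOOD before exponentiating is the usual max-subtraction
# (log-sum-exp) stabilization; the shift cancels when the weights are normalized below.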
sum_ = 0.0
for log_likelihood in selection_log_likelihoods:
likelihood = math.exp(log_likelihood - MAX_LOG_LIKELIHOOD)
sum_ += likelihood
selection_probabs.append(likelihood)
selection_probabs = torch.tensor([_ / sum_ for _ in selection_probabs])
sampled_cut_id = cast(
int, multinomial(input=selection_probabs, num_samples=1).item()
)
if sampled_cut_id == 0:
# no split
return None
return candidate_cut_points[sampled_cut_id - 1]
def _sift(
self, X: torch.Tensor, invariants: SortedInvariants, cut_point: CutPoint
) -> Tuple[SortedInvariants, SortedInvariants]:
"""
Sift all data into left and right partitions to maintain sorted order during recursion.
Args:
X: Training data / covariate matrix of shape (num_observations, input_dimensions).
invariants: The sorted index matrix, unique values, and unique counts used to maintain sorted order.
cut_point: The cut point to split along.
"""
num_dims, num_observations = invariants.O_.shape
O_left, O_right = [], []
uniq_vals_left, uniq_vals_right = [], []
val_counts_left, val_counts_right = [], []
for dim in range(num_dims):
dim_O_left, dim_O_right = [], []
dim_uniq_vals_left, dim_uniq_vals_right = [], []
dim_val_counts_left, dim_val_counts_right = Counter(), Counter()
for col in range(num_observations):
obs_id = invariants.O_[dim, col].item()
curr_observation_dim_val = X[obs_id, dim].item()
if X[obs_id, cut_point.dim] <= cut_point.cut_val:
dim_O_left.append(obs_id)
if (
len(dim_uniq_vals_left) == 0
or dim_uniq_vals_left[-1] != curr_observation_dim_val
):
dim_uniq_vals_left.append(curr_observation_dim_val)
dim_val_counts_left[curr_observation_dim_val] += 1
else:
dim_O_right.append(obs_id)
if (
len(dim_uniq_vals_right) == 0
or dim_uniq_vals_right[-1] != curr_observation_dim_val
):
dim_uniq_vals_right.append(curr_observation_dim_val)
dim_val_counts_right[curr_observation_dim_val] += 1
O_left.append(dim_O_left)
O_right.append(dim_O_right)
uniq_vals_left.append(dim_uniq_vals_left)
uniq_vals_right.append(dim_uniq_vals_right)
val_counts_left.append(dim_val_counts_left)
val_counts_right.append(dim_val_counts_right)
left_invariants = SortedInvariants(
O_=torch.tensor(O_left),
uniq_vals=uniq_vals_left,
val_counts=val_counts_left,
)
right_invariants = SortedInvariants(
O_=torch.tensor(O_right),
uniq_vals=uniq_vals_right,
val_counts=val_counts_right,
)
return left_invariants, right_invariants
| beanmachine-main | src/beanmachine/ppl/experimental/causal_inference/models/bart/grow_from_root_tree_proposer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import enum
from dataclasses import dataclass
from typing import List, Optional
import torch
class Operator(enum.Enum):
le = "less than equal to"
gt = "greater than"
@dataclass(eq=True)
class SplitRule:
"""
A representation of a split in feature space created when a leaf node grows into a decision node.
Args:
grow_dim: The dimension used for the split.
grow_val: The value used for splitting.
operator: The relational operation used for the split. The two operators considered are
"less than or equal" for the left child and "greater than" for the right child.
"""
__slots__ = ["grow_dim", "grow_val", "operator"]
def __init__(
self,
grow_dim: int,
grow_val: float,
operator: Operator,
):
self.grow_dim = grow_dim
self.grow_val = grow_val
self.operator = operator
class DimensionalRule:
"""
Represents the range of values along one dimension of the input which passes a rule.
For example, if input is X = [x1, x2] then a dimensional rule for x1 could be
x1 in [3, 4, 5, ..., 20] representing the rule 3 < x1 <= 20 (assuming x1 is an integer).
Args:
grow_dim: The dimension used for the rule.
min_val: The minimum value of grow_dim which satisfies the rule (exclusive i.e. min_val fails the rule).
max_val: The maximum value of grow_dim which satisfies the rule (inclusive i.e. max_val passes the rule).
"""
def __init__(self, grow_dim: int, min_val: float, max_val: float):
self.grow_dim = grow_dim
self.min_val, self.max_val = min_val, max_val
def add_rule(self, new_rule: SplitRule) -> "DimensionalRule":
"""Add a rule to the dimension. If the rule is less restrictive than an existing rule, nothing changes.
Args:
new_rule: The new rule to add.
"""
if self.grow_dim != new_rule.grow_dim:
raise ValueError("New rule grow dimension does not match")
if new_rule.operator == Operator.gt and new_rule.grow_val > self.min_val:
return DimensionalRule(self.grow_dim, new_rule.grow_val, self.max_val)
elif new_rule.operator == Operator.le and new_rule.grow_val < self.max_val:
return DimensionalRule(self.grow_dim, self.min_val, new_rule.grow_val)
else:
# new rule is already covered by existing rule
return self
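# Illustrative note (not in the original source): starting from the unbounded rule
# DimensionalRule(grow_dim=0, min_val=-float("inf"), max_val=float("inf")), adding
# SplitRule(grow_dim=0, grow_val=3.0, operator=Operator.gt) narrows the range to (3, inf],
# and then adding SplitRule(grow_dim=0, grow_val=20.0, operator=Operator.le) narrows it
# to (3, 20], i.e. the rule 3 < x_0 <= 20 used in the class docstring above.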
class CompositeRules:
"""
Represents a composition of `DimensionalRule`s along multiple dimensions of input.
For example, if input is X = [x1, x2] then a composite rule could be
x1 in [3, 4, 5, ..., 20] and x2 in (-inf, -10] representing the rule 3 < x1 <= 20
(assuming x1 is an integer) and x2 <= -10.
Note:
CompositeRules are immutable and all changes to them return copies with the desired modification.
Args:
all_dims: All dimensions which have rules.
all_split_rules: All rules corresponding to each dimension in `all_dims`.
"""
def __init__(
self, all_dims: List[int], all_split_rules: Optional[List[SplitRule]] = None
):
self.dimensional_rules = {
dim: DimensionalRule(dim, -float("inf"), float("inf")) for dim in all_dims
}
if all_split_rules is None:
self.all_split_rules = []
else:
self.all_split_rules = all_split_rules
for split_rule in self.all_split_rules:
self.dimensional_rules[split_rule.grow_dim] = self.dimensional_rules[
split_rule.grow_dim
].add_rule(split_rule)
if len(self.all_split_rules) > 0:
self.grow_dim = self.all_split_rules[-1].grow_dim
else:
self.grow_dim = None
def condition_on_rules(self, X: torch.Tensor) -> torch.Tensor:
"""Condition the input on a composite rule and get a mask such that X[mask]
satisfies the rule.
Args:
X: Input / covariate matrix.
"""
mask = torch.ones(len(X), dtype=torch.bool)
for dim in self.dimensional_rules.keys():
mask = (
mask
& (X[:, dim].gt(self.dimensional_rules[dim].min_val))
& (X[:, dim].le(self.dimensional_rules[dim].max_val))
)
return mask
def add_rule(self, new_rule: SplitRule) -> "CompositeRules":
"""Add a split rule to the composite ruleset. Returns a copy of `CompositeRules`"""
if new_rule.grow_dim not in self.dimensional_rules.keys():
raise ValueError(
"The dimension of new split rule is outside the scope of the composite rule"
)
return CompositeRules(
list(self.dimensional_rules.keys()), self.all_split_rules + [new_rule]
)
def most_recent_split_rule(self) -> Optional[SplitRule]:
"""Returns the most recent split_rule added. Returns None if no rules were applied."""
if len(self.all_split_rules) == 0:
return None
else:
return self.all_split_rules[-1]
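# A minimal usage sketch (not part of the original module): it builds the rule
# 3 < x_0 <= 20 described in the docstrings above and masks a small toy matrix.
# The tensor values below are made up purely for illustration.
if __name__ == "__main__":
    rules = CompositeRules(all_dims=[0, 1])
    rules = rules.add_rule(SplitRule(grow_dim=0, grow_val=3.0, operator=Operator.gt))
    rules = rules.add_rule(SplitRule(grow_dim=0, grow_val=20.0, operator=Operator.le))
    X = torch.tensor([[2.0, 0.0], [5.0, 1.0], [25.0, 2.0]])
    # Only the middle row satisfies 3 < x_0 <= 20.
    print(rules.condition_on_rules(X))  # expected: tensor([False, True, False])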
| beanmachine-main | src/beanmachine/ppl/experimental/causal_inference/models/bart/split_rule.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
class TreeStructureError(Exception):
"""Base class for errors related to tree structure"""
pass
class PruneError(TreeStructureError):
"""Raised for errors in pruning operations on a tree such as trying to prune
a root node or trying to prune a node which would has non-terminal children."""
pass
class GrowError(TreeStructureError):
"""Raised for errors in growing a tree such as trying to grow from a node along
an input dimension which has no unique values."""
pass
class NotInitializedError(AttributeError):
"""Raised for errors in accessing model attributes which have not been initialized
for example trying to predict a model which has not been trained."""
pass
| beanmachine-main | src/beanmachine/ppl/experimental/causal_inference/models/bart/exceptions.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# For now supports only ordered numeric variables
from __future__ import annotations
import math
from copy import deepcopy
from typing import cast, List, Optional, Tuple
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import (
NotInitializedError,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.grow_from_root_tree_proposer import (
GrowFromRootTreeProposer,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.grow_prune_tree_proposer import (
GrowPruneTreeProposer,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.node import LeafNode
from beanmachine.ppl.experimental.causal_inference.models.bart.scalar_samplers import (
LeafMean,
NoiseStandardDeviation,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
CompositeRules,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.tree import Tree
from torch.distributions.dirichlet import Dirichlet
from tqdm.auto import trange
class BART:
"""Bayesian Additive Regression Trees (BART) are Bayesian sum of trees models [1] Default parameters are taken from [1].
Reference:
[1] Hugh A. Chipman, Edward I. George, Robert E. McCulloch (2010). "BART: Bayesian additive regression trees"
https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full
Args:
num_trees: Number of trees.
alpha: Parameter used in the tree depth prior, Eq. 7 of [1].
beta: Parameter used in the tree depth prior, Eq. 7 of [1].
k: Parameter used in the mu_i_j prior, Eq. 8 of [1].
noise_sd_concentration: Concentration parameter (alpha) for the inverse gamma distribution prior of p(sigma).
noise_sd_rate: Rate parameter (beta) for the inverse gamma distribution prior of p(sigma).
tree_sampler: The tree sampling method used. Defaults to a GrowPruneTreeProposer, whose grow_probability argument controls the probability of a grow move.
random_state: Random state used to seed.
Note:
The number of samples to collect and the number of burn-in iterations are passed to ``fit()`` rather than to the constructor.
"""
def __init__(
self,
num_trees: int = 200,
alpha: float = 0.95,
beta: float = 2.0,
k: float = 2.0,
noise_sd_concentration: float = 3.0,
noise_sd_rate: float = 1.0,
tree_sampler: Optional[GrowPruneTreeProposer] = None,
random_state: Optional[int] = None,
):
self.num_trees = num_trees
self.num_samples = None
self.all_tree_predictions = None
self._all_trees = None
self.leaf_mean = None
self.k = k
self.alpha = alpha
self.beta = beta
if noise_sd_concentration <= 0 or noise_sd_rate <= 0:
raise ValueError("Invalid specification of noise_sd distribution priors")
self.noise_sd_concentration = noise_sd_concentration
self.noise_sd_rate = noise_sd_rate
self.sigma = NoiseStandardDeviation(
prior_concentration=self.noise_sd_concentration,
prior_rate=self.noise_sd_rate,
)
self.samples = None
self.X = None
self.y = None
self.y_min = None
self.y_max = None
if random_state is not None:
torch.manual_seed(random_state)
if tree_sampler is None:
self.tree_sampler = GrowPruneTreeProposer(grow_probability=0.5)
elif isinstance(tree_sampler, GrowPruneTreeProposer):
self.tree_sampler = tree_sampler
else:
NotImplementedError("tree_sampler not implemented")
if isinstance(self.tree_sampler, GrowPruneTreeProposer):
self._step = self._grow_prune_step
else:
raise NotImplementedError(
"step function not defined"
)  # this should never be raised
def fit(
self,
X: torch.Tensor,
y: torch.Tensor,
num_samples: int = 1000,
num_burn: int = 250,
) -> BART:
"""Fit the training data and learn the parameters of the model.
Args:
X: Training data / covariate matrix of shape (num_observations, input_dimensions).
y: Response vector of shape (num_observations, 1).
"""
self.num_samples = num_samples
self._load_data(X, y)
self.samples = {"trees": [], "sigmas": []}
self.leaf_mean = LeafMean(
prior_loc=0.0, prior_scale=0.5 / (self.k * math.sqrt(self.num_trees))
)
self._init_trees(X)
for iter_id in trange(num_burn + num_samples):
trees, sigma = self._step()
self._all_trees = trees
if iter_id >= num_burn:
self.samples["trees"].append(trees)
self.samples["sigmas"].append(sigma)
return self
def _load_data(self, X: torch.Tensor, y: torch.Tensor):
"""
Load the training data. The response is scaled to [-1, 1] as per [1].
Reference:
[1] Hugh A. Chipman, Edward I. George, Robert E. McCulloch (2010). "BART: Bayesian additive regression trees"
https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full
Args:
X: Training data / covariate matrix of shape (num_observations, input_dimensions).
y: Response vector of shape (num_observations, 1).
"""
if not isinstance(X, torch.Tensor) or not isinstance(y, torch.Tensor):
raise ValueError("Expected type torch.Tensor")
if X.shape[0] != y.shape[0]:
raise ValueError(
f"Number of samples in X {X.shape[0]} not the same as in y {y.shape[0]}"
)
self.X = X
self.y_min = y.min()
self.y_max = y.max()
self.y = self._scale(y).reshape(-1, 1)
def _scale(self, y: torch.Tensor) -> torch.Tensor:
"""
Scale tensor to [-1., 1.].
Args:
y: Input tensor.
"""
max_ = torch.ones_like(y)
min_ = -torch.ones_like(y)
y_std = (y - self.y_min) / (self.y_max - self.y_min)
return y_std * (max_ - min_) + min_
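# Worked example (illustrative values): if the training responses span [2., 6.], then
# y = 2. maps to -1., y = 4. maps to 0. and y = 6. maps to 1.; _inverse_scale below
# undoes this affine map.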
def _inverse_scale(self, y: torch.Tensor) -> torch.Tensor:
"""
Rescale tensor back from [-1., 1.].
Args:
y: Input tensor.
"""
max_ = torch.ones_like(y)
min_ = -torch.ones_like(y)
y_std = (y - min_) / (max_ - min_)
return y_std * (self.y_max - self.y_min) + self.y_min
def _init_trees(self, X: torch.Tensor):
"""
Initialize the trees of the model.
Args:
X: Training data / covariate matrix of shape (num_observations, input_dimensions).
"""
self._all_trees = []
num_dims = X.shape[-1]
num_points = X.shape[0]
for _ in range(self.num_trees):
self._all_trees.append(
Tree(
nodes=[
LeafNode(
val=self.leaf_mean.sample_prior(),
composite_rules=CompositeRules(
all_dims=list(range(num_dims))
),
depth=0,
)
]
)
)
self.all_tree_predictions = torch.zeros(
(num_points, self.num_trees, 1), dtype=torch.float
)
def _grow_prune_step(self) -> Tuple[List, float]:
"""Take a single MCMC step using the GrowPrune approach of the original BART [1].
Reference:
[1] Hugh A. Chipman, Edward I. George, Robert E. McCulloch (2010). "BART: Bayesian additive regression trees"
https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full
"""
if self.X is None or self.y is None:
raise NotInitializedError("No training data")
new_trees = [deepcopy(tree) for tree in self._all_trees]
all_tree_predictions = deepcopy(self.all_tree_predictions)
for tree_id in range(len(new_trees)):
# all_tree_predictions.shape -> (num_observations, num_trees, 1)
current_predictions = torch.sum(all_tree_predictions, dim=1)
last_iter_tree_prediction = all_tree_predictions[:, tree_id]
partial_residual = self.y - current_predictions + last_iter_tree_prediction
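# partial_residual excludes the current tree's contribution: it is the target the tree
# is refit against while all other trees are held fixed (a backfitting-style update).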
new_trees[tree_id] = self.tree_sampler.propose(
tree=new_trees[tree_id],
X=self.X,
partial_residual=partial_residual,
alpha=self.alpha,
beta=self.beta,
sigma_val=self.sigma.val,
leaf_mean_prior_scale=self.leaf_mean_prior_scale,
)
self._update_leaf_mean(new_trees[tree_id], partial_residual)
all_tree_predictions[:, tree_id] = new_trees[tree_id].predict(self.X)
self.all_tree_predictions = all_tree_predictions
self._update_sigma(self.y - torch.sum(all_tree_predictions, dim=1))
return new_trees, self.sigma.val
def _update_leaf_mean(self, tree: Tree, partial_residual: torch.Tensor):
"""
Use Eq. 2.10 of [1] to update leaf node values by sampling from posterior distribution.
Reference:
[1] Andrew Gelman et al. "Bayesian Data Analysis", 3rd ed.
Args:
tree: Tree whose leaf values are being updated.
partial_residual: Current residual of the model excluding this tree of shape (num_observations, 1).
"""
if self.X is None:
raise NotInitializedError("No training data")
for leaf_node in tree.leaf_nodes():
new_leaf_val = self.leaf_mean.sample_posterior(
node=leaf_node,
X=self.X,
y=partial_residual,
current_sigma_val=self.sigma.val,
)
if new_leaf_val is not None:
leaf_node.val = new_leaf_val
def _update_sigma(self, full_residual: torch.Tensor):
"""
Use Eq. from section 2.6 of [1] to update sigma by sampling from posterior distribution.
Reference:
[1] Andrew Gelman et al. "Bayesian Data Analysis", 3rd ed.
Args:
full_residual: Residual of the full model (response minus the sum of all tree predictions) of shape (num_observations, 1).
"""
self.sigma.sample(self.X, full_residual)
def _predict_step(
self,
X: Optional[torch.Tensor] = None,
trees: Optional[List[torch.Tensor]] = None,
) -> torch.Tensor:
"""Get a prediction from a list of trees.
Args:
X: Covariate matrix of shape (num_observations, input_dimensions) to predict on. If None, predictions are made on the training data.
trees: Trees used for prediction; the prediction is the sum of predictions from these trees.
If None, the last drawn sample of trees is used.
Returns:
prediction: Prediction of shape (num_observations, 1).
"""
if self.X is None or self._all_trees is None:
raise NotInitializedError("Model not trained")
if X is None:
X = self.X
if trees is None:
trees = self._all_trees
prediction = torch.zeros((len(X), 1), dtype=torch.float)
for single_tree in trees:
prediction += single_tree.predict(X)
return prediction
def predict(self, X: torch.Tensor) -> torch.Tensor:
"""
Perform a prediction using all the samples collected in the model.
Args:
X: Covariate matrix to predict on of shape (num_observations, input_dimensions).
Returns:
prediction: Prediction corresponding to average of all samples of shape (num_observations, 1).
"""
prediction = torch.mean(
self.get_posterior_predictive_samples(X), dim=-1, dtype=torch.float
)
return prediction.reshape(-1, 1)
def predict_with_quantiles(
self, X: torch.Tensor, quantiles: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Returns the quantiles of prediction.
Args:
X: Covariate matrix to predict on of shape (num_samples, input_dimensions).
quantiles: The quantiles required. If nothing supplied, the default quantiles are [0.025, 0.5, 0.975]
Returns:
prediction, qvals: Prediction corresponding to average of all samples of shape (num_observations, 1),
qvals tensor of shape (num_obs, len(quantiles)) and qvals[:, i]
is quantile value corresponding to quantiles[i].
"""
if quantiles is None:
quantiles = torch.Tensor([0.025, 0.5, 0.975])
for q in quantiles:
if not 0.0 < q < 1.0:
raise ValueError("Quantiles must be in (0, 1)")
prediction_samples = self.get_posterior_predictive_samples(X)
prediction = torch.mean(prediction_samples, dim=-1, dtype=torch.float).reshape(
-1, 1
)
qvals = (
torch.quantile(prediction_samples, dim=1, q=quantiles)
.transpose(0, 1)
.reshape(-1, len(quantiles))
)
return prediction, qvals
def get_posterior_predictive_samples(self, X: torch.Tensor) -> torch.Tensor:
"""
Returns samples from the posterior predictive distribution P(y|X).
Args:
X: Covariate matrix to predict on of shape (num_observations, input_dimensions).
Returns:
posterior_predictive_samples: Samples from the predictive distribution P(y|X) of shape (num_observations, num_samples).
"""
posterior_predictive_samples = []
for sample_id in range(self.num_samples):
single_prediction_sample = self._inverse_scale(
self._predict_step(X=X, trees=self.samples["trees"][sample_id])
) # ( torch.Size(num_observations, 1) )
posterior_predictive_samples.append(single_prediction_sample)
return torch.concat(posterior_predictive_samples, dim=-1)
@property
def leaf_mean_prior_scale(self):
if self.leaf_mean is None:
raise NotInitializedError("LeafMean prior not set.")
return self.leaf_mean.prior_scale
class XBART(BART):
"""Implementes XBART [1] which is a faster implementation of Bayesian Additive Regression Trees (BART) are Bayesian sum of trees models [2].
Default parameters are taken from [1].
Reference:
[1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees"
https://arxiv.org/abs/1810.02215
[2] Hugh A. Chipman, Edward I. George, Robert E. McCulloch (2010). "BART: Bayesian additive regression trees"
https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full
Args:
num_trees: Number of trees. If this is not set in the constructor explicitly,
it defaults to 0 and is adaptively set as a function of the training data in the ```fit()``` method.
alpha: Parameter used in the tree depth prior, Eq. 7 of [2].
beta: Parameter used in the tree depth prior, Eq. 7 of [2].
tau: Prior variance of the leaf-specific mean parameter used in the mu_i_j prior, section 2.2 of [1].
noise_sd_concentration: Concentration parameter (alpha) for the inverse gamma distribution prior of p(sigma).
noise_sd_rate: Rate parameter (beta) for the inverse gamma distribution prior of p(sigma).
tree_sampler: The tree sampling method used.
random_state: Random state used to seed.
num_cuts: The maximum number of cuts per dimension.
num_null_cuts: Number of "no split" null cuts to consider along each dimension.
This affects the tree depth as discussed in [1].
m: Size of the subset of variables that are sampled for cutting points in the post
burnin period as discussed in section 3.4 of [1].
"""
def __init__(
self,
num_trees: int = 0,
alpha: float = 0.95,
beta: float = 2.0,
tau: Optional[float] = None,
noise_sd_concentration: float = 3.0,
noise_sd_rate: float = 1.0,
tree_sampler: Optional[GrowFromRootTreeProposer] = None,
random_state: Optional[int] = None,
num_cuts: Optional[int] = None,
num_null_cuts: int = 1,
m: Optional[int] = None,
):
self.num_cuts = num_cuts
self.num_null_cuts = num_null_cuts
self.tau = tau
self.m = m
super().__init__(
num_trees=num_trees,
alpha=alpha,
beta=beta,
noise_sd_concentration=noise_sd_concentration,
noise_sd_rate=noise_sd_rate,
tree_sampler=None,  # the XBART-specific tree sampler is configured below
random_state=random_state,
)
if tree_sampler is None:
self.tree_sampler = GrowFromRootTreeProposer()
elif isinstance(tree_sampler, GrowFromRootTreeProposer):
self.tree_sampler = tree_sampler
else:
raise NotImplementedError("tree_sampler not implemented")
self._step = self._grow_from_root_step
self.var_counts = None
self.all_tree_var_counts = None
def fit(
self,
X: torch.Tensor,
y: torch.Tensor,
num_samples: int = 25,
num_burn: int = 15,
) -> XBART:
"""Fit the training data and learn the parameters of the model.
Reference:
[1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees"
https://arxiv.org/abs/1810.02215
Args:
X: Training data / covariate matrix of shape (num_observations, input_dimensions).
y: Response vector of shape (num_observations, 1).
num_samples: Number of post burnin samples to draw.
num_burn: Number of burnin samples to draw (for adaptation).
"""
self.num_samples = num_samples
self._load_data(X, y)
if not self.num_trees > 0:
self._adaptively_init_num_trees()
if self.tau is None:
self._adaptively_init_tau()
self.tau = cast(float, self.tau)
self.leaf_mean = LeafMean(prior_loc=0.0, prior_scale=math.sqrt(self.tau))
if self.num_cuts is None:
self._adaptively_init_num_cuts()
if self.m is None:
self.m = self.X.shape[-1]
self.samples = {"trees": []}
self._init_trees(X)
self.all_tree_predictions = (
(torch.clone(self.y) / self.num_trees)
.unsqueeze(1)
.tile((1, self.num_trees, 1))
)
self.var_counts = torch.ones((X.shape[-1],))
self.all_tree_var_counts = torch.ones((self.num_trees, X.shape[-1]))
is_burnin_period = True
num_dims_to_sample = self.X.shape[-1]
for iter_id in trange(num_burn + num_samples):
if iter_id >= num_burn:
is_burnin_period = False
num_dims_to_sample = self.m
trees = self._step(num_dims_to_sample=num_dims_to_sample)
self._all_trees = trees
if not is_burnin_period:
self.samples["trees"].append(trees)
return self
def _adaptively_init_num_trees(self):
"""Implements the default for number of trees from section 3.1 of [1].
Reference:
[1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees"
https://arxiv.org/abs/1810.02215
"""
n = len(self.X)
self.num_trees = int(math.pow(math.log(n), math.log(math.log(n))) / 4)
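# Worked example (illustrative only): with n = 1000 training points,
# log(n) ~ 6.91 and log(log(n)) ~ 1.93, so math.pow(log(n), log(log(n))) ~ 42
# and num_trees = int(42 / 4) = 10.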
def _adaptively_init_tau(self):
"""Implements the default for tau from section 3.1 of [1].
Reference:
[1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees"
https://arxiv.org/abs/1810.02215
"""
if not self.num_trees > 0:
raise NotInitializedError("num_trees not set")
self.tau = (3 / 10) * (torch.var(self.y).item() / self.num_trees)
def _adaptively_init_num_cuts(self):
"""Implements the default for number of cuts, C from section 3.3 of [1].
Reference:
[1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees"
https://arxiv.org/abs/1810.02215
"""
n = len(self.X)
self.num_cuts = int(max(math.sqrt(n), 100))
def _grow_from_root_step(self, num_dims_to_sample: int) -> List[Tree]:
"""Take a single MCMC step using the Grow-from-root approach of xBART [1].
Reference:
[1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees"
https://arxiv.org/abs/1810.02215
Args:
num_dims_to_sample: Size of the subset of variables that are sampled for cutting points
as discussed in section 3.4 of [1].
"""
if self.X is None or self.y is None:
raise NotInitializedError("No training data")
all_tree_predictions = deepcopy(self.all_tree_predictions)
new_trees = []
for tree_id in range(self.num_trees):
# all_tree_predictions.shape -> (num_observations, num_trees, 1)
current_predictions = torch.sum(all_tree_predictions, dim=1)
last_iter_tree_prediction = all_tree_predictions[:, tree_id]
partial_residual = self.y - current_predictions + last_iter_tree_prediction
w = self._draw_var_weights()
new_tree, new_var_counts = self.tree_sampler.propose(
X=self.X,
partial_residual=partial_residual,
m=num_dims_to_sample,
w=w,
alpha=self.alpha,
beta=self.beta,
sigma_val=self.sigma.val,
leaf_sampler=self.leaf_mean,
root_node=self._get_root_node(),
num_cuts=self.num_cuts,
num_null_cuts=self.num_null_cuts,
)
new_trees.append(new_tree)
self.var_counts += new_var_counts - self.all_tree_var_counts[tree_id]
self.all_tree_var_counts[tree_id] = new_var_counts
all_tree_predictions[:, tree_id] = new_tree.predict(self.X)
self._update_sigma(self.y - torch.sum(all_tree_predictions, dim=1))
self.all_tree_predictions = all_tree_predictions
return new_trees
def _draw_var_weights(self) -> torch.Tensor:
return Dirichlet(self.var_counts).sample()
def _get_root_node(self):
return LeafNode(
depth=0,
composite_rules=CompositeRules(all_dims=list(range(self.X.shape[-1]))),
)
| beanmachine-main | src/beanmachine/ppl/experimental/causal_inference/models/bart/bart_model.py |