python_code
stringlengths
0
4.04M
repo_name
stringlengths
7
58
file_path
stringlengths
5
147
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import warnings import beanmachine.ppl as bm import numpy as np import pytest import torch import torch.distributions as dist from beanmachine.ppl.inference.proposer.hmc_utils import ( DualAverageAdapter, MassMatrixAdapter, RealSpaceTransform, WelfordCovariance, WindowScheme, ) from beanmachine.ppl.inference.proposer.utils import DictToVecConverter from beanmachine.ppl.world import World class SampleModel: @bm.random_variable def foo(self): return dist.Uniform(0.0, 1.0) @bm.random_variable def bar(self): return dist.Normal(self.foo(), 1.0) class DiscreteModel: @bm.random_variable def baz(self): return dist.Poisson(5.0) def test_dual_average_adapter(): adapter = DualAverageAdapter(torch.tensor(0.1)) epsilon1 = adapter.step(torch.tensor(1.0)) epsilon2 = adapter.step(torch.tensor(0.0)) assert epsilon2 < adapter.finalize() < epsilon1 def test_dual_average_with_different_delta(): adapter1 = DualAverageAdapter(torch.tensor(1.0), delta=0.8) adapter2 = DualAverageAdapter(torch.tensor(1.0), delta=0.2) prob = torch.tensor(0.5) # prob > delta means we can increase the step size, wherease prob < delta means # we need to decrease the step size epsilon1 = adapter1.step(prob) epsilon2 = adapter2.step(prob) assert epsilon1 < epsilon2 def test_small_window_scheme(): num_adaptive_samples = 10 scheme = WindowScheme(num_adaptive_samples) for _ in range(num_adaptive_samples): # no window should be created if num_adaptive_samples is too small assert not scheme.is_in_window scheme.step() def test_middle_window_scheme(): num_adaptive_samples = 125 scheme = WindowScheme(num_adaptive_samples) num_windows = 0 for i in range(num_adaptive_samples): if scheme.is_in_window: # there should be a margin at the beginning and the end of a window assert i > 0 if scheme.is_end_window: num_windows += 1 assert i < num_adaptive_samples scheme.step() # there should only be a single window assert num_windows == 1 @pytest.mark.parametrize("num_adaptive_samples", [175, 300, 399, 543]) def test_large_window_scheme(num_adaptive_samples): scheme = WindowScheme(num_adaptive_samples) window_sizes = [] for _ in range(num_adaptive_samples): if scheme.is_end_window: window_sizes.append(scheme._window_size) scheme.step() # size of windows should be monotonically increasing sorted_window_sizes = sorted(window_sizes) assert window_sizes == sorted_window_sizes for win1, win2 in zip(window_sizes[:-1], window_sizes[1:-1]): # except for last window, window size should keep doubling assert win2 == win1 * 2 @pytest.mark.parametrize("full_mass_matrix", [True, False]) def test_mass_matrix_adapter(full_mass_matrix): model = SampleModel() world = World() world.call(model.bar()) positions_dict = RealSpaceTransform(world, world.latent_nodes)(dict(world)) dict2vec = DictToVecConverter(positions_dict) positions = dict2vec.to_vec(positions_dict) mass_matrix_adapter = MassMatrixAdapter(positions, full_mass_matrix) momentums = mass_matrix_adapter.initialize_momentums(positions) assert isinstance(momentums, torch.Tensor) assert momentums.shape == positions.shape mass_inv_old = mass_matrix_adapter.mass_inv.clone() mass_matrix_adapter.step(positions) with warnings.catch_warnings(): warnings.simplefilter("ignore") mass_matrix_adapter.finalize() # mass matrix adapter has seen less than 2 samples, so mass_inv is not updated assert torch.allclose(mass_inv_old, mass_matrix_adapter.mass_inv) # check the size of the matrix matrix_width = len(positions) if full_mass_matrix: assert mass_inv_old.shape == (matrix_width, matrix_width) else: assert mass_inv_old.shape == (matrix_width,) def test_diagonal_welford_covariance(): samples = dist.MultivariateNormal( loc=torch.rand(5), scale_tril=torch.randn(5, 5).tril() ).sample((1000,)) welford = WelfordCovariance(diagonal=True) for sample in samples: welford.step(sample) sample_var = torch.var(samples, dim=0) estimated_var = welford.finalize(regularize=False) assert torch.allclose(estimated_var, sample_var) regularized_var = welford.finalize(regularize=True) assert (torch.argsort(regularized_var) == torch.argsort(estimated_var)).all() def test_dense_welford_covariance(): samples = dist.MultivariateNormal( loc=torch.rand(5), scale_tril=torch.randn(5, 5).tril() ).sample((1000,)) welford = WelfordCovariance(diagonal=False) for sample in samples: welford.step(sample) sample_cov = torch.from_numpy(np.cov(samples.T.numpy())).to(samples.dtype) estimated_cov = welford.finalize(regularize=False) assert torch.allclose(estimated_cov, sample_cov) regularized_cov = welford.finalize(regularize=True) assert (torch.argsort(regularized_cov) == torch.argsort(estimated_cov)).all() def test_welford_exception(): welford = WelfordCovariance() welford.step(torch.rand(5)) with pytest.raises(RuntimeError): # number of samples is too small welford.finalize() def test_discrete_rv_exception(): model = DiscreteModel() world = World() world.call(model.baz()) with pytest.raises(TypeError): RealSpaceTransform(world, world.latent_nodes)(dict(world))
beanmachine-main
tests/ppl/inference/proposer/hmc_utils_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch import torch.distributions as dist from beanmachine import ppl as bm from beanmachine.ppl.inference.proposer.nmc import SingleSiteSimplexSpaceNMCProposer from beanmachine.ppl.inference.single_site_nmc import SingleSiteNewtonianMonteCarlo from beanmachine.ppl.world import World from torch import tensor class SingleSiteSimplexNewtonianMonteCarloProposerTest(unittest.TestCase): def test_alpha_for_dirichlet(self): alpha = tensor([[0.5, 0.5], [0.5, 0.5]]) @bm.random_variable def a(): return dist.Dirichlet(alpha) world_ = World() with world_: a() nw_proposer = SingleSiteSimplexSpaceNMCProposer(a()) is_valid, predicted_alpha = nw_proposer.compute_alpha(world_) self.assertEqual(is_valid, True) self.assertAlmostEqual( alpha.sum().item(), (predicted_alpha).sum().item(), delta=0.0001 ) def test_coin_flip(self): prior_heads, prior_tails = 2.0, 2.0 p = bm.random_variable(lambda: dist.Beta(2.0, 2.0)) x = bm.random_variable(lambda: dist.Bernoulli(p())) heads_observed = 5 samples = ( SingleSiteNewtonianMonteCarlo() .infer( queries=[p()], observations={x(): torch.ones(heads_observed)}, num_samples=100, num_chains=1, ) .get_chain(0) ) # assert we are close to the conjugate poserior mean self.assertAlmostEqual( samples[p()].mean(), (prior_heads + heads_observed) / (prior_heads + prior_tails + heads_observed), delta=0.05, )
beanmachine-main
tests/ppl/inference/proposer/nmc/single_site_simplex_newtonian_monte_carlo_proposer_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm import torch import torch.distributions as dist from beanmachine.ppl.inference.proposer.nmc import SingleSiteHalfSpaceNMCProposer from beanmachine.ppl.world import World from torch import tensor class SingleSiteHalfSpaceNewtonianMonteCarloProposerTest(unittest.TestCase): class SampleNormalModel: @bm.random_variable def foo(self): return dist.Normal(tensor(2.0), tensor(2.0)) @bm.random_variable def bar(self): return dist.Normal(self.foo(), torch.tensor(1.0)) class SampleLogisticRegressionModel: @bm.random_variable def theta_0(self): return dist.Normal(tensor(0.0), tensor(1.0)) @bm.random_variable def theta_1(self): return dist.Normal(tensor(0.0), tensor(1.0)) @bm.random_variable def x(self, i): return dist.Normal(tensor(0.0), tensor(1.0)) @bm.random_variable def y(self, i): y = self.theta_1() * self.x(i) + self.theta_0() probs = 1 / (1 + (y * -1).exp()) return dist.Bernoulli(probs) class SampleFallbackModel: @bm.random_variable def foo(self): return dist.Gamma(tensor(2.0), tensor(2.0)) @bm.random_variable def bar(self): return dist.Normal(self.foo(), torch.tensor(1.0)) def test_alpha_and_beta_for_gamma(self): alpha = tensor([2.0, 2.0, 2.0]) beta = tensor([2.0, 2.0, 2.0]) @bm.random_variable def gamma(): return dist.Gamma(alpha, beta) world = World() with world: gamma() nw_proposer = SingleSiteHalfSpaceNMCProposer(gamma()) is_valid, predicted_alpha, predicted_beta = nw_proposer.compute_alpha_beta( world ) self.assertEqual(is_valid, True) self.assertAlmostEqual( alpha.sum().item(), (predicted_alpha).sum().item(), delta=0.0001 ) self.assertAlmostEqual( beta.sum().item(), (predicted_beta).sum().item(), delta=0.0001 )
beanmachine-main
tests/ppl/inference/proposer/nmc/single_site_half_space_newtonian_monte_carlo_proposer_test.py
beanmachine-main
tests/ppl/inference/proposer/nmc/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm import torch import torch.autograd import torch.distributions as dist from beanmachine.ppl.inference.proposer.nmc.single_site_real_space_nmc_proposer import ( SingleSiteRealSpaceNMCProposer as SingleSiteRealSpaceNewtonianMonteCarloProposer, ) from beanmachine.ppl.world import World from beanmachine.ppl.world.variable import Variable from torch import tensor class SingleSiteRealSpaceNewtonianMonteCarloProposerTest(unittest.TestCase): class SampleNormalModel: @bm.random_variable def foo(self): return dist.MultivariateNormal(torch.zeros(2), torch.eye(2)) @bm.random_variable def bar(self): return dist.MultivariateNormal(self.foo(), torch.eye(2)) class SampleLogisticRegressionModel: @bm.random_variable def theta_0(self): return dist.Normal(tensor(0.0), tensor(1.0)) @bm.random_variable def theta_1(self): return dist.Normal(tensor(0.0), tensor(1.0)) @bm.random_variable def x(self, i): return dist.Normal(tensor(0.0), tensor(1.0)) @bm.random_variable def y(self, i): y = self.theta_1() * self.x(i) + self.theta_0() probs = 1 / (1 + (y * -1).exp()) return dist.Bernoulli(probs) def test_mean_scale_tril_for_node_with_child(self): foo_key = bm.random_variable( lambda: dist.MultivariateNormal( tensor([1.0, 1.0]), tensor([[1.0, 0.8], [0.8, 1]]) ) ) bar_key = bm.random_variable( lambda: dist.MultivariateNormal( foo_key(), tensor([[1.0, 0.8], [0.8, 1.0]]), ) ) nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(foo_key()) val = tensor([2.0, 2.0]) queries = [foo_key(), bar_key()] observed_val = tensor([2.0, 2.0]) observations = {bar_key(): observed_val} world = World.initialize_world(queries, observations) world_vars = world._variables world_vars[foo_key] = val nw_proposer.learning_rate_ = 1.0 prop_dist = nw_proposer.get_proposal_distribution(world).base_dist mean, scale_tril = prop_dist.mean, prop_dist.scale_tril expected_mean = tensor([1.5, 1.5]) expected_scale_tril = torch.linalg.cholesky( tensor([[0.5000, 0.4000], [0.4000, 0.5000]]) ) self.assertTrue(torch.isclose(mean, expected_mean).all()) self.assertTrue(torch.isclose(scale_tril, expected_scale_tril).all()) def test_mean_scale_tril(self): model = self.SampleNormalModel() foo_key = model.foo() nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(foo_key) val = tensor([2.0, 2.0]) val.requires_grad_(True) distribution = dist.MultivariateNormal( tensor([1.0, 1.0]), tensor([[1.0, 0.8], [0.8, 1]]) ) queries = [foo_key] observations = {} world = World.initialize_world(queries, observations) world_vars = world._variables world_vars[foo_key] = Variable( value=val, distribution=distribution, ) nw_proposer.learning_rate_ = 1.0 prop_dist = nw_proposer.get_proposal_distribution(world).base_dist mean, scale_tril = prop_dist.mean, prop_dist.scale_tril expected_mean = tensor([1.0, 1.0]) expected_scale_tril = torch.linalg.cholesky(tensor([[1.0, 0.8], [0.8, 1]])) self.assertTrue(torch.isclose(mean, expected_mean).all()) self.assertTrue(torch.isclose(scale_tril, expected_scale_tril).all()) def test_mean_scale_tril_for_iids(self): model = self.SampleNormalModel() foo_key = model.foo() nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(foo_key) val = tensor([[2.0, 2.0], [2.0, 2.0]]) val.requires_grad_(True) distribution = dist.Normal( tensor([[1.0, 1.0], [1.0, 1.0]]), tensor([[1.0, 1.0], [1.0, 1.0]]) ) queries = [foo_key] observations = {} world = World.initialize_world(queries, observations) world_vars = world._variables world_vars[foo_key] = Variable( value=val, distribution=distribution, ) nw_proposer.learning_rate_ = 1.0 prop_dist = nw_proposer.get_proposal_distribution(world).base_dist mean, scale_tril = prop_dist.mean, prop_dist.scale_tril expected_mean = tensor([1.0, 1.0, 1.0, 1.0]) expected_scale_tril = torch.eye(4) self.assertTrue(torch.isclose(mean, expected_mean).all()) self.assertTrue(torch.isclose(scale_tril, expected_scale_tril).all()) def test_multi_mean_scale_tril_computation_in_inference(self): model = self.SampleLogisticRegressionModel() theta_0_key = model.theta_0() theta_1_key = model.theta_1() nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(theta_0_key) x_0_key = model.x(0) x_1_key = model.x(1) y_0_key = model.y(0) y_1_key = model.y(1) theta_0_value = tensor(1.5708) theta_0_value.requires_grad_(True) x_0_value = tensor(0.7654) x_1_value = tensor(-6.6737) theta_1_value = tensor(-0.4459) theta_0_distribution = dist.Normal(torch.tensor(0.0), torch.tensor(1.0)) queries = [theta_0_key, theta_1_key] observations = {} world = World.initialize_world(queries, observations) world_vars = world._variables world_vars[theta_0_key] = Variable( value=theta_0_value, distribution=theta_0_distribution, children=set({y_0_key, y_1_key}), ) world_vars[theta_1_key] = Variable( value=theta_1_value, distribution=theta_0_distribution, children=set({y_0_key, y_1_key}), ) x_distribution = dist.Normal(torch.tensor(0.0), torch.tensor(5.0)) world_vars[x_0_key] = Variable( value=x_0_value, distribution=x_distribution, children=set({y_0_key, y_1_key}), ) world_vars[x_1_key] = Variable( value=x_1_value, distribution=x_distribution, children=set({y_0_key, y_1_key}), ) y = theta_0_value + theta_1_value * x_0_value probs_0 = 1 / (1 + (y * -1).exp()) y_0_distribution = dist.Bernoulli(probs_0) world_vars[y_0_key] = Variable( value=tensor(1.0), distribution=y_0_distribution, parents=set({theta_0_key, theta_1_key, x_0_key}), ) y = theta_0_value + theta_1_value * x_1_value probs_1 = 1 / (1 + (y * -1).exp()) y_1_distribution = dist.Bernoulli(probs_1) world_vars[y_1_key] = Variable( value=tensor(1.0), distribution=y_1_distribution, parents=set({theta_0_key, theta_1_key, x_1_key}), ) nw_proposer.learning_rate_ = 1.0 prop_dist = nw_proposer.get_proposal_distribution(world).base_dist mean, scale_tril = prop_dist.mean, prop_dist.scale_tril score = theta_0_distribution.log_prob(theta_0_value) score += ( 1 / (1 + (-1 * (theta_0_value + theta_1_value * x_0_value)).exp()) ).log() score += ( 1 / (1 + (-1 * (theta_0_value + theta_1_value * x_1_value)).exp()) ).log() expected_first_gradient = torch.autograd.grad( score, theta_0_value, create_graph=True )[0] expected_second_gradient = torch.autograd.grad( expected_first_gradient, theta_0_value )[0] expected_covar = expected_second_gradient.reshape(1, 1).inverse() * -1 expected_scale_tril = torch.linalg.cholesky(expected_covar) self.assertAlmostEqual( expected_scale_tril.item(), scale_tril.item(), delta=0.001 ) expected_first_gradient = expected_first_gradient.unsqueeze(0) expected_mean = ( theta_0_value.unsqueeze(0) + expected_first_gradient.unsqueeze(0).mm(expected_covar) ).squeeze(0) self.assertAlmostEqual(mean.item(), expected_mean.item(), delta=0.001) proposal_value = ( dist.MultivariateNormal(mean, scale_tril=scale_tril) .sample() .reshape(theta_0_value.shape) ) proposal_value.requires_grad_(True) world_vars[theta_0_key].value = proposal_value y = proposal_value + theta_1_value * x_0_value probs_0 = 1 / (1 + (y * -1).exp()) y_0_distribution = dist.Bernoulli(probs_0) world_vars[y_0_key].distribution = y_0_distribution world_vars[y_0_key].log_prob = y_0_distribution.log_prob(tensor(1.0)) y = proposal_value + theta_1_value * x_1_value probs_1 = 1 / (1 + (y * -1).exp()) y_1_distribution = dist.Bernoulli(probs_1) world_vars[y_1_key].distribution = y_1_distribution nw_proposer.learning_rate_ = 1.0 prop_dist = nw_proposer.get_proposal_distribution(world).base_dist mean, scale_tril = prop_dist.mean, prop_dist.scale_tril score = tensor(0.0) score = theta_0_distribution.log_prob(proposal_value) score += ( 1 / (1 + (-1 * (proposal_value + theta_1_value * x_0_value)).exp()) ).log() score += ( 1 / (1 + (-1 * (proposal_value + theta_1_value * x_1_value)).exp()) ).log() expected_first_gradient = torch.autograd.grad( score, proposal_value, create_graph=True )[0] expected_second_gradient = torch.autograd.grad( expected_first_gradient, proposal_value )[0] expected_covar = expected_second_gradient.reshape(1, 1).inverse() * -1 expected_scale_tril = torch.linalg.cholesky(expected_covar) self.assertAlmostEqual( expected_scale_tril.item(), scale_tril.item(), delta=0.001 ) expected_first_gradient = expected_first_gradient.unsqueeze(0) expected_mean = ( proposal_value.unsqueeze(0) + expected_first_gradient.unsqueeze(0).mm(expected_covar) ).squeeze(0) self.assertAlmostEqual(mean.item(), expected_mean.item(), delta=0.001) self.assertAlmostEqual( scale_tril.item(), expected_scale_tril.item(), delta=0.001 ) def test_adaptive_alpha_beta_computation(self): model = self.SampleLogisticRegressionModel() theta_0_key = model.theta_0() nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(theta_0_key) nw_proposer.learning_rate_ = tensor(0.0416, dtype=torch.float64) nw_proposer.running_mean_, nw_proposer.running_var_ = ( tensor(0.079658), tensor(0.0039118), ) nw_proposer.accepted_samples_ = 37 alpha, beta = nw_proposer.compute_beta_priors_from_accepted_lr() self.assertAlmostEqual(nw_proposer.running_mean_.item(), 0.0786, delta=0.0001) self.assertAlmostEqual(nw_proposer.running_var_.item(), 0.00384, delta=0.00001) self.assertAlmostEqual(alpha.item(), 1.4032, delta=0.001) self.assertAlmostEqual(beta.item(), 16.4427, delta=0.001) def test_adaptive_vectorized_alpha_beta_computation(self): model = self.SampleLogisticRegressionModel() theta_0_key = model.theta_0() nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(theta_0_key) nw_proposer.learning_rate_ = tensor([0.0416, 0.0583], dtype=torch.float64) nw_proposer.running_mean_, nw_proposer.running_var_ = ( tensor([0.079658, 0.089861]), tensor([0.0039118, 0.0041231]), ) nw_proposer.accepted_samples_ = 37 alpha, beta = nw_proposer.compute_beta_priors_from_accepted_lr() self.assertListEqual( [round(x.item(), 4) for x in list(nw_proposer.running_mean_)], [0.0786, 0.089], ) self.assertListEqual( [round(x.item(), 4) for x in list(nw_proposer.running_var_)], [0.0038, 0.004], ) self.assertListEqual( [round(x.item(), 4) for x in list(alpha)], [1.4032, 1.6984] ) self.assertListEqual( [round(x.item(), 4) for x in list(beta)], [16.4427, 17.3829] )
beanmachine-main
tests/ppl/inference/proposer/nmc/single_site_real_space_newtonian_monte_carlo_proposer_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tests for fix_problems.py""" import unittest from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.fix_problems import fix_problems from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.model.rv_identifier import RVIdentifier from torch import tensor class FixProblemsTest(unittest.TestCase): def test_fix_problems_01(self) -> None: # Problems that need to be fixed: # # * Single-valued tensors are used in contexts where scalars are needed. # * A multiplication of 0.5 by a probability (from a beta) is used both # as a probability (by a Bernoulli) and a real (by a normal). # # The solutions: # # * The constants are replaced by constants of the appropriate kinds. # * A to-real node is inserted between the multiplication and the normal. # self.maxDiff = None bmg = BMGraphBuilder() one = bmg.add_constant(tensor(1.0)) two = bmg.add_constant(tensor(2.0)) half = bmg.add_constant(tensor(0.5)) beta = bmg.add_beta(two, two) betas = bmg.add_sample(beta) mult = bmg.add_multiplication(half, betas) norm = bmg.add_normal(mult, one) bern = bmg.add_bernoulli(mult) bmg.add_sample(norm) bmg.add_sample(bern) bmg.add_query(mult, RVIdentifier(wrapper=lambda a, b: a, arguments=(1, 1))) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N00[label="0.5:P"]; N01[label="2.0:N"]; N02[label="Beta:P"]; N03[label="Sample:P"]; N04[label="*:P"]; N05[label="1.0:OH"]; N06[label="Normal:R"]; N07[label="Sample:R"]; N08[label="Bernoulli:B"]; N09[label="Sample:B"]; N10[label="Query:P"]; N00 -> N04[label="left:P"]; N01 -> N02[label="alpha:R+"]; N01 -> N02[label="beta:R+"]; N02 -> N03[label="operand:P"]; N03 -> N04[label="right:P"]; N04 -> N06[label="mu:R"]; N04 -> N08[label="probability:P"]; N04 -> N10[label="operator:any"]; N05 -> N06[label="sigma:R+"]; N06 -> N07[label="operand:R"]; N08 -> N09[label="operand:B"]; } """ self.assertEqual(expected.strip(), observed.strip()) bmg, error_report = fix_problems(bmg) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N00[label="0.5:P"]; N01[label="2.0:N"]; N02[label="1.0:OH"]; N03[label="0.5:P"]; N04[label="2.0:R+"]; N05[label="Beta:P"]; N06[label="Sample:P"]; N07[label="*:P"]; N08[label="ToReal:R"]; N09[label="1.0:R+"]; N10[label="Normal:R"]; N11[label="Sample:R"]; N12[label="Bernoulli:B"]; N13[label="Sample:B"]; N14[label="Query:P"]; N03 -> N07[label="left:P"]; N04 -> N05[label="alpha:R+"]; N04 -> N05[label="beta:R+"]; N05 -> N06[label="operand:P"]; N06 -> N07[label="right:P"]; N07 -> N08[label="operand:<=R"]; N07 -> N12[label="probability:P"]; N07 -> N14[label="operator:any"]; N08 -> N10[label="mu:R"]; N09 -> N10[label="sigma:R+"]; N10 -> N11[label="operand:R"]; N12 -> N13[label="operand:B"]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_problems_2(self) -> None: """test_fix_problems_2""" # Problems that need to be fixed: # # * Single-valued tensors are used in contexts where scalars are needed. # * A Boolean (from a Bernoulli) is used in an addition to make a positive real. # * The Boolean is also used as a real and a natural. # # The solutions: # # * The constants are replaced by constants of the appropriate kinds. # * A to-positive-real node is inserted between the addition and the Bernoulli. # * A to-real node is inserted between the normal and the Bernoulli # * An if-then-else is inserted to make the Bernoulli into a natural. # self.maxDiff = None bmg = BMGraphBuilder() # @rv def bern(): # return Bernoulli(tensor(0.5)) # @rv def norm(): # return Normal(bern(), bern() + tensor(1.0)) # @rv def bino(): # return Binomial(bern(), 0.5) one = bmg.add_constant(tensor(1.0)) half = bmg.add_constant(tensor(0.5)) bern = bmg.add_bernoulli(half) berns = bmg.add_sample(bern) plus = bmg.add_addition(berns, one) norm = bmg.add_normal(berns, plus) bino = bmg.add_binomial(berns, half) bmg.add_sample(norm) bmg.add_sample(bino) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N0[label="0.5:P"]; N1[label="Bernoulli:B"]; N2[label="Sample:B"]; N3[label="1.0:OH"]; N4[label="+:R+"]; N5[label="Normal:R"]; N6[label="Sample:R"]; N7[label="Binomial:N"]; N8[label="Sample:N"]; N0 -> N1[label="probability:P"]; N0 -> N7[label="probability:P"]; N1 -> N2[label="operand:B"]; N2 -> N4[label="left:R+"]; N2 -> N5[label="mu:R"]; N2 -> N7[label="count:N"]; N3 -> N4[label="right:R+"]; N4 -> N5[label="sigma:R+"]; N5 -> N6[label="operand:R"]; N7 -> N8[label="operand:N"]; } """ self.assertEqual(expected.strip(), observed.strip()) bmg, error_report = fix_problems(bmg) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N00[label="0.5:P"]; N01[label="1.0:OH"]; N02[label="0.5:P"]; N03[label="Bernoulli:B"]; N04[label="Sample:B"]; N05[label="ToReal:R"]; N06[label="ToPosReal:R+"]; N07[label="1.0:R+"]; N08[label="+:R+"]; N09[label="Normal:R"]; N10[label="Sample:R"]; N11[label="1:N"]; N12[label="0:N"]; N13[label="if:N"]; N14[label="Binomial:N"]; N15[label="Sample:N"]; N02 -> N03[label="probability:P"]; N02 -> N14[label="probability:P"]; N03 -> N04[label="operand:B"]; N04 -> N05[label="operand:<=R"]; N04 -> N06[label="operand:<=R+"]; N04 -> N13[label="condition:B"]; N05 -> N09[label="mu:R"]; N06 -> N08[label="left:R+"]; N07 -> N08[label="right:R+"]; N08 -> N09[label="sigma:R+"]; N09 -> N10[label="operand:R"]; N11 -> N13[label="consequence:N"]; N12 -> N13[label="alternative:N"]; N13 -> N14[label="count:N"]; N14 -> N15[label="operand:N"]; }""" self.assertEqual(expected.strip(), observed.strip()) def test_fix_problems_3(self) -> None: """test_fix_problems_3""" # This test has some problems that cannot be fixed. # # * Two-valued tensor constant used as probability # * Negative number for standard deviation # * Fraction used for count # * Number greater than 1.0 used as probability # @rv def bern(): # return Bernoulli(tensor([0.5, 0.5])) # @rv def norm(): # return Normal(-1.0, -1.0) # @rv def bino(): # return Binomial(3.14, 3.14) self.maxDiff = None bmg = BMGraphBuilder() pi = bmg.add_constant(3.14) mone = bmg.add_constant(-1.0) half = bmg.add_constant(tensor([0.5, 0.5])) bern = bmg.add_bernoulli(half) norm = bmg.add_normal(mone, mone) bino = bmg.add_binomial(pi, pi) bmg.add_sample(bern) bmg.add_sample(norm) bmg.add_sample(bino) bmg, error_report = fix_problems(bmg) observed = str(error_report) expected = """ The count of a binomial is required to be a natural but is a positive real. The probability of a binomial is required to be a probability but is a positive real. The sigma of a normal is required to be a positive real but is a negative real. """ self.assertEqual(observed.strip(), expected.strip()) def test_fix_problems_4(self) -> None: """test_fix_problems_4""" # The problem we have here is: # # * Multiplication is only defined on probability or larger # * We have a multiplication of a bool by a natural # * We require a natural. # # In this scenario, the problem fixer turns the multplication # into an if-then-else. # # @rv def berns(): # return Bernoulli(0.5) # @rv def nats(): # return Binomial(2, 0.5) # @rv def bino(): # return Binomial(berns() * nats(), 0.5) self.maxDiff = None bmg = BMGraphBuilder() two = bmg.add_natural(2) half = bmg.add_probability(0.5) bern = bmg.add_bernoulli(half) berns = bmg.add_sample(bern) nat = bmg.add_binomial(two, half) nats = bmg.add_sample(nat) mult = bmg.add_multiplication(berns, nats) bino = bmg.add_binomial(mult, half) bmg.add_sample(bino) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N0[label="0.5:P"]; N1[label="Bernoulli:B"]; N2[label="Sample:B"]; N3[label="2:N"]; N4[label="Binomial:N"]; N5[label="Sample:N"]; N6[label="*:R+"]; N7[label="Binomial:N"]; N8[label="Sample:N"]; N0 -> N1[label="probability:P"]; N0 -> N4[label="probability:P"]; N0 -> N7[label="probability:P"]; N1 -> N2[label="operand:B"]; N2 -> N6[label="left:R+"]; N3 -> N4[label="count:N"]; N4 -> N5[label="operand:N"]; N5 -> N6[label="right:R+"]; N6 -> N7[label="count:N"]; N7 -> N8[label="operand:N"]; } """ self.assertEqual(expected.strip(), observed.strip()) bmg, error_report = fix_problems(bmg) self.assertEqual("", str(error_report).strip()) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N00[label="0.5:P"]; N01[label="Bernoulli:B"]; N02[label="Sample:B"]; N03[label="2:N"]; N04[label="Binomial:N"]; N05[label="Sample:N"]; N06[label="*:R+"]; N07[label="0:N"]; N08[label="if:N"]; N09[label="Binomial:N"]; N10[label="Sample:N"]; N11[label="0.0:Z"]; N00 -> N01[label="probability:P"]; N00 -> N04[label="probability:P"]; N00 -> N09[label="probability:P"]; N01 -> N02[label="operand:B"]; N02 -> N06[label="left:R+"]; N02 -> N08[label="condition:B"]; N03 -> N04[label="count:N"]; N04 -> N05[label="operand:N"]; N05 -> N06[label="right:R+"]; N05 -> N08[label="consequence:N"]; N07 -> N08[label="alternative:N"]; N08 -> N09[label="count:N"]; N09 -> N10[label="operand:N"]; }""" self.assertEqual(expected.strip(), observed.strip()) def test_fix_problems_5(self) -> None: """test_fix_problems_5""" # Division becomes power. self.maxDiff = None bmg = BMGraphBuilder() # @rv def hcs(n): # return HalfCauchy(1.0) # @rv def norm(): # return Normal(log(hcs(3) ** (hcs(1) / hcs(2))), 1.0) one = bmg.add_constant(1.0) hc = bmg.add_halfcauchy(one) hcs1 = bmg.add_sample(hc) hcs2 = bmg.add_sample(hc) hcs3 = bmg.add_sample(hc) q = bmg.add_division(hcs1, hcs2) p = bmg.add_power(hcs3, q) lg = bmg.add_log(p) norm = bmg.add_normal(lg, one) bmg.add_sample(norm) bmg, error_report = fix_problems(bmg) observed = str(error_report) expected = "" self.assertEqual(observed.strip(), expected.strip()) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N00[label="1.0:OH"]; N01[label="1.0:R+"]; N02[label="HalfCauchy:R+"]; N03[label="Sample:R+"]; N04[label="Sample:R+"]; N05[label="/:U"]; N06[label="Sample:R+"]; N07[label="-1.0:R"]; N08[label="**:R+"]; N09[label="*:R+"]; N10[label="**:R+"]; N11[label="Log:R"]; N12[label="Normal:R"]; N13[label="Sample:R"]; N14[label="-1.0:R-"]; N01 -> N02[label="scale:R+"]; N01 -> N12[label="sigma:R+"]; N02 -> N03[label="operand:R+"]; N02 -> N04[label="operand:R+"]; N02 -> N06[label="operand:R+"]; N03 -> N05[label="left:any"]; N03 -> N09[label="left:R+"]; N04 -> N05[label="right:any"]; N04 -> N08[label="left:R+"]; N06 -> N10[label="left:R+"]; N07 -> N08[label="right:R"]; N08 -> N09[label="right:R+"]; N09 -> N10[label="right:R+"]; N10 -> N11[label="operand:R+"]; N11 -> N12[label="mu:R"]; N12 -> N13[label="operand:R"]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_problems_6(self) -> None: """test_fix_problems_6""" # This test shows that we can rewrite a division by a constant # into a multiplication. self.maxDiff = None bmg = BMGraphBuilder() # def hcs(): return HalfCauchy(1.0) # def norm(): return Normal(hcs() / 2.5, 1.0) one = bmg.add_constant(1.0) two = bmg.add_constant(2.5) hc = bmg.add_halfcauchy(one) hcs = bmg.add_sample(hc) q = bmg.add_division(hcs, two) norm = bmg.add_normal(q, one) bmg.add_sample(norm) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N0[label="1.0:OH"]; N1[label="HalfCauchy:R+"]; N2[label="Sample:R+"]; N3[label="2.5:R+"]; N4[label="/:U"]; N5[label="Normal:U"]; N6[label="Sample:U"]; N0 -> N1[label="scale:R+"]; N0 -> N5[label="sigma:any"]; N1 -> N2[label="operand:R+"]; N2 -> N4[label="left:any"]; N3 -> N4[label="right:any"]; N4 -> N5[label="mu:any"]; N5 -> N6[label="operand:any"]; } """ self.assertEqual(expected.strip(), observed.strip()) bmg, error_report = fix_problems(bmg) self.assertEqual("", str(error_report).strip()) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N00[label="1.0:OH"]; N01[label="1.0:R+"]; N02[label="HalfCauchy:R+"]; N03[label="Sample:R+"]; N04[label="2.5:R+"]; N05[label="/:U"]; N06[label="0.4:R+"]; N07[label="*:R+"]; N08[label="ToReal:R"]; N09[label="Normal:R"]; N10[label="Sample:R"]; N11[label="0.4:P"]; N01 -> N02[label="scale:R+"]; N01 -> N09[label="sigma:R+"]; N02 -> N03[label="operand:R+"]; N03 -> N05[label="left:any"]; N03 -> N07[label="left:R+"]; N04 -> N05[label="right:any"]; N06 -> N07[label="right:R+"]; N07 -> N08[label="operand:<=R"]; N08 -> N09[label="mu:R"]; N09 -> N10[label="operand:R"]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_problems_7(self) -> None: """test_fix_problems_7""" # The problem here is that we have two uniform distributions that # we cannot turn into a flat distribution, and one we can. We therefore # expect that we will get two errors. self.maxDiff = None bmg = BMGraphBuilder() # @rv def foo1(): # return Uniform(0.0, 1.0) # OK # @rv def foo2(): # return Uniform(1.0, 2.0) # Bad # @rv def foo3(): # return Uniform(0.0, foo2()) # Bad zero = bmg.add_constant(0.0) one = bmg.add_constant(1.0) two = bmg.add_constant(2.0) foo1 = bmg.add_uniform(zero, one) bmg.add_sample(foo1) foo2 = bmg.add_uniform(one, two) foo2s = bmg.add_sample(foo2) foo3 = bmg.add_uniform(one, foo2s) bmg.add_sample(foo3) bmg, error_report = fix_problems(bmg) observed = str(error_report) expected = """ The model uses a uniform operation unsupported by Bean Machine Graph. The unsupported node is the operand of a sample. The model uses a uniform operation unsupported by Bean Machine Graph. The unsupported node is the operand of a sample. """ self.assertEqual(observed.strip(), expected.strip()) def test_fix_problems_8(self) -> None: """test_fix_problems_8""" # This test shows that we can rewrite a chi2 into a gamma. self.maxDiff = None bmg = BMGraphBuilder() # @rv def hcs(): # return HalfCauchy(1.0) # @rv def chi2(): # return Chi2(hcs()) one = bmg.add_constant(1.0) hc = bmg.add_halfcauchy(one) hcs = bmg.add_sample(hc) chi2 = bmg.add_chi2(hcs) bmg.add_sample(chi2) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N0[label="1.0:OH"]; N1[label="HalfCauchy:R+"]; N2[label="Sample:R+"]; N3[label="Chi2:U"]; N4[label="Sample:U"]; N0 -> N1[label="scale:R+"]; N1 -> N2[label="operand:R+"]; N2 -> N3[label="df:any"]; N3 -> N4[label="operand:any"]; } """ self.assertEqual(expected.strip(), observed.strip()) bmg, error_report = fix_problems(bmg) self.assertEqual("", str(error_report).strip()) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N0[label="1.0:OH"]; N1[label="1.0:R+"]; N2[label="HalfCauchy:R+"]; N3[label="Sample:R+"]; N4[label="Chi2:U"]; N5[label="0.5:R+"]; N6[label="*:R+"]; N7[label="Gamma:R+"]; N8[label="Sample:R+"]; N1 -> N2[label="scale:R+"]; N2 -> N3[label="operand:R+"]; N3 -> N4[label="df:any"]; N3 -> N6[label="left:R+"]; N5 -> N6[label="right:R+"]; N5 -> N7[label="rate:R+"]; N6 -> N7[label="concentration:R+"]; N7 -> N8[label="operand:R+"]; }""" self.assertEqual(expected.strip(), observed.strip()) def test_fix_problems_9(self) -> None: """test_fix_problems_9""" # The problem we have here is that natural raised to bool # is not supported in BMG without converting both to # positive real, but natural raised to bool is plainly # natural. We generate an if-then-else. # @rv def berns(): # return Bernoulli(0.5) # @rv def nats(): # return Binomial(2, 0.5) # @rv def bino(): # return Binomial(nats() ** berns(), 0.5) self.maxDiff = None bmg = BMGraphBuilder() two = bmg.add_natural(2) half = bmg.add_probability(0.5) bern = bmg.add_bernoulli(half) berns = bmg.add_sample(bern) nat = bmg.add_binomial(two, half) nats = bmg.add_sample(nat) powr = bmg.add_power(nats, berns) bino = bmg.add_binomial(powr, half) bmg.add_sample(bino) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N0[label="2:N"]; N1[label="0.5:P"]; N2[label="Binomial:N"]; N3[label="Sample:N"]; N4[label="Bernoulli:B"]; N5[label="Sample:B"]; N6[label="**:R+"]; N7[label="Binomial:N"]; N8[label="Sample:N"]; N0 -> N2[label="count:N"]; N1 -> N2[label="probability:P"]; N1 -> N4[label="probability:P"]; N1 -> N7[label="probability:P"]; N2 -> N3[label="operand:N"]; N3 -> N6[label="left:R+"]; N4 -> N5[label="operand:B"]; N5 -> N6[label="right:R+"]; N6 -> N7[label="count:N"]; N7 -> N8[label="operand:N"]; } """ self.assertEqual(expected.strip(), observed.strip()) bmg, error_report = fix_problems(bmg) self.assertEqual("", str(error_report).strip()) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N00[label="2:N"]; N01[label="0.5:P"]; N02[label="Binomial:N"]; N03[label="Sample:N"]; N04[label="Bernoulli:B"]; N05[label="Sample:B"]; N06[label="**:R+"]; N07[label="1:N"]; N08[label="if:N"]; N09[label="Binomial:N"]; N10[label="Sample:N"]; N11[label="1.0:OH"]; N00 -> N02[label="count:N"]; N01 -> N02[label="probability:P"]; N01 -> N04[label="probability:P"]; N01 -> N09[label="probability:P"]; N02 -> N03[label="operand:N"]; N03 -> N06[label="left:R+"]; N03 -> N08[label="consequence:N"]; N04 -> N05[label="operand:B"]; N05 -> N06[label="right:R+"]; N05 -> N08[label="condition:B"]; N07 -> N08[label="alternative:N"]; N08 -> N09[label="count:N"]; N09 -> N10[label="operand:N"]; }""" self.assertEqual(expected.strip(), observed.strip()) def test_fix_problems_10(self) -> None: """test_fix_problems_10""" # Demonstrate that we can rewrite 1-p for probability p into # complement(p) -- which is of type P -- instead of # add(1, negate(p)) which is of type R. # TODO: Also demonstrate that this works for 1-b # TODO: Get this working for the "not" operator, since 1-b # and "not b" are the same thing for bool b. self.maxDiff = None bmg = BMGraphBuilder() # @rv def beta(): # return Beta(2.0, 2.0) # @rv def bern(): # return Bernoulli(1 - beta()) # good! one = bmg.add_constant(1.0) two = bmg.add_constant(2.0) beta = bmg.add_beta(two, two) betas = bmg.add_sample(beta) negate = bmg.add_negate(betas) complement = bmg.add_addition(one, negate) bern = bmg.add_bernoulli(complement) bmg.add_sample(bern) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N0[label="1.0:OH"]; N1[label="2.0:N"]; N2[label="Beta:P"]; N3[label="Sample:P"]; N4[label="-:R-"]; N5[label="+:R"]; N6[label="Bernoulli:B"]; N7[label="Sample:B"]; N0 -> N5[label="left:R"]; N1 -> N2[label="alpha:R+"]; N1 -> N2[label="beta:R+"]; N2 -> N3[label="operand:P"]; N3 -> N4[label="operand:R+"]; N4 -> N5[label="right:R"]; N5 -> N6[label="probability:P"]; N6 -> N7[label="operand:B"]; } """ self.assertEqual(expected.strip(), observed.strip()) bmg, error_report = fix_problems(bmg) self.assertEqual("", str(error_report).strip()) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N0[label="2.0:N"]; N1[label="1.0:OH"]; N2[label="2.0:R+"]; N3[label="Beta:P"]; N4[label="Sample:P"]; N5[label="-:R-"]; N6[label="+:R"]; N7[label="complement:P"]; N8[label="Bernoulli:B"]; N9[label="Sample:B"]; N1 -> N6[label="left:R"]; N2 -> N3[label="alpha:R+"]; N2 -> N3[label="beta:R+"]; N3 -> N4[label="operand:P"]; N4 -> N5[label="operand:R+"]; N4 -> N7[label="operand:P"]; N5 -> N6[label="right:R"]; N7 -> N8[label="probability:P"]; N8 -> N9[label="operand:B"]; }""" self.assertEqual(expected.strip(), observed.strip()) def test_fix_problems_11(self) -> None: """test_fix_problems_11""" # Here we demonstrate that we treat the negative log of a # probability as a positive real. (In a previous iteration # we generated a special negative log node, but now we can # do it directly without fixing up the graph.) # @rv def beta1(): # return Beta(2.0, 2.0) # @rv def beta2(): # return Beta(-beta1.log(), 2.0) self.maxDiff = None bmg = BMGraphBuilder() two = bmg.add_constant(2.0) beta1 = bmg.add_beta(two, two) beta1s = bmg.add_sample(beta1) logprob = bmg.add_log(beta1s) neglogprob = bmg.add_negate(logprob) beta2 = bmg.add_beta(neglogprob, two) bmg.add_sample(beta2) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N0[label="2.0:N"]; N1[label="Beta:P"]; N2[label="Sample:P"]; N3[label="Log:R-"]; N4[label="-:R+"]; N5[label="Beta:P"]; N6[label="Sample:P"]; N0 -> N1[label="alpha:R+"]; N0 -> N1[label="beta:R+"]; N0 -> N5[label="beta:R+"]; N1 -> N2[label="operand:P"]; N2 -> N3[label="operand:P"]; N3 -> N4[label="operand:R-"]; N4 -> N5[label="alpha:R+"]; N5 -> N6[label="operand:P"]; } """ self.assertEqual(expected.strip(), observed.strip()) bmg, error_report = fix_problems(bmg) self.assertEqual("", str(error_report).strip()) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N0[label="2.0:N"]; N1[label="2.0:R+"]; N2[label="Beta:P"]; N3[label="Sample:P"]; N4[label="Log:R-"]; N5[label="-:R+"]; N6[label="Beta:P"]; N7[label="Sample:P"]; N1 -> N2[label="alpha:R+"]; N1 -> N2[label="beta:R+"]; N1 -> N6[label="beta:R+"]; N2 -> N3[label="operand:P"]; N3 -> N4[label="operand:P"]; N4 -> N5[label="operand:R-"]; N5 -> N6[label="alpha:R+"]; N6 -> N7[label="operand:P"]; }""" self.assertEqual(expected.strip(), observed.strip()) def test_fix_problems_12(self) -> None: """test_fix_problems_12""" # We flag impossible observations as errors. self.maxDiff = None bmg = BMGraphBuilder() # @rv def bern(): # return Bernoulli(0.5) # @rv def bino(): # return Binomial(2, 0.5) # @rv def norm(): # return Normal(0, 1) zero = bmg.add_constant(0.0) one = bmg.add_constant(1.0) two = bmg.add_constant(2.0) half = bmg.add_constant(0.5) bern = bmg.add_bernoulli(half) berns = bmg.add_sample(bern) bino = bmg.add_binomial(two, half) binos = bmg.add_sample(bino) norm = bmg.add_normal(zero, one) norms = bmg.add_sample(norm) bmg.add_observation(berns, -1.5) # Bad bmg.add_observation(binos, 5.25) # Bad bmg.add_observation(norms, True) # OK; can be converted to 1.0 bmg, error_report = fix_problems(bmg) observed = str(error_report) expected = """ A Bernoulli distribution is observed to have value -1.5 but only produces samples of type bool. A binomial distribution is observed to have value 5.25 but only produces samples of type natural. """ self.assertEqual(observed.strip(), expected.strip()) def test_fix_problems_13(self) -> None: """test_fix_problems_13""" # Observations of the wrong type are fixed up. self.maxDiff = None bmg = BMGraphBuilder() # @rv def bern(): # return Bernoulli(0.5) # @rv def bino(): # return Binomial(2, 0.5) # @rv def norm(): # return Normal(0, 1) zero = bmg.add_constant(0.0) one = bmg.add_constant(1.0) two = bmg.add_constant(2.0) half = bmg.add_constant(0.5) bern = bmg.add_bernoulli(half) berns = bmg.add_sample(bern) bino = bmg.add_binomial(two, half) binos = bmg.add_sample(bino) norm = bmg.add_normal(zero, one) norms = bmg.add_sample(norm) bmg.add_observation(berns, 0.0) # Should be bool bmg.add_observation(binos, 5.0) # Should be int bmg.add_observation(norms, True) # Should be real bmg, error_report = fix_problems(bmg) self.assertEqual(str(error_report).strip(), "") observed = to_dot( bmg, node_types=True, edge_requirements=True, ) # The observations have been converted to the correct types: expected = """ digraph "graph" { N00[label="0.5:P"]; N01[label="0.5:P"]; N02[label="Bernoulli:B"]; N03[label="Sample:B"]; N04[label="Observation False:B"]; N05[label="2.0:N"]; N06[label="2:N"]; N07[label="Binomial:N"]; N08[label="Sample:N"]; N09[label="Observation 5:N"]; N10[label="0.0:Z"]; N11[label="1.0:OH"]; N12[label="0.0:R"]; N13[label="1.0:R+"]; N14[label="Normal:R"]; N15[label="Sample:R"]; N16[label="Observation 1.0:R"]; N01 -> N02[label="probability:P"]; N01 -> N07[label="probability:P"]; N02 -> N03[label="operand:B"]; N03 -> N04[label="operand:any"]; N06 -> N07[label="count:N"]; N07 -> N08[label="operand:N"]; N08 -> N09[label="operand:any"]; N12 -> N14[label="mu:R"]; N13 -> N14[label="sigma:R+"]; N14 -> N15[label="operand:R"]; N15 -> N16[label="operand:any"]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_problems_14(self) -> None: """test_fix_problems_14""" # Fixes for problems involving negative reals. self.maxDiff = None bmg = BMGraphBuilder() # Right now the only node we have of type negative real is # a constant; if we force a scenario where a negative real # constant is used in a context where a real is needed, # we generate a new real constant. m = bmg.add_neg_real(-1.0) s = bmg.add_pos_real(1.0) norm = bmg.add_normal(m, s) bmg.add_sample(norm) bmg, error_report = fix_problems(bmg) self.assertEqual(str(error_report).strip(), "") observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N0[label="-1.0:R-"]; N1[label="-1.0:R"]; N2[label="1.0:R+"]; N3[label="Normal:R"]; N4[label="Sample:R"]; N1 -> N3[label="mu:R"]; N2 -> N3[label="sigma:R+"]; N3 -> N4[label="operand:R"]; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/fix_problems_test.py
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # BM -> BMG compiler index tests import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor from torch.distributions import Bernoulli, Binomial, HalfCauchy, Normal # Simplexes are tested in dirichlet_test.py # TODO: Test array of Booleans @bm.random_variable def flip(): return Bernoulli(0.5) @bm.random_variable def real(): return Normal(tensor([1.5, -1.5])[flip()], 1.0) @bm.random_variable def pos_real(): return Normal(0.0, tensor([1.5, 2.5])[flip()]) @bm.random_variable def neg_real(): return Bernoulli(tensor([-1.5, -2.5])[flip()].exp()) @bm.random_variable def prob(): return Bernoulli(tensor([0.5, 0.25])[flip()]) @bm.random_variable def natural(): return Binomial(tensor([2, 3])[flip()], 0.75) @bm.random_variable def normal(): return Normal(0.0, 1.0) @bm.random_variable def hc(): return HalfCauchy(0.0) @bm.random_variable def optimize_away_index(): t = tensor([normal(), hc()]) return Normal(t[0], t[1]) @bm.functional def column_index(): t = tensor([[normal(), hc()], [hc(), normal()]]) return t[flip()][flip()] @bm.functional def tuple_index_0(): # Normal tensor, normal tuple index t = tensor([[2.0, 3.0], [4.0, 5.0]]) return flip() * t[(1, 1)] @bm.functional def tuple_index_1(): # Normal tensor, stochastic tuple index t = tensor([[2.0, 3.0], [4.0, 5.0]]) return t[flip(), flip()] @bm.functional def tuple_index_2(): # Stochastic tensor, normal tuple index t = tensor([[normal(), hc()], [hc(), normal()]]) return t[1, 1] @bm.functional def tuple_index_3(): # Stochastic tensor, stochastic tuple index t = tensor([[normal(), hc()], [hc(), normal()]]) return t[flip(), flip()] @bm.functional def negative_constant_index(): # Python allows an index to be negative; it means to start counting from # the other end. BMG does not. Verify that we give an error message. # TODO: Consider allowing this if the index is a constant; we can do # a transformation to t[1] here. t = tensor([hc(), normal()]) return t[-1] @bm.functional def unsupported_slice_1(): t = tensor([hc(), normal()]) return t[1::] @bm.functional def unsupported_slice_2(): t = tensor([1.0, 2.0]) return t[flip() : :] class IndexTest(unittest.TestCase): def test_index_constant_vector_stochastic_index(self) -> None: self.maxDiff = None observed = BMGInference().to_dot( [pos_real(), real(), neg_real(), prob(), natural()], {}, ) expected = """ digraph "graph" { N00[label=0.5]; N01[label=Bernoulli]; N02[label=Sample]; N03[label=0.0]; N04[label="[1.5,2.5]"]; N05[label=1]; N06[label=0]; N07[label=if]; N08[label=index]; N09[label=Normal]; N10[label=Sample]; N11[label=Query]; N12[label="[1.5,-1.5]"]; N13[label=index]; N14[label=1.0]; N15[label=Normal]; N16[label=Sample]; N17[label=Query]; N18[label="[-1.5,-2.5]"]; N19[label=index]; N20[label=Exp]; N21[label=Bernoulli]; N22[label=Sample]; N23[label=Query]; N24[label="[0.5,0.25]"]; N25[label=index]; N26[label=Bernoulli]; N27[label=Sample]; N28[label=Query]; N29[label="[2,3]"]; N30[label=index]; N31[label=0.75]; N32[label=Binomial]; N33[label=Sample]; N34[label=Query]; N00 -> N01; N01 -> N02; N02 -> N07; N03 -> N09; N04 -> N08; N05 -> N07; N06 -> N07; N07 -> N08; N07 -> N13; N07 -> N19; N07 -> N25; N07 -> N30; N08 -> N09; N09 -> N10; N10 -> N11; N12 -> N13; N13 -> N15; N14 -> N15; N15 -> N16; N16 -> N17; N18 -> N19; N19 -> N20; N20 -> N21; N21 -> N22; N22 -> N23; N24 -> N25; N25 -> N26; N26 -> N27; N27 -> N28; N29 -> N30; N30 -> N32; N31 -> N32; N32 -> N33; N33 -> N34; } """ self.assertEqual(expected.strip(), observed.strip()) def test_index_stochastic_tensor_constant_index(self) -> None: self.maxDiff = None # Here we demonstrate that we can make a tensor containing graph # nodes and index into that with a constant; the indexing operation # is optimized out. observed = BMGInference().to_dot([optimize_away_index()], {}) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=0.0]; N5[label=HalfCauchy]; N6[label=Sample]; N7[label=Normal]; N8[label=Sample]; N9[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N7; N4 -> N5; N5 -> N6; N6 -> N7; N7 -> N8; N8 -> N9; } """ self.assertEqual(expected.strip(), observed.strip()) def test_column_index(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([column_index()], {}) expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=0.0]; N05[label=HalfCauchy]; N06[label=Sample]; N07[label=0.5]; N08[label=Bernoulli]; N09[label=Sample]; N10[label=2]; N11[label=ToReal]; N12[label=ToMatrix]; N13[label=1]; N14[label=0]; N15[label=if]; N16[label=ColumnIndex]; N17[label=index]; N18[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N03 -> N12; N03 -> N12; N04 -> N05; N05 -> N06; N06 -> N11; N07 -> N08; N08 -> N09; N09 -> N15; N10 -> N12; N10 -> N12; N11 -> N12; N11 -> N12; N12 -> N16; N13 -> N15; N14 -> N15; N15 -> N16; N15 -> N17; N16 -> N17; N17 -> N18; } """ self.assertEqual(expected.strip(), observed.strip()) def test_tuple_index(self) -> None: self.maxDiff = None # Normal tensor, normal tuple index, so there should be no stochastic # index operation in the graph: observed = BMGInference().to_dot([tuple_index_0()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=5]; N4[label=0]; N5[label=if]; N6[label=Query]; N0 -> N1; N1 -> N2; N2 -> N5; N3 -> N5; N4 -> N5; N5 -> N6; } """ self.assertEqual(expected.strip(), observed.strip()) # Normal tensor, stochastic tuple index: observed = BMGInference().to_dot([tuple_index_1()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label="[[2.0,3.0],\\\\n[4.0,5.0]]"]; N4[label=1]; N5[label=0]; N6[label=if]; N7[label=ColumnIndex]; N8[label=index]; N9[label=Query]; N0 -> N1; N1 -> N2; N2 -> N6; N3 -> N7; N4 -> N6; N5 -> N6; N6 -> N7; N6 -> N8; N7 -> N8; N8 -> N9; } """ self.assertEqual(expected.strip(), observed.strip()) # Stochastic tensor, normal tuple index. Note that in this case # we optimize away the stochastic tensor entirely since the # index is a constant. observed = BMGInference().to_dot([tuple_index_2()], {}) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=0.0]; N5[label=HalfCauchy]; N6[label=Sample]; N7[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N7; N4 -> N5; N5 -> N6; } """ self.assertEqual(expected.strip(), observed.strip()) # Stochastic tensor, stochastic tuple index. observed = BMGInference().to_dot([tuple_index_3()], {}) expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=0.0]; N05[label=HalfCauchy]; N06[label=Sample]; N07[label=0.5]; N08[label=Bernoulli]; N09[label=Sample]; N10[label=2]; N11[label=ToReal]; N12[label=ToMatrix]; N13[label=1]; N14[label=0]; N15[label=if]; N16[label=ColumnIndex]; N17[label=index]; N18[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N03 -> N12; N03 -> N12; N04 -> N05; N05 -> N06; N06 -> N11; N07 -> N08; N08 -> N09; N09 -> N15; N10 -> N12; N10 -> N12; N11 -> N12; N11 -> N12; N12 -> N16; N13 -> N15; N14 -> N15; N15 -> N16; N15 -> N17; N16 -> N17; N17 -> N18; } """ self.assertEqual(expected.strip(), observed.strip()) def test_negative_index(self) -> None: self.maxDiff = None with self.assertRaises(ValueError) as ex: BMGInference().to_dot([negative_constant_index()], {}) self.assertEqual( "The right of an index is required to be a natural but is a negative real.", str(ex.exception), ) def test_unsupported_slice(self) -> None: self.maxDiff = None with self.assertRaises(ValueError) as ex: BMGInference().to_dot([unsupported_slice_1()], {}) self.assertEqual( "Stochastic slices are not yet implemented.", str(ex.exception), ) with self.assertRaises(ValueError) as ex: BMGInference().to_dot([unsupported_slice_2()], {}) self.assertEqual( "Stochastic slices are not yet implemented.", str(ex.exception), )
beanmachine-main
tests/ppl/compiler/index_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """End-to-end test for tutorial on GMM with Poisson number of components""" # This file is a manual replica of the Bento tutorial with the same name # TODO: The disabled test generates the following error: # E TypeError: Distribution 'Poisson' is not supported by Bean Machine Graph. # This will need to be fixed for OSS readiness task import logging import unittest # Comments after imports suggest alternative comment style (for original tutorial) import beanmachine.ppl as bm import torch # from torch import manual_seed, tensor import torch.distributions as dist # from torch.distributions import Bernoulli, Normal, Uniform from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor # This makes the results deterministic and reproducible. logging.getLogger("beanmachine").setLevel(50) torch.manual_seed(42) # Model class GaussianMixtureModel(object): def __init__(self, K): self.K = k @bm.random_variable def alpha(self, k): return dist.Dirichlet(5 * torch.ones(k)) @bm.random_variable def mu(self, c): return dist.MultivariateNormal( loc=torch.zeros(2), covariance_matrix=10.0 * torch.eye(2) ) @bm.random_variable def sigma(self, c): return dist.Gamma(1, 10) @bm.random_variable def component(self, i): alpha = self.alpha(self.K) return dist.Categorical(alpha) @bm.random_variable def y(self, i): c = self.component(i).item() return dist.MultivariateNormal( loc=self.mu(c), covariance_matrix=self.sigma(c) ** 2 * torch.eye(2) + 1e-3 ) # Creating sample data n = 32 # num observations k = 4 # true number of clusters gmm = GaussianMixtureModel(K=4) ground_truth = { **{ gmm.alpha(k): torch.ones(k) * 1.0 / k, }, **{gmm.mu(i): tensor(i % 2).float() for i in range(k)}, **{gmm.sigma(i): tensor(0.1) for i in range(k)}, **{gmm.component(i): tensor(i % k).float() for i in range(n)}, } # [Visualization code in tutorial skipped] # Inference parameters num_samples = ( 1 ###00 Sample size should not affect (the ability to find) compilation issues. ) queries = ( [gmm.alpha(gmm.K)] + [gmm.component(j) for j in range(n)] + [gmm.mu(i) for i in range(k)] + [gmm.sigma(i) for i in range(k)] ) observations = { gmm.y(i): ground_truth[gmm.mu(ground_truth[gmm.component(i)].item())] for i in range(n) } class tutorialGMMwith2DimensionsAnd4Components(unittest.TestCase): def test_tutorial_GMM_with_2_dimensions_and_4_components(self) -> None: """Check BM and BMG inference both terminate""" self.maxDiff = None # Inference with BM torch.manual_seed( 42 ) # Note: Second time we seed. Could be a good tutorial style mh = bm.CompositionalInference({...: bm.SingleSiteNewtonianMonteCarlo()}) mh.infer( queries, observations, num_samples=num_samples, num_chains=1, ) self.assertTrue(True, msg="We just want to check this point is reached") @unittest.skip("TODO: enable when passing") def test_tutorial_GMM_with_2_dimensions_and_4_components_to_dot_cpp_python( self, ) -> None: self.maxDiff = None observed = BMGInference().to_dot(queries, observations) expected = """ """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_cpp(queries, observations) expected = """ """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_python(queries, observations) expected = """ """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/tutorial_GMM_with_2_dimensions_and_4_components_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Compare original and conjugate prior transformed Beta-Bernoulli model""" import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor from torch.distributions import Bernoulli, Beta class HeadsRateModel(object): """Original, untransformed model""" @bm.random_variable def theta(self): return Beta(2.0, 2.0) @bm.random_variable def y(self, i): return Bernoulli(self.theta()) def run(self): queries = [self.theta()] observations = { self.y(0): tensor(0.0), self.y(1): tensor(0.0), self.y(2): tensor(1.0), self.y(3): tensor(0.0), } num_samples = 1000 bmg = BMGInference() skip_optimizations = set() posterior = bmg.infer( queries, observations, num_samples, 1, skip_optimizations=skip_optimizations ) bmg_graph = bmg.to_dot( queries, observations, num_samples, skip_optimizations=skip_optimizations ) theta_samples = posterior[self.theta()][0] return theta_samples, bmg_graph class HeadsRateModelTransformed(object): """Conjugate Prior Transformed model""" @bm.random_variable def theta(self): return Beta(2.0, 2.0) @bm.random_variable def y(self, i): return Bernoulli(self.theta()) @bm.random_variable def theta_transformed(self): # Analytical posterior Beta(alpha + sum y_i, beta + n - sum y_i) return Beta(2.0 + 1.0, 2.0 + (4.0 - 1.0)) def run(self): # queries = [self.theta()] queries_transformed = [self.theta_transformed()] # observations = { # self.y(0): tensor(0.0), # self.y(1): tensor(0.0), # self.y(2): tensor(1.0), # self.y(3): tensor(0.0), # } observations_transformed = {} num_samples = 1000 bmg = BMGInference() # posterior = bmg.infer(queries, observations, num_samples) posterior_transformed = bmg.infer( queries_transformed, observations_transformed, num_samples ) # theta_samples = posterior[self.theta](0) bmg_graph = bmg.to_dot(queries_transformed, observations_transformed) theta_samples_transformed = posterior_transformed[self.theta_transformed()][0] return theta_samples_transformed, bmg_graph class HeadsRateModelTest(unittest.TestCase): def test_beta_bernoulli_conjugate_graph(self) -> None: _, heads_rate_model_graph = HeadsRateModel().run() _, heads_rate_model_transformed_graph = HeadsRateModelTransformed().run() self.assertEqual(heads_rate_model_graph, heads_rate_model_transformed_graph)
beanmachine-main
tests/ppl/compiler/fix_beta_bernoulli_basic_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import platform import re import unittest import beanmachine.graph as graph import beanmachine.ppl as bm import beanmachine.ppl.compiler.performance_report as pr from beanmachine.ppl.inference import BMGInference from torch import tensor from torch.distributions import Bernoulli, Beta @bm.random_variable def coin(): return Beta(2.0, 2.0) @bm.random_variable def flip(): return Bernoulli(coin()) def tidy(s): s = re.sub(r"generated_at:.*\n", "generated_at: --\n", s) s = re.sub(r"\d+ ms", "-- ms", s) s = re.sub(r"\(\d+\)", "(--)", s) return s class PerfReportTest(unittest.TestCase): def test_bmg_performance_report_1(self) -> None: if platform.system() == "Windows": self.skipTest("Disabling perf tests until flakiness is resolved") # How to obtain the performance report from BMGInference self.maxDiff = None queries = [coin()] observations = {flip(): tensor(1.0)} num_samples = 1000 # We have an _infer method which returns both samples and a # performance report. _, report = BMGInference()._infer(queries, observations, num_samples) # You can convert the report to a string: observed = str(report) expected = """ title: Bean Machine Graph performance report generated_at: -- num_samples: 1000 algorithm: 3 seed: 5123401 node_count: 5 edge_count: 5 factor_count: 0 dist_count: 2 const_count: 1 op_count: 2 add_count: 0 det_supp_count: [0] bmg_profiler_report: nmc_infer:(1) -- ms initialize:(1) -- ms collect_samples:(1) -- ms """ # Note that there are two profiler reports: one for time spent # in the compiler and one for time spent in BMG inference. # # See next test for details of how to access the elements of the # perf report and the profile reports self.assertTrue(tidy(observed).strip().startswith(tidy(expected).strip())) def test_bmg_performance_report_2(self) -> None: if platform.system() == "Windows": self.skipTest("Disabling perf tests until flakiness is resolved") # How to use the performance reporter calling BMG directly # rather than through BMGInference / BMGraphBuilder. self.maxDiff = None g = graph.Graph() # Turn on data collection g.collect_performance_data(True) # Build a simple model: # # BETA(2, 2) --> SAMPLE --> BERNOULLI --> SAMPLE --> observe False # n0 = g.add_constant_pos_real(2.0) n1 = g.add_distribution( graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [n0, n0] ) n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n3 = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [n2] ) n4 = g.add_operator(graph.OperatorType.SAMPLE, [n3]) g.observe(n4, False) g.query(n2) # Run inference num_samples = 1000 g.infer(num_samples, graph.InferenceType.NMC) # Fetch raw perf data (JSON string) js = g.performance_report() # decode perf_report = pr.json_to_perf_report(js) # You can dump the entire report as a string. Notice that this # version of the report does not include beanstalk compiler timings # because of course we did not run the compiler in this test. observed = str(perf_report) expected = """ title: Bean Machine Graph performance report generated_at: -- num_samples: 1000 algorithm: 3 seed: 5123401 node_count: 5 edge_count: 5 factor_count: 0 dist_count: 2 const_count: 1 op_count: 2 add_count: 0 det_supp_count: [0] bmg_profiler_report: nmc_infer:(1) -- ms initialize:(1) -- ms collect_samples:(1) -- ms step:(1000) -- ms create_prop:(2000) -- ms compute_grads:(--) -- ms unattributed: -- ms sample:(1000) -- ms save_old:(1000) -- ms eval:(1000) -- ms clear_grads:(1000) -- ms restore_old:(7) -- ms unattributed: -- ms collect_sample:(1000) -- ms unattributed: -- ms unattributed: -- ms Total time: -- ms """ self.assertEqual(tidy(expected).strip(), tidy(observed).strip()) # Or you can look at each element programmatically: self.assertEqual("Bean Machine Graph performance report", perf_report.title) self.assertEqual(3, perf_report.algorithm) self.assertEqual(num_samples, perf_report.num_samples) self.assertEqual(5, perf_report.node_count) self.assertEqual(2, perf_report.dist_count) self.assertEqual(1, perf_report.const_count) self.assertEqual(0, perf_report.factor_count) self.assertEqual(2, perf_report.op_count) self.assertEqual(0, perf_report.add_count) self.assertEqual(5, perf_report.edge_count) # You can also look at profiler elements programmatically. # # Ex: how much time do we spend initializing the inference algorithm # data structures? prof_report = perf_report.bmg_profiler_report self.assertLess(0, prof_report.nmc_infer.total_time) self.assertLess(0, prof_report.nmc_infer.initialize.total_time) # How many times did we do a step? self.assertEqual(1000, prof_report.nmc_infer.collect_samples.step.calls) # Or you can dump just the profiler report as a string. observed = str(prof_report) expected = """ nmc_infer:(1) -- ms initialize:(1) -- ms collect_samples:(1) -- ms step:(1000) -- ms create_prop:(2000) -- ms compute_grads:(--) -- ms unattributed: -- ms sample:(1000) -- ms save_old:(1000) -- ms eval:(1000) -- ms clear_grads:(1000) -- ms restore_old:(7) -- ms unattributed: -- ms collect_sample:(1000) -- ms unattributed: -- ms unattributed: -- ms Total time: -- ms """ self.assertEqual(tidy(expected).strip(), tidy(observed).strip())
beanmachine-main
tests/ppl/compiler/perf_report_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """End-to-end test of realistic coin flip model""" import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor from torch.distributions import Bernoulli, Beta @bm.random_variable def beta(): return Beta(2.0, 2.0) @bm.random_variable def flip(n): return Bernoulli(beta()) class CoinFlipTest(unittest.TestCase): def test_coin_flip_inference(self) -> None: """test_inference from coin_flip_test.py""" # We've got a prior on the coin of Beta(2,2), so it is most # likely to be actually fair, but still with some probability # of being unfair in either direction. # # We flip the coin four times and get heads 25% of the time, # so this is some evidence that the true fairness of the coin is # closer to 25% than 50%. # # We sample 1000 times from the posterior and take the average; # it should come out that the true fairness is now most likely # to be around 37%. self.maxDiff = None queries = [beta()] observations = { flip(0): tensor(0.0), flip(1): tensor(0.0), flip(2): tensor(1.0), flip(3): tensor(0.0), } num_samples = 1000 inference = BMGInference() mcsamples = inference.infer(queries, observations, num_samples) samples = mcsamples[beta()] observed = samples.mean() expected = 0.37 self.assertAlmostEqual(first=observed, second=expected, delta=0.05) def test_coin_flip_to_dot_cpp_python(self) -> None: self.maxDiff = None queries = [beta()] observations = { flip(0): tensor(0.0), flip(1): tensor(0.0), flip(2): tensor(1.0), flip(3): tensor(0.0), } observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label=2.0]; N01[label=Beta]; N02[label=Sample]; N03[label=Bernoulli]; N04[label=Sample]; N05[label="Observation False"]; N06[label=Sample]; N07[label="Observation False"]; N08[label=Sample]; N09[label="Observation True"]; N10[label=Sample]; N11[label="Observation False"]; N12[label=Query]; N00 -> N01; N00 -> N01; N01 -> N02; N02 -> N03; N02 -> N12; N03 -> N04; N03 -> N06; N03 -> N08; N03 -> N10; N04 -> N05; N06 -> N07; N08 -> N09; N10 -> N11; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_cpp(queries, observations) expected = """ graph::Graph g; uint n0 = g.add_constant_pos_real(2.0); uint n1 = g.add_distribution( graph::DistributionType::BETA, graph::AtomicType::PROBABILITY, std::vector<uint>({n0, n0})); uint n2 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n3 = g.add_distribution( graph::DistributionType::BERNOULLI, graph::AtomicType::BOOLEAN, std::vector<uint>({n2})); uint n4 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n3})); g.observe(n4, false); uint n5 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n3})); g.observe(n5, false); uint n6 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n3})); g.observe(n6, true); uint n7 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n3})); g.observe(n7, false); uint q0 = g.query(n2);""" self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_python(queries, observations) expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_pos_real(2.0) n1 = g.add_distribution( graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [n0, n0], ) n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n3 = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [n2], ) n4 = g.add_operator(graph.OperatorType.SAMPLE, [n3]) g.observe(n4, False) n5 = g.add_operator(graph.OperatorType.SAMPLE, [n3]) g.observe(n5, False) n6 = g.add_operator(graph.OperatorType.SAMPLE, [n3]) g.observe(n6, True) n7 = g.add_operator(graph.OperatorType.SAMPLE, [n3]) g.observe(n7, False) q0 = g.query(n2)""" self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/coin_flip_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference.bmg_inference import BMGInference from torch.distributions import HalfCauchy, Normal @bm.random_variable def hc(n): return HalfCauchy(1) @bm.random_variable def normal(): return Normal(0, 1) @bm.functional def logprob(): # Demonstrate that we can apply operators other than # sample to stochastic distributions. normal_sample = normal() # Sample normal_dist_1 = Normal(0, hc(1)) normal_dist_2 = Normal(0, hc(2)) # "instance receiver" form weight_1 = normal_dist_1.log_prob(normal_sample) # "static receiver" form weight_2 = Normal.log_prob(normal_dist_2, normal_sample) # Non-stochastic distribution, stochastic value weight_3 = Normal(2, 3).log_prob(normal_sample) return weight_1 + weight_2 + weight_3 class LogProbTest(unittest.TestCase): def test_logprob(self) -> None: self.maxDiff = None queries = [logprob()] observed = BMGInference().to_dot(queries, {}) expected = """ digraph "graph" { N00[label=1.0]; N01[label=HalfCauchy]; N02[label=Sample]; N03[label=0.0]; N04[label=Normal]; N05[label=Sample]; N06[label=Sample]; N07[label=Normal]; N08[label=LogProb]; N09[label=Normal]; N10[label=LogProb]; N11[label=2.0]; N12[label=3.0]; N13[label=Normal]; N14[label=LogProb]; N15[label="+"]; N16[label=Query]; N00 -> N01; N00 -> N04; N01 -> N02; N01 -> N06; N02 -> N07; N03 -> N04; N03 -> N07; N03 -> N09; N04 -> N05; N05 -> N08; N05 -> N10; N05 -> N14; N06 -> N09; N07 -> N08; N08 -> N15; N09 -> N10; N10 -> N15; N11 -> N13; N12 -> N13; N13 -> N14; N14 -> N15; N15 -> N16; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/log_prob_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tests for poisson distribution""" import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference import BMGInference from torch import tensor from torch.distributions import Gamma, Poisson @bm.random_variable def poisson_1(): return Poisson(rate=0.5) @bm.random_variable def gamma_1(): return Gamma(1.0, 4.0) @bm.random_variable def poisson_2(): return Poisson(rate=gamma_1()) @bm.random_variable def poisson_3(): return Poisson(rate=-1 * gamma_1()) @bm.random_variable def poisson_4(): return Poisson(rate=tensor([1.0, 2.0])) class distributionPoissonTest(unittest.TestCase): def test_graphs_poisson_with_constant_rate(self) -> None: self.maxDiff = None queries = [poisson_1()] observations = {} observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Poisson]; N2[label=Sample]; N3[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; } """ self.assertEqual(expected.strip(), observed.strip()) observed_cpp = BMGInference().to_cpp(queries, observations) expected_cpp = """ graph::Graph g; uint n0 = g.add_constant_pos_real(0.5); uint n1 = g.add_distribution( graph::DistributionType::POISSON, graph::AtomicType::NATURAL, std::vector<uint>({n0})); uint n2 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint q0 = g.query(n2); """ self.assertEqual(expected_cpp.strip(), observed_cpp.strip()) observed_python = BMGInference().to_python(queries, observations) expected_python = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_pos_real(0.5) n1 = g.add_distribution( graph.DistributionType.POISSON, graph.AtomicType.NATURAL, [n0], ) n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) q0 = g.query(n2) """ self.assertEqual(expected_python.strip(), observed_python.strip()) def test_poisson_rate_with_sample_from_distribution(self) -> None: self.maxDiff = None queries = [poisson_2()] observations = {} observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N0[label=1.0]; N1[label=4.0]; N2[label=Gamma]; N3[label=Sample]; N4[label=Poisson]; N5[label=Sample]; N6[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; N4 -> N5; N5 -> N6; } """ self.assertEqual(expected.strip(), observed.strip()) def test_poisson_tensor_input(self) -> None: self.maxDiff = None queries = [poisson_4()] observations = {} observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N0[label=1.0]; N1[label=Poisson]; N2[label=Sample]; N3[label=2.0]; N4[label=Poisson]; N5[label=Sample]; N6[label=2]; N7[label=1]; N8[label=ToMatrix]; N9[label=Query]; N0 -> N1; N1 -> N2; N2 -> N8; N3 -> N4; N4 -> N5; N5 -> N8; N6 -> N8; N7 -> N8; N8 -> N9; } """ self.assertEqual(expected.strip(), observed.strip()) def test_poisson_rate_error_reporting(self) -> None: self.maxDiff = None queries = [poisson_3()] observations = {} with self.assertRaises(ValueError) as ex: BMGInference().to_dot(queries, observations) self.assertEqual( str(ex.exception), "The rate of a Poisson is required to be a positive real but is a negative real.\n" "The Poisson was created in function call poisson_3().", msg="Poisson distribution with non-positive real rates should throw an exception.", )
beanmachine-main
tests/ppl/compiler/distribution_poisson_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """A basic unit test for the Python interface of the BMG C++ Graph.infer method""" import unittest import beanmachine.ppl as bm from beanmachine.graph import InferenceType from beanmachine.ppl.inference import BMGInference from torch import tensor from torch.distributions import Bernoulli, Dirichlet @bm.functional def c(): return tensor(2.5) @bm.functional def c2(): return tensor([1.5, -2.5]) @bm.random_variable def flip(): return Bernoulli(0.5) @bm.functional def flip2(): return flip() @bm.functional def flip3(): return flip() + 0 @bm.functional def flip4(): return 0 + flip() @bm.functional def always_false_1(): return 1 < flip() @bm.functional def always_false_2(): return flip() < 0 @bm.functional def invalid_tensor_1(): return tensor([]) @bm.functional def invalid_tensor_2(): return tensor([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]) class BMGInferInterfaceTest(unittest.TestCase): def test_infer_interface_constant_functional(self) -> None: self.maxDiff = None # First, let's check expected behavior from a regular BM inference method samples = bm.SingleSiteNewtonianMonteCarlo().infer([c(), c2()], {}, 1, 1) observed = samples[c()] expected = "tensor([[2.5000]])" self.assertEqual(expected.strip(), str(observed).strip()) observed = samples[c2()] expected = "tensor([[[ 1.5000, -2.5000]]])" # Note, no ", dtype=torch.float64)" self.assertEqual(expected.strip(), str(observed).strip()) # Now let's do this in BMG Inference samples = BMGInference().infer([c(), c2()], {}, 1, 1) observed = samples[c()] expected = "tensor([[2.5000]])" self.assertEqual(expected.strip(), str(observed).strip()) observed = samples[c2()] expected = "tensor([[[ 1.5000, -2.5000]]], dtype=torch.float64)" self.assertEqual(expected.strip(), str(observed).strip()) # Again, let's check expected behavior from a regular BM inference method samples = bm.SingleSiteNewtonianMonteCarlo().infer([c(), c2()], {}, 1, 2) observed = samples[c()] expected = """ tensor([[2.5000], [2.5000]])""" self.assertEqual(expected.strip(), str(observed).strip()) observed = samples[c2()] expected = """ tensor([[[ 1.5000, -2.5000]], [[ 1.5000, -2.5000]]])""" # Note, no ", dtype=torch.float64)" self.assertEqual(expected.strip(), str(observed).strip()) # And again, in BMG inference samples = BMGInference().infer([c(), c2()], {}, 1, 2) observed = samples[c()] expected = """ tensor([[2.5000], [2.5000]])""" self.assertEqual(expected.strip(), str(observed).strip()) observed = samples[c2()] expected = """ tensor([[[ 1.5000, -2.5000]], [[ 1.5000, -2.5000]]], dtype=torch.float64)""" self.assertEqual(expected.strip(), str(observed).strip()) def test_infer_interface_redundant_functionals_1(self) -> None: self.maxDiff = None samples = BMGInference().infer([flip(), flip2()], {}, 10) f = samples[flip()] f2 = samples[flip2()] self.assertEqual(str(f), str(f2)) samples = BMGInference().infer([always_false_1(), always_false_2()], {}, 2, 1) af1 = samples[always_false_1()] af2 = samples[always_false_2()] expected = "tensor([[False, False]])" self.assertEqual(expected, str(af1)) self.assertEqual(expected, str(af2)) def test_infer_interface_redundant_functionals_2(self) -> None: self.maxDiff = None samples = BMGInference().infer([flip3(), flip4()], {}, 10) f3 = samples[flip3()] f4 = samples[flip4()] self.assertEqual(str(f3), str(f4)) def test_infer_interface_burn_in(self) -> None: # Check default case when num_adaptive_samples = 0 num_samples = 25 num_adaptive_samples = 0 samples = BMGInference().infer([c(), c2()], {}, num_samples, 1) observed = len(samples[c()][0]) expected = num_samples self.assertEqual(expected, observed) # Check case when num_adaptive_samples = 10 num_samples = 25 num_adaptive_samples = 10 samples = BMGInference().infer( [c(), c2()], {}, num_samples, 1, num_adaptive_samples=num_adaptive_samples ) observed = len(samples[c()][0]) expected = num_samples self.assertEqual(expected, observed) def test_infer_interface_nuts(self) -> None: # Check default case when num_adaptive_samples = 0 num_samples = 25 num_adaptive_samples = 0 samples = BMGInference().infer( [c(), c2()], {}, num_samples, 1, inference_type=InferenceType.NUTS ) observed = len(samples[c()][0]) expected = num_samples self.assertEqual(expected, observed) # Check case when num_adaptive_samples = 10 num_samples = 25 num_adaptive_samples = 10 samples = BMGInference().infer( [c(), c2()], {}, num_samples, 1, num_adaptive_samples=num_adaptive_samples, inference_type=InferenceType.NUTS, ) observed = len(samples[c()][0]) expected = num_samples self.assertEqual(expected, observed) class SampleModel: @bm.random_variable def a(self): return Dirichlet(tensor([0.5, 0.5])) @bm.functional def b(self): return self.a()[2] ## The index 2 is intentionally out of bounds def test_infer_interface_runtime_error(self) -> None: model = self.SampleModel() with self.assertRaisesRegex(RuntimeError, "Error during BMG inference.*"): BMGInference().infer([model.a(), model.b()], {}, 10, 4)
beanmachine-main
tests/ppl/compiler/bmg_infer_interface_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """End-to-end test for an example use of the normal distribution""" import logging import unittest import beanmachine.ppl as bm import torch # from torch import manual_seed, tensor import torch.distributions as dist # from torch.distributions import Bernoulli, Normal, Uniform from beanmachine.ppl.inference.bmg_inference import BMGInference # TODO: Check imports for consistency # This makes the results deterministic and reproducible. logging.getLogger("beanmachine").setLevel(50) torch.manual_seed(12) # Model @bm.random_variable def x(): """ A random variable drawn from a half normal distribution """ return dist.HalfNormal(1000) num_samples = ( 2 ###000 - Sample size reduced since it should not affect compilation issues ) num_chains = 4 observations = {} ### This means we will just get the distribution as declared queries = [x()] class distributionHalfNormalTest(unittest.TestCase): def test_distribution_half_normal_e2e(self) -> None: """Check BM and BMG inference both terminate""" self.maxDiff = None # Inference with BM # Note: No explicit seed here (in original tutorial model). Should we add one? amh = bm.SingleSiteAncestralMetropolisHastings() # Added local binding bm_samples = amh.infer( queries=queries, observations=observations, num_samples=num_samples, num_chains=num_chains, ) self.assertTrue( bm_samples.get_num_samples() == num_samples, msg="Got wrong number of samples back from BM inference", ) # Inference with BMG bmg_samples = BMGInference().infer( queries=queries, observations=observations, num_samples=num_samples, num_chains=1, # TODO[Walid]: 1 should be replaced by num_chains ) self.assertTrue( bmg_samples.get_num_samples() == num_samples, msg="Got wrong number of samples back from BMG inference", ) def test_distribution_half_normal_to_dot_cpp_python( self, ) -> None: self.maxDiff = None observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N0[label=1000.0]; N1[label=HalfNormal]; N2[label=Sample]; N3[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_cpp(queries, observations) expected = """ graph::Graph g; uint n0 = g.add_constant_pos_real(1000.0); uint n1 = g.add_distribution( graph::DistributionType::HALF_NORMAL, graph::AtomicType::POS_REAL, std::vector<uint>({n0})); uint n2 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint q0 = g.query(n2); """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_python(queries, observations) expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_pos_real(1000.0) n1 = g.add_distribution( graph.DistributionType.HALF_NORMAL, graph.AtomicType.POS_REAL, [n0], ) n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) q0 = g.query(n2) """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/distribution_half_normal_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """End-to-end test for an example use of the normal distribution""" import logging import unittest import beanmachine.ppl as bm import torch # from torch import manual_seed, tensor import torch.distributions as dist # from torch.distributions import Bernoulli, Normal, Uniform from beanmachine.ppl.inference.bmg_inference import BMGInference # TODO: Check imports for consistency # This makes the results deterministic and reproducible. logging.getLogger("beanmachine").setLevel(50) torch.manual_seed(12) # Model @bm.random_variable def x(): """ A random variable drawn from a normal (Gaussian) distribution """ return dist.Normal(0, 1000) num_samples = ( 2 ###000 - Sample size reduced since it should not affect compilation issues ) num_chains = 4 observations = {} ### This means we will just get the distribution as declared queries = [x()] class distributionNormalTest(unittest.TestCase): def test_distribution_normal_e2e(self) -> None: """Check BM and BMG inference both terminate""" self.maxDiff = None # Inference with BM # Note: No explicit seed here (in original tutorial model). Should we add one? amh = bm.SingleSiteAncestralMetropolisHastings() # Added local binding bm_samples = amh.infer( queries=queries, observations=observations, num_samples=num_samples, num_chains=num_chains, ) self.assertTrue( bm_samples.get_num_samples() == num_samples, msg="Got wrong number of samples back from BM inference", ) # Inference with BMG bmg_samples = BMGInference().infer( queries=queries, observations=observations, num_samples=num_samples, num_chains=1, # TODO[Walid]: 1 should be num_chains ) self.assertTrue( bmg_samples.get_num_samples() == num_samples, msg="Got wrong number of samples back from BMG inference", ) def test_distribution_normal_to_dot_cpp_python( self, ) -> None: self.maxDiff = None observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1000.0]; N2[label=Normal]; N3[label=Sample]; N4[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_cpp(queries, observations) expected = """ graph::Graph g; uint n0 = g.add_constant_real(0.0); uint n1 = g.add_constant_pos_real(1000.0); uint n2 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n0, n1})); uint n3 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n2})); uint q0 = g.query(n3); """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_python(queries, observations) expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_real(0.0) n1 = g.add_constant_pos_real(1000.0) n2 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n0, n1], ) n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2]) q0 = g.query(n3) """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/distribution_normal_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tests for ast_tools.py""" import ast import unittest import beanmachine.ppl.compiler.ast_tools as ast_tools class ASTToolsTest(unittest.TestCase): def disabled_test_ast_tools_print_tree(self) -> None: # PYTHON VERSIONING ISSUE # TODO: This test is disabled because it has different output on # different versions of Python. Enable the test once we have sorted # out what our required version of Python is. """test_ast_tools_print_tree""" node = ast.parse("2 + 3") observed = ast_tools.print_tree(node, False) expected = """ Module +-list +-Expr +-BinOp +-Num | +-2 +-Add +-Num +-3 """ self.maxDiff = None self.assertEqual(observed.strip(), expected.strip()) def disabled_test_ast_tools_print_graph(self) -> None: """test_ast_tools_print_graph""" # PYTHON VERSIONING ISSUE # TODO: This test is disabled because it has different output on # different versions of Python. Enable the test once we have sorted # out what our required version of Python is. node = ast.parse("2 + 3") observed = ast_tools.print_graph(node) expected = """ digraph "graph" { N0[label=Module]; N1[label=list]; N2[label=Expr]; N3[label=BinOp]; N4[label=Num]; N5[label=Add]; N6[label=Num]; N7[label=3]; N8[label=2]; N0 -> N1[label=body]; N1 -> N2[label=0]; N2 -> N3[label=value]; N3 -> N4[label=left]; N3 -> N5[label=op]; N3 -> N6[label=right]; N4 -> N8[label=n]; N6 -> N7[label=n]; }""" self.maxDiff = None self.assertEqual(observed.strip(), expected.strip()) def disabled_test_ast_tools_print_python(self) -> None: """test_ast_tools_print_python""" # PYTHON VERSIONING ISSUE # TODO: This test is disabled because it has different output on # different versions of Python. Enable the test once we have sorted # out what our required version of Python is. node = ast.parse("x = f(2 + 3)") observed = ast_tools.print_python(node) expected = """ Module( body=[ Assign( targets=[Name(id="x", ctx=Store())], value=Call( func=Name(id="f", ctx=Load()), args=[BinOp(left=Num(n=2), op=Add(), right=Num(n=3))], keywords=[], ), ) ] ) """ self.maxDiff = None self.assertEqual(observed.strip(), expected.strip())
beanmachine-main
tests/ppl/compiler/ast_tools_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """End-to-end test for 1D GMM with K > 2 number of components""" import logging import unittest # Comments after imports suggest alternative comment style (for original tutorial) import beanmachine.ppl as bm import torch # from torch import manual_seed, tensor import torch.distributions as dist # from torch.distributions import Bernoulli, Normal, Uniform from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor # This makes the results deterministic and reproducible. logging.getLogger("beanmachine").setLevel(50) torch.manual_seed(42) # Model class GaussianMixtureModel(object): def __init__(self, k): self.K = k @bm.random_variable def alpha(self, k): return dist.Dirichlet(5 * torch.ones(k)) @bm.random_variable def mu(self, c): return dist.Normal(0, 10) @bm.random_variable def sigma(self, c): return dist.Gamma(1, 10) @bm.random_variable def component(self, i): alpha = self.alpha(self.K) return dist.Categorical(alpha) @bm.random_variable def y(self, i): c = self.component(i) return dist.Normal(self.mu(c), self.sigma(c)) # Creating sample data n = 6 # num observations k = 4 # true number of clusters gmm = GaussianMixtureModel(k=k) ground_truth = { **{ gmm.alpha(k): torch.ones(k) * 1.0 / k, }, **{gmm.mu(i): tensor(i % 2).float() for i in range(k)}, **{gmm.sigma(i): tensor(0.1) for i in range(k)}, **{gmm.component(i): tensor(i % k).float() for i in range(n)}, } # [Visualization code in tutorial skipped] # Inference parameters num_samples = ( 1 ###00 Sample size should not affect (the ability to find) compilation issues. ) queries = ( [gmm.alpha(gmm.K)] + [gmm.component(j) for j in range(n)] + [gmm.mu(i) for i in range(k)] + [gmm.sigma(i) for i in range(k)] ) observations = { gmm.y(i): ground_truth[gmm.mu(ground_truth[gmm.component(i)].item())] for i in range(n) } class tutorialGMMwith1DimensionsAnd4Components(unittest.TestCase): def test_tutorial_GMM_with_1_dimensions_and_4_components(self) -> None: """Check BM and BMG inference both terminate""" self.maxDiff = None # Inference with BM torch.manual_seed( 42 ) # Note: Second time we seed. Could be a good tutorial style mh = bm.CompositionalInference({...: bm.SingleSiteNewtonianMonteCarlo()}) mh.infer( queries, observations, num_samples=num_samples, num_chains=1, ) self.assertTrue(True, msg="We just want to check this point is reached") def test_tutorial_GMM_with_1_dimensions_and_4_components_to_dot_cpp_python( self, ) -> None: self.maxDiff = None observed = BMGInference().to_dot(queries, observations) expected = """digraph "graph" { N00[label="[5.0,5.0,5.0,5.0]"]; N01[label=Dirichlet]; N02[label=Sample]; N03[label=Categorical]; N04[label=Sample]; N05[label=0.0]; N06[label=10.0]; N07[label=Normal]; N08[label=Sample]; N09[label=Sample]; N10[label=Sample]; N11[label=Sample]; N12[label=1.0]; N13[label=Gamma]; N14[label=Sample]; N15[label=Sample]; N16[label=Sample]; N17[label=Sample]; N18[label=Choice]; N19[label=Choice]; N20[label=Normal]; N21[label=Sample]; N22[label="Observation 0.0"]; N23[label=Sample]; N24[label=Choice]; N25[label=Choice]; N26[label=Normal]; N27[label=Sample]; N28[label="Observation 1.0"]; N29[label=Sample]; N30[label=Choice]; N31[label=Choice]; N32[label=Normal]; N33[label=Sample]; N34[label="Observation 0.0"]; N35[label=Sample]; N36[label=Choice]; N37[label=Choice]; N38[label=Normal]; N39[label=Sample]; N40[label="Observation 1.0"]; N41[label=Sample]; N42[label=Choice]; N43[label=Choice]; N44[label=Normal]; N45[label=Sample]; N46[label="Observation 0.0"]; N47[label=Sample]; N48[label=Choice]; N49[label=Choice]; N50[label=Normal]; N51[label=Sample]; N52[label="Observation 1.0"]; N53[label=Query]; N54[label=Query]; N55[label=Query]; N56[label=Query]; N57[label=Query]; N58[label=Query]; N59[label=Query]; N60[label=Query]; N61[label=Query]; N62[label=Query]; N63[label=Query]; N64[label=Query]; N65[label=Query]; N66[label=Query]; N67[label=Query]; N00 -> N01; N01 -> N02; N02 -> N03; N02 -> N53; N03 -> N04; N03 -> N23; N03 -> N29; N03 -> N35; N03 -> N41; N03 -> N47; N04 -> N18; N04 -> N19; N04 -> N54; N05 -> N07; N06 -> N07; N06 -> N13; N07 -> N08; N07 -> N09; N07 -> N10; N07 -> N11; N08 -> N18; N08 -> N24; N08 -> N30; N08 -> N36; N08 -> N42; N08 -> N48; N08 -> N60; N09 -> N18; N09 -> N24; N09 -> N30; N09 -> N36; N09 -> N42; N09 -> N48; N09 -> N61; N10 -> N18; N10 -> N24; N10 -> N30; N10 -> N36; N10 -> N42; N10 -> N48; N10 -> N62; N11 -> N18; N11 -> N24; N11 -> N30; N11 -> N36; N11 -> N42; N11 -> N48; N11 -> N63; N12 -> N13; N13 -> N14; N13 -> N15; N13 -> N16; N13 -> N17; N14 -> N19; N14 -> N25; N14 -> N31; N14 -> N37; N14 -> N43; N14 -> N49; N14 -> N64; N15 -> N19; N15 -> N25; N15 -> N31; N15 -> N37; N15 -> N43; N15 -> N49; N15 -> N65; N16 -> N19; N16 -> N25; N16 -> N31; N16 -> N37; N16 -> N43; N16 -> N49; N16 -> N66; N17 -> N19; N17 -> N25; N17 -> N31; N17 -> N37; N17 -> N43; N17 -> N49; N17 -> N67; N18 -> N20; N19 -> N20; N20 -> N21; N21 -> N22; N23 -> N24; N23 -> N25; N23 -> N55; N24 -> N26; N25 -> N26; N26 -> N27; N27 -> N28; N29 -> N30; N29 -> N31; N29 -> N56; N30 -> N32; N31 -> N32; N32 -> N33; N33 -> N34; N35 -> N36; N35 -> N37; N35 -> N57; N36 -> N38; N37 -> N38; N38 -> N39; N39 -> N40; N41 -> N42; N41 -> N43; N41 -> N58; N42 -> N44; N43 -> N44; N44 -> N45; N45 -> N46; N47 -> N48; N47 -> N49; N47 -> N59; N48 -> N50; N49 -> N50; N50 -> N51; N51 -> N52; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_cpp(queries, observations) expected = """graph::Graph g; Eigen::MatrixXd m0(4, 1); m0 << 5.0, 5.0, 5.0, 5.0; uint n0 = g.add_constant_pos_matrix(m0); uint n1 = g.add_distribution( graph::DistributionType::DIRICHLET, graph::ValueType( graph::VariableType::COL_SIMPLEX_MATRIX, graph::AtomicType::PROBABILITY, 4, 1 ), std::vector<uint>({n0})); uint n2 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n3 = g.add_distribution( graph::DistributionType::CATEGORICAL, graph::AtomicType::NATURAL, std::vector<uint>({n2})); uint n4 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n3})); uint n5 = g.add_constant_real(0.0); uint n6 = g.add_constant_pos_real(10.0); uint n7 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n5, n6})); uint n8 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n7})); uint n9 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n7})); uint n10 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n7})); uint n11 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n7})); uint n12 = g.add_constant_pos_real(1.0); uint n13 = g.add_distribution( graph::DistributionType::GAMMA, graph::AtomicType::POS_REAL, std::vector<uint>({n12, n6})); uint n14 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n13})); uint n15 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n13})); uint n16 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n13})); uint n17 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n13})); uint n18 = g.add_operator( graph::OperatorType::CHOICE, std::vector<uint>({n4, n8, n9, n10, n11})); uint n19 = g.add_operator( graph::OperatorType::CHOICE, std::vector<uint>({n4, n14, n15, n16, n17})); uint n20 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n18, n19})); uint n21 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n20})); g.observe(n21, 0.0); uint n22 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n3})); uint n23 = g.add_operator( graph::OperatorType::CHOICE, std::vector<uint>({n22, n8, n9, n10, n11})); uint n24 = g.add_operator( graph::OperatorType::CHOICE, std::vector<uint>({n22, n14, n15, n16, n17})); uint n25 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n23, n24})); uint n26 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n25})); g.observe(n26, 1.0); uint n27 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n3})); uint n28 = g.add_operator( graph::OperatorType::CHOICE, std::vector<uint>({n27, n8, n9, n10, n11})); uint n29 = g.add_operator( graph::OperatorType::CHOICE, std::vector<uint>({n27, n14, n15, n16, n17})); uint n30 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n28, n29})); uint n31 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n30})); g.observe(n31, 0.0); uint n32 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n3})); uint n33 = g.add_operator( graph::OperatorType::CHOICE, std::vector<uint>({n32, n8, n9, n10, n11})); uint n34 = g.add_operator( graph::OperatorType::CHOICE, std::vector<uint>({n32, n14, n15, n16, n17})); uint n35 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n33, n34})); uint n36 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n35})); g.observe(n36, 1.0); uint n37 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n3})); uint n38 = g.add_operator( graph::OperatorType::CHOICE, std::vector<uint>({n37, n8, n9, n10, n11})); uint n39 = g.add_operator( graph::OperatorType::CHOICE, std::vector<uint>({n37, n14, n15, n16, n17})); uint n40 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n38, n39})); uint n41 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n40})); g.observe(n41, 0.0); uint n42 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n3})); uint n43 = g.add_operator( graph::OperatorType::CHOICE, std::vector<uint>({n42, n8, n9, n10, n11})); uint n44 = g.add_operator( graph::OperatorType::CHOICE, std::vector<uint>({n42, n14, n15, n16, n17})); uint n45 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n43, n44})); uint n46 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n45})); g.observe(n46, 1.0); uint q0 = g.query(n2); uint q1 = g.query(n4); uint q2 = g.query(n22); uint q3 = g.query(n27); uint q4 = g.query(n32); uint q5 = g.query(n37); uint q6 = g.query(n42); uint q7 = g.query(n8); uint q8 = g.query(n9); uint q9 = g.query(n10); uint q10 = g.query(n11); uint q11 = g.query(n14); uint q12 = g.query(n15); uint q13 = g.query(n16); uint q14 = g.query(n17); """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/tutorial_GMM_with_1_dimensions_and_4_components_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.gen_bmg_cpp import to_bmg_cpp from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph from beanmachine.ppl.compiler.gen_bmg_python import to_bmg_python from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.model.rv_identifier import RVIdentifier def _rv_id() -> RVIdentifier: return RVIdentifier(lambda a, b: a, (1, 1)) class ColumnIndexTest(unittest.TestCase): def test_column_index_1(self) -> None: self.maxDiff = None bmg = BMGraphBuilder() t = bmg.add_natural(2) o = bmg.add_natural(1) z = bmg.add_natural(0) n = bmg.add_normal(z, o) ns = bmg.add_sample(n) e = bmg.add_exp(ns) h = bmg.add_probability(0.5) b = bmg.add_bernoulli(h) bs = bmg.add_sample(b) m = bmg.add_to_matrix(t, t, e, ns, ns, ns) ci = bmg.add_column_index(m, bs) lsev = bmg.add_logsumexp_vector(ci) bmg.add_query(lsev, _rv_id()) observed = to_dot( bmg, node_types=True, edge_requirements=True, after_transform=True, label_edges=True, ) expected = """ digraph "graph" { N00[label="0.0:R"]; N01[label="1.0:R+"]; N02[label="Normal:R"]; N03[label="Sample:R"]; N04[label="0.5:P"]; N05[label="Bernoulli:B"]; N06[label="Sample:B"]; N07[label="2:N"]; N08[label="Exp:R+"]; N09[label="ToReal:R"]; N10[label="ToMatrix:MR[2,2]"]; N11[label="1:N"]; N12[label="0:N"]; N13[label="if:N"]; N14[label="ColumnIndex:MR[2,1]"]; N15[label="LogSumExp:R"]; N16[label="Query:R"]; N00 -> N02[label="mu:R"]; N01 -> N02[label="sigma:R+"]; N02 -> N03[label="operand:R"]; N03 -> N08[label="operand:R"]; N03 -> N10[label="1:R"]; N03 -> N10[label="2:R"]; N03 -> N10[label="3:R"]; N04 -> N05[label="probability:P"]; N05 -> N06[label="operand:B"]; N06 -> N13[label="condition:B"]; N07 -> N10[label="columns:N"]; N07 -> N10[label="rows:N"]; N08 -> N09[label="operand:<=R"]; N09 -> N10[label="0:R"]; N10 -> N14[label="left:MR[2,2]"]; N11 -> N13[label="consequence:N"]; N12 -> N13[label="alternative:N"]; N13 -> N14[label="right:N"]; N14 -> N15[label="operand:MR[2,1]"]; N15 -> N16[label="operator:any"]; } """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_cpp(bmg).code expected = """ graph::Graph g; uint n0 = g.add_constant_real(0.0); uint n1 = g.add_constant_pos_real(1.0); uint n2 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n0, n1})); uint n3 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n2})); uint n4 = g.add_constant_probability(0.5); uint n5 = g.add_distribution( graph::DistributionType::BERNOULLI, graph::AtomicType::BOOLEAN, std::vector<uint>({n4})); uint n6 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n5})); uint n7 = g.add_constant_natural(2); uint n8 = g.add_operator( graph::OperatorType::EXP, std::vector<uint>({n3})); uint n9 = g.add_operator( graph::OperatorType::TO_REAL, std::vector<uint>({n8})); uint n10 = g.add_operator( graph::OperatorType::TO_MATRIX, std::vector<uint>({n7, n7, n9, n3, n3, n3})); uint n11 = g.add_constant_natural(1); uint n12 = g.add_constant_natural(0); uint n13 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n6, n11, n12})); uint n14 = g.add_operator( graph::OperatorType::COLUMN_INDEX, std::vector<uint>({n10, n13})); uint n15 = g.add_operator( graph::OperatorType::LOGSUMEXP_VECTOR, std::vector<uint>({n14})); uint q0 = g.query(n15); """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_python(bmg).code expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_real(0.0) n1 = g.add_constant_pos_real(1.0) n2 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n0, n1], ) n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2]) n4 = g.add_constant_probability(0.5) n5 = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [n4], ) n6 = g.add_operator(graph.OperatorType.SAMPLE, [n5]) n7 = g.add_constant_natural(2) n8 = g.add_operator(graph.OperatorType.EXP, [n3]) n9 = g.add_operator(graph.OperatorType.TO_REAL, [n8]) n10 = g.add_operator( graph.OperatorType.TO_MATRIX, [n7, n7, n9, n3, n3, n3], ) n11 = g.add_constant_natural(1) n12 = g.add_constant_natural(0) n13 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n6, n11, n12], ) n14 = g.add_operator(graph.OperatorType.COLUMN_INDEX, [n10, n13]) n15 = g.add_operator(graph.OperatorType.LOGSUMEXP_VECTOR, [n14]) q0 = g.query(n15) """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_graph(bmg).graph.to_dot() expected = """ digraph "graph" { N0[label="0"]; N1[label="1"]; N2[label="Normal"]; N3[label="~"]; N4[label="0.5"]; N5[label="Bernoulli"]; N6[label="~"]; N7[label="2"]; N8[label="exp"]; N9[label="ToReal"]; N10[label="ToMatrix"]; N11[label="1"]; N12[label="0"]; N13[label="IfThenElse"]; N14[label="ColumnIndex"]; N15[label="LogSumExp"]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N8; N3 -> N10; N3 -> N10; N3 -> N10; N4 -> N5; N5 -> N6; N6 -> N13; N7 -> N10; N7 -> N10; N8 -> N9; N9 -> N10; N10 -> N14; N11 -> N13; N12 -> N13; N13 -> N14; N14 -> N15; Q0[label="Query"]; N15 -> Q0; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/column_index_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl.compiler.bmg_types as bt import torch from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.lattice_typer import LatticeTyper from beanmachine.ppl.model.rv_identifier import RVIdentifier from torch import Size def _rv_id() -> RVIdentifier: return RVIdentifier(lambda a, b: a, (1, 1)) class LatticeTyperTest(unittest.TestCase): def test_lattice_typer_matrix_ops(self) -> None: self.maxDiff = None bmg = BMGraphBuilder() typer = LatticeTyper() # create non constant real matrix zeros = bmg.add_real_matrix(torch.zeros(2, 2)) ones = bmg.add_pos_real_matrix(torch.ones(2, 2)) tensor_elements = [] for row in range(0, 2): row_node = bmg.add_natural(row) row_mu = bmg.add_column_index(zeros, row_node) row_sigma = bmg.add_column_index(ones, row_node) for column in range(0, 2): index_node = bmg.add_natural(column) index_mu = bmg.add_vector_index(row_mu, index_node) index_sigma = bmg.add_vector_index(row_sigma, index_node) normal = bmg.add_normal(index_mu, index_sigma) sample = bmg.add_sample(normal) tensor_elements.append(sample) real_matrix = bmg.add_tensor(Size([2, 2]), *tensor_elements) # create non constant bool matrix probs = bmg.add_real_matrix(torch.tensor([[0.75, 0.25], [0.125, 0.875]])) tensor_elements = [] for row in range(0, 2): row_node = bmg.add_natural(row) row_prob = bmg.add_column_index(probs, row_node) for column in range(0, 2): col_index = bmg.add_natural(column) prob = bmg.add_vector_index(row_prob, col_index) bernoulli = bmg.add_bernoulli(prob) sample = bmg.add_sample(bernoulli) tensor_elements.append(sample) bool_matrix = bmg.add_tensor(Size([2, 2]), *tensor_elements) neg_real = bmg.add_neg_real_matrix(torch.tensor([[-1.2, -1.3], [-4.7, -1.2]])) pos_real = bmg.add_matrix_exp(real_matrix) add_pos_to_reg = bmg.add_matrix_addition(pos_real, neg_real) mult_pos_to_neg = bmg.add_elementwise_multiplication(pos_real, neg_real) sum_bool = bmg.add_matrix_sum(bool_matrix) bmg.add_query(sum_bool, _rv_id()) tpe_neg_real = typer[neg_real] tpe_real = typer[real_matrix] tpe_pos_real = typer[pos_real] tpe_add = typer[add_pos_to_reg] tpe_mult = typer[mult_pos_to_neg] tpe_sum = typer[sum_bool] self.assertTrue(isinstance(tpe_real, bt.RealMatrix)) self.assertTrue(isinstance(tpe_neg_real, bt.NegativeRealMatrix)) self.assertTrue(isinstance(tpe_pos_real, bt.PositiveRealMatrix)) self.assertTrue(isinstance(tpe_add, bt.RealMatrix)) self.assertTrue(isinstance(tpe_mult, bt.RealMatrix)) self.assertTrue(isinstance(tpe_sum, bt.BooleanMatrix)) def test_lattice_typer_1(self) -> None: self.maxDiff = None bmg = BMGraphBuilder() typer = LatticeTyper() # Lattice type of an untyped constant is based on its value. c0 = bmg.add_constant(0.0) self.assertEqual(bt.Zero, typer[c0]) c1 = bmg.add_constant(1.0) self.assertEqual(bt.One, typer[c1]) c2 = bmg.add_constant(2.0) self.assertEqual(bt.Natural, typer[c2]) c3 = bmg.add_constant(1.5) self.assertEqual(bt.PositiveReal, typer[c3]) c4 = bmg.add_constant(-1.5) self.assertEqual(bt.NegativeReal, typer[c4]) c5 = bmg.add_constant(0.5) self.assertEqual(bt.Probability, typer[c5]) # BMG type of tensor is given assuming that when we emit it into # the BMG graph, it will be transposed into column-major form. # In BMG, it will be [[1.5], [-1.5]] and therefore this tensor is # typed as having two rows, one column, not one row, two columns # as it does in torch. c6 = bmg.add_constant(torch.tensor([1.5, -1.5])) self.assertEqual(bt.Real.with_dimensions(2, 1), typer[c6]) # Lattice type of a typed constant is based on its type, # not its value. This real node is a real, even though its # value fits into a natural. c7 = bmg.add_real(2.0) self.assertEqual(bt.Real, typer[c7]) # Lattice type of distributions is fixed: d0 = bmg.add_beta(c2, c2) prob = bmg.add_sample(d0) self.assertEqual(bt.Probability, typer[prob]) d1 = bmg.add_bernoulli(prob) bo = bmg.add_sample(d1) self.assertEqual(bt.Boolean, typer[bo]) d2 = bmg.add_binomial(c2, prob) nat = bmg.add_sample(d2) self.assertEqual(bt.Natural, typer[nat]) d3 = bmg.add_halfcauchy(c3) posr = bmg.add_sample(d3) self.assertEqual(bt.PositiveReal, typer[posr]) negr = bmg.add_negate(posr) self.assertEqual(bt.NegativeReal, typer[negr]) d4 = bmg.add_normal(c0, c1) re = bmg.add_sample(d4) self.assertEqual(bt.Real, typer[re]) d5 = bmg.add_poisson(c1) re = bmg.add_sample(d5) self.assertEqual(bt.Natural, typer[nat]) # Lattice type of unsupported distributions and all descendents # is "untypable". d5 = bmg.add_chi2(c2) unt1 = bmg.add_sample(d5) unt2 = bmg.add_addition(unt1, unt1) self.assertEqual(bt.Untypable, typer[unt1]) self.assertEqual(bt.Untypable, typer[unt2]) # Spot check some operators. add1 = bmg.add_addition(prob, nat) self.assertEqual(bt.PositiveReal, typer[add1]) pow1 = bmg.add_power(prob, posr) self.assertEqual(bt.Probability, typer[pow1]) # TODO: Add more operators
beanmachine-main
tests/ppl/compiler/lattice_typer_test.py
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Suppose we have a mixture of k normal distributions each with standard # deviation equal to 1, but different means. Our prior on means is that # mean(0), ... mean(k) are normally distributed. # # To make samples mixed(0), ... from this distribution we first choose which # mean we want with category(0), ..., use that to sample mean(category(0)) # to get the mean, and then use that mean to sample from a normal distribution. # import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference import BMGInference from torch import tensor from torch.distributions import Categorical, Normal @bm.random_variable def mean(k): # Means of the components are normally distributed return Normal(0, 1) @bm.random_variable def category(item): # Choose a category, 0, 1 or 2 with ratio 1:3:4. return Categorical(tensor([1.0, 3.0, 4.0])) @bm.random_variable def mixed(item): return Normal(mean(category(item)), 2) class GaussianMixtureModelTest(unittest.TestCase): def test_gmm_to_dot(self) -> None: self.maxDiff = None queries = [mixed(0)] observations = {} # Here we use a categorical distribution to choose from three possible # samples. # # TODO: The inference step on categorical distributions in BMG is not # yet implemented because the gradients are not yet computed correctly # and because BMG NMC does not yet implement a discrete sampler. Once # that work is complete, update this test to actually do inference. observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label="[0.125,0.375,0.5]"]; N01[label=Categorical]; N02[label=Sample]; N03[label=0.0]; N04[label=1.0]; N05[label=Normal]; N06[label=Sample]; N07[label=Sample]; N08[label=Sample]; N09[label=Choice]; N10[label=2.0]; N11[label=Normal]; N12[label=Sample]; N13[label=Query]; N00 -> N01; N01 -> N02; N02 -> N09; N03 -> N05; N04 -> N05; N05 -> N06; N05 -> N07; N05 -> N08; N06 -> N09; N07 -> N09; N08 -> N09; N09 -> N11; N10 -> N11; N11 -> N12; N12 -> N13; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/gaussian_mixture_model_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference.bmg_inference import BMGInference from torch.distributions import Bernoulli, Beta # If we use a positive real valued *operator* in a context where # a probability is required, we allow it. But we don't allow constants. # # For example, if we have a probability divided by two, that's still a # probability. But adding it to another probability results in a positive # real even though we know it is still between 0.0 and 1.0. # # What we do in this situation is automatically insert a "to probability" # operator that coerces the positive real to a probability. @bm.random_variable def beta(n): return Beta(2.0, 2.0) @bm.random_variable def flip(): return Bernoulli(beta(0) * 0.5 + 0.5) # However, we should still reject constants that are out of bounds. @bm.random_variable def bad_flip(): return Bernoulli(2.5) # Similarly for log-probabilities which are negative reals. def log1mexp(x): return (1 - x.exp()).log() @bm.functional def to_neg_real(): pr1 = beta(1) * 0.5 + 0.5 # positive real pr2 = beta(2) * 0.5 + 0.5 # positive real lg1 = pr1.log() # real lg2 = pr2.log() # real # Because we think pr1 and pr2 are positive reals instead of probabilities, # we also think that lg1 and lg2 are reals instead of negative reals. inv = log1mexp(lg1 + lg2) # needs a negative real # We should insert a TO_NEG_REAL node on the sum above. return inv class ToProbabilityTest(unittest.TestCase): def test_to_probability_1(self) -> None: self.maxDiff = None bmg = BMGInference() observed = bmg.to_dot([flip()], {}) expected = """ digraph "graph" { N00[label=2.0]; N01[label=Beta]; N02[label=Sample]; N03[label=0.5]; N04[label="*"]; N05[label=ToPosReal]; N06[label=0.5]; N07[label="+"]; N08[label=ToProb]; N09[label=Bernoulli]; N10[label=Sample]; N11[label=Query]; N00 -> N01; N00 -> N01; N01 -> N02; N02 -> N04; N03 -> N04; N04 -> N05; N05 -> N07; N06 -> N07; N07 -> N08; N08 -> N09; N09 -> N10; N10 -> N11; } """ self.assertEqual(observed.strip(), expected.strip()) def test_to_probability_2(self) -> None: self.maxDiff = None bmg = BMGInference() # TODO: Raise a better error than a generic ValueError with self.assertRaises(ValueError) as ex: bmg.infer([bad_flip()], {}, 10) expected = """ The probability of a Bernoulli is required to be a probability but is a positive real. The Bernoulli was created in function call bad_flip().""" self.assertEqual(expected.strip(), str(ex.exception).strip()) def test_to_neg_real_1(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([to_neg_real()], {}) expected = """ digraph "graph" { N00[label=2.0]; N01[label=Beta]; N02[label=Sample]; N03[label=Sample]; N04[label=0.5]; N05[label="*"]; N06[label=ToPosReal]; N07[label=0.5]; N08[label="+"]; N09[label=Log]; N10[label="*"]; N11[label=ToPosReal]; N12[label="+"]; N13[label=Log]; N14[label="+"]; N15[label=ToNegReal]; N16[label=Log1mexp]; N17[label=Query]; N00 -> N01; N00 -> N01; N01 -> N02; N01 -> N03; N02 -> N05; N03 -> N10; N04 -> N05; N04 -> N10; N05 -> N06; N06 -> N08; N07 -> N08; N07 -> N12; N08 -> N09; N09 -> N14; N10 -> N11; N11 -> N12; N12 -> N13; N13 -> N14; N14 -> N15; N15 -> N16; N16 -> N17; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/to_probability_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.compiler.copy_and_replace import copy_and_replace from beanmachine.ppl.compiler.devectorizer_transformer import Devectorizer from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.compiler.runtime import BMGRuntime from torch import mm, tensor from torch.distributions import Bernoulli, Gamma, HalfCauchy, Normal, StudentT @bm.random_variable def norm_tensor(n): return Normal(tensor([0.0, 0.5]), tensor([0.6, 1.0])) class DevectorizeTransformerTest(unittest.TestCase): def test_needs_transform_because_parent_cannot_be_merged(self) -> None: self.maxDiff = None bmg = BMGRuntime().accumulate_graph([norm_tensor(0)], {}) transformed_graph, error_report = copy_and_replace( bmg, lambda c, s: Devectorizer(c, s) ) observed = to_dot(transformed_graph) expected = """ digraph "graph" { N00[label="[0.0,0.5]"]; N01[label=0]; N02[label=index]; N03[label="[0.6000000238418579,1.0]"]; N04[label=index]; N05[label=Normal]; N06[label=Sample]; N07[label=1]; N08[label=index]; N09[label=index]; N10[label=Normal]; N11[label=Sample]; N12[label=Tensor]; N13[label=Query]; N00 -> N02[label=left]; N00 -> N08[label=left]; N01 -> N02[label=right]; N01 -> N04[label=right]; N02 -> N05[label=mu]; N03 -> N04[label=left]; N03 -> N09[label=left]; N04 -> N05[label=sigma]; N05 -> N06[label=operand]; N06 -> N12[label=left]; N07 -> N08[label=right]; N07 -> N09[label=right]; N08 -> N10[label=mu]; N09 -> N10[label=sigma]; N10 -> N11[label=operand]; N11 -> N12[label=right]; N12 -> N13[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_transform_multiple_operands(self) -> None: _y_obs = tensor([33.3, 68.3]) @bm.random_variable def sigma_out(): return Gamma(1, 1) @bm.functional def multiple_operands(): mu = norm_tensor(0) ns = Normal(mu, sigma_out()) return ns.log_prob(_y_obs) self.maxDiff = None bmg = BMGRuntime().accumulate_graph([multiple_operands()], {}) transformed_graph, error_report = copy_and_replace( bmg, lambda c, s: Devectorizer(c, s) ) observed = to_dot(transformed_graph) expected = """ digraph "graph" { N00[label="[0.0,0.5]"]; N01[label=0]; N02[label=index]; N03[label="[0.6000000238418579,1.0]"]; N04[label=index]; N05[label=Normal]; N06[label=Sample]; N07[label=1.0]; N08[label=Gamma]; N09[label=Sample]; N10[label=Normal]; N11[label="[33.29999923706055,68.30000305175781]"]; N12[label=index]; N13[label=LogProb]; N14[label=1]; N15[label=index]; N16[label=index]; N17[label=Normal]; N18[label=Sample]; N19[label=Normal]; N20[label=index]; N21[label=LogProb]; N22[label=Tensor]; N23[label=Query]; N00 -> N02[label=left]; N00 -> N15[label=left]; N01 -> N02[label=right]; N01 -> N04[label=right]; N01 -> N12[label=right]; N02 -> N05[label=mu]; N03 -> N04[label=left]; N03 -> N16[label=left]; N04 -> N05[label=sigma]; N05 -> N06[label=operand]; N06 -> N10[label=mu]; N07 -> N08[label=concentration]; N07 -> N08[label=rate]; N08 -> N09[label=operand]; N09 -> N10[label=sigma]; N09 -> N19[label=sigma]; N10 -> N13[label=distribution]; N11 -> N12[label=left]; N11 -> N20[label=left]; N12 -> N13[label=value]; N13 -> N22[label=left]; N14 -> N15[label=right]; N14 -> N16[label=right]; N14 -> N20[label=right]; N15 -> N17[label=mu]; N16 -> N17[label=sigma]; N17 -> N18[label=operand]; N18 -> N19[label=mu]; N19 -> N21[label=distribution]; N20 -> N21[label=value]; N21 -> N22[label=right]; N22 -> N23[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_needs_merge(self) -> None: @bm.functional def foo(): return mm(tensor([2.0, 7.5]), norm_tensor(0)) self.maxDiff = None bmg = BMGRuntime().accumulate_graph([foo()], {}) transformed_graph, error_report = copy_and_replace( bmg, lambda c, s: Devectorizer(c, s) ) observed = to_dot(transformed_graph) expected = """ digraph "graph" { N00[label="[2.0,7.5]"]; N01[label="[0.0,0.5]"]; N02[label=0]; N03[label=index]; N04[label="[0.6000000238418579,1.0]"]; N05[label=index]; N06[label=Normal]; N07[label=Sample]; N08[label=1]; N09[label=index]; N10[label=index]; N11[label=Normal]; N12[label=Sample]; N13[label=Tensor]; N14[label="@"]; N15[label=Query]; N00 -> N14[label=left]; N01 -> N03[label=left]; N01 -> N09[label=left]; N02 -> N03[label=right]; N02 -> N05[label=right]; N03 -> N06[label=mu]; N04 -> N05[label=left]; N04 -> N10[label=left]; N05 -> N06[label=sigma]; N06 -> N07[label=operand]; N07 -> N13[label=left]; N08 -> N09[label=right]; N08 -> N10[label=right]; N09 -> N11[label=mu]; N10 -> N11[label=sigma]; N11 -> N12[label=operand]; N12 -> N13[label=right]; N13 -> N14[label=right]; N14 -> N15[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_broadcast(self) -> None: @bm.random_variable def flip_const_2_3(): return Bernoulli(tensor([[0.25, 0.75, 0.5], [0.125, 0.875, 0.625]])) @bm.random_variable def normal_2_3(): mus = flip_const_2_3() # 2 x 3 tensor of 0 or 1 sigmas = tensor([2.0, 3.0, 4.0]) return Normal(mus, sigmas) @bm.random_variable def hc_3(): return HalfCauchy(tensor([1.0, 2.0, 3.0])) @bm.random_variable def studentt_2_3(): return StudentT(hc_3(), normal_2_3(), hc_3()) self.maxDiff = None bmg = BMGRuntime().accumulate_graph([studentt_2_3()], {}) transformed_graph, error_report = copy_and_replace( bmg, lambda c, s: Devectorizer(c, s) ) observed = to_dot(transformed_graph) expected = """ digraph "graph" { N00[label="[1.0,2.0,3.0]"]; N01[label=0]; N02[label=index]; N03[label=HalfCauchy]; N04[label=Sample]; N05[label="[[0.25,0.75,0.5],\\\\n[0.125,0.875,0.625]]"]; N06[label=index]; N07[label=index]; N08[label=Bernoulli]; N09[label=Sample]; N10[label="[2.0,3.0,4.0]"]; N11[label=index]; N12[label=Normal]; N13[label=Sample]; N14[label=StudentT]; N15[label=Sample]; N16[label=1]; N17[label=index]; N18[label=HalfCauchy]; N19[label=Sample]; N20[label=index]; N21[label=Bernoulli]; N22[label=Sample]; N23[label=index]; N24[label=Normal]; N25[label=Sample]; N26[label=StudentT]; N27[label=Sample]; N28[label=2]; N29[label=index]; N30[label=HalfCauchy]; N31[label=Sample]; N32[label=index]; N33[label=Bernoulli]; N34[label=Sample]; N35[label=index]; N36[label=Normal]; N37[label=Sample]; N38[label=StudentT]; N39[label=Sample]; N40[label=index]; N41[label=index]; N42[label=Bernoulli]; N43[label=Sample]; N44[label=Normal]; N45[label=Sample]; N46[label=StudentT]; N47[label=Sample]; N48[label=index]; N49[label=Bernoulli]; N50[label=Sample]; N51[label=Normal]; N52[label=Sample]; N53[label=StudentT]; N54[label=Sample]; N55[label=index]; N56[label=Bernoulli]; N57[label=Sample]; N58[label=Normal]; N59[label=Sample]; N60[label=StudentT]; N61[label=Sample]; N62[label=Tensor]; N63[label=Query]; N00 -> N02[label=left]; N00 -> N17[label=left]; N00 -> N29[label=left]; N01 -> N02[label=right]; N01 -> N06[label=right]; N01 -> N07[label=right]; N01 -> N11[label=right]; N01 -> N41[label=right]; N02 -> N03[label=scale]; N03 -> N04[label=operand]; N04 -> N14[label=df]; N04 -> N14[label=scale]; N04 -> N46[label=df]; N04 -> N46[label=scale]; N05 -> N06[label=left]; N05 -> N40[label=left]; N06 -> N07[label=left]; N06 -> N20[label=left]; N06 -> N32[label=left]; N07 -> N08[label=probability]; N08 -> N09[label=operand]; N09 -> N12[label=mu]; N10 -> N11[label=left]; N10 -> N23[label=left]; N10 -> N35[label=left]; N11 -> N12[label=sigma]; N11 -> N44[label=sigma]; N12 -> N13[label=operand]; N13 -> N14[label=loc]; N14 -> N15[label=operand]; N15 -> N62[label=0]; N16 -> N17[label=right]; N16 -> N20[label=right]; N16 -> N23[label=right]; N16 -> N40[label=right]; N16 -> N48[label=right]; N17 -> N18[label=scale]; N18 -> N19[label=operand]; N19 -> N26[label=df]; N19 -> N26[label=scale]; N19 -> N53[label=df]; N19 -> N53[label=scale]; N20 -> N21[label=probability]; N21 -> N22[label=operand]; N22 -> N24[label=mu]; N23 -> N24[label=sigma]; N23 -> N51[label=sigma]; N24 -> N25[label=operand]; N25 -> N26[label=loc]; N26 -> N27[label=operand]; N27 -> N62[label=1]; N28 -> N29[label=right]; N28 -> N32[label=right]; N28 -> N35[label=right]; N28 -> N55[label=right]; N29 -> N30[label=scale]; N30 -> N31[label=operand]; N31 -> N38[label=df]; N31 -> N38[label=scale]; N31 -> N60[label=df]; N31 -> N60[label=scale]; N32 -> N33[label=probability]; N33 -> N34[label=operand]; N34 -> N36[label=mu]; N35 -> N36[label=sigma]; N35 -> N58[label=sigma]; N36 -> N37[label=operand]; N37 -> N38[label=loc]; N38 -> N39[label=operand]; N39 -> N62[label=2]; N40 -> N41[label=left]; N40 -> N48[label=left]; N40 -> N55[label=left]; N41 -> N42[label=probability]; N42 -> N43[label=operand]; N43 -> N44[label=mu]; N44 -> N45[label=operand]; N45 -> N46[label=loc]; N46 -> N47[label=operand]; N47 -> N62[label=3]; N48 -> N49[label=probability]; N49 -> N50[label=operand]; N50 -> N51[label=mu]; N51 -> N52[label=operand]; N52 -> N53[label=loc]; N53 -> N54[label=operand]; N54 -> N62[label=4]; N55 -> N56[label=probability]; N56 -> N57[label=operand]; N57 -> N58[label=mu]; N58 -> N59[label=operand]; N59 -> N60[label=loc]; N60 -> N61[label=operand]; N61 -> N62[label=5]; N62 -> N63[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_failure(self) -> None: # in order to devectorize correctly, all sizes must be known. # note that "ns.log_prob" has an unsizable node since we are asking # what the log prob of a tensor of size 3 is with respect to a distribution # whose samples are of size 2. _y_obs = tensor([33.3, 68.3, 6.7]) @bm.random_variable def sigma_out(): return Gamma(1, 1) @bm.functional def unsizable(): mu = norm_tensor(0) ns = Normal(mu, sigma_out()) return ns.log_prob(_y_obs) self.maxDiff = None bmg = BMGRuntime().accumulate_graph([unsizable()], {}) transformed_graph, error_report = copy_and_replace( bmg, lambda c, s: Devectorizer(c, s) ) if len(error_report.errors) == 1: error = error_report.errors[0].__str__() expected = """ The node log_prob cannot be sized.The operand sizes may be incompatible or the size may not be computable at compile time. The operand sizes are: [torch.Size([2]), torch.Size([3])] The unsizable node was created in function call unsizable(). """ self.assertEqual(expected.strip(), error.strip()) else: self.fail( "A single error message should have been generated since the sizer cannot size every node" )
beanmachine-main
tests/ppl/compiler/devectorizer_transformer_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm import torch from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor from torch.distributions import Bernoulli, Cauchy, Normal @bm.random_variable def flip(): return Bernoulli(0.5) @bm.random_variable def norm(n): return Normal(0, 1) @bm.functional def do_it(): return norm(flip()) @bm.functional def bad_functional(): return 123 @bm.random_variable def no_distribution_rv(): return 123 @bm.random_variable def unsupported_distribution_rv(): return Cauchy(1.0, 2.0) @bm.functional def missing_tensor_instance_function(): # What happens if we call a function on a tensor instance # that does not exist at all? return norm(1).not_a_real_function() @bm.functional def unsupported_tensor_instance_function_1(): # Tensor instance function exists but we do not handle it. return norm(1).arccos() @bm.functional def unsupported_tensor_instance_function_2(): # Same as above but called via Tensor: return torch.Tensor.arccos(norm(1)) @bm.functional def unsupported_tensor_instance_function_3(): # Regular receiver, stochastic argument: return torch.tensor(7.0).dot(norm(1)) @bm.functional def unsupported_torch_function(): # Same as above but called via torch: return torch.arccos(norm(1)) @bm.functional def unsupported_torch_submodule_function(): # What if we call an unsupported function in submodule of torch? return torch.special.erf(norm(1)) @bm.functional def missing_distribution_function(): # What happens if we try to get a nonsensical attr from a # stochastic distribution? return Normal(norm(1), 1.0).no_such_function() @bm.functional def unsupported_distribution_function(): return Normal(norm(1), 1.0).entropy() class BMGBadModelsTest(unittest.TestCase): def test_bmg_inference_error_reporting(self): with self.assertRaises(TypeError) as ex: BMGInference().infer(123, {}, 10) self.assertEqual( str(ex.exception), "Parameter 'queries' is required to be a list but is of type int.", ) with self.assertRaises(TypeError) as ex: BMGInference().infer([], 123, 10) self.assertEqual( str(ex.exception), "Parameter 'observations' is required to be a dictionary but is of type int.", ) # Should be flip(): with self.assertRaises(TypeError) as ex: BMGInference().infer([flip], {}, 10) self.assertEqual( str(ex.exception), "A query is required to be a random variable but is of type function.", ) # Should be flip(): with self.assertRaises(TypeError) as ex: BMGInference().infer([flip()], {flip: tensor(True)}, 10) self.assertEqual( str(ex.exception), "An observation is required to be a random variable but is of type function.", ) # Should be a tensor with self.assertRaises(TypeError) as ex: BMGInference().infer([flip()], {flip(): 123.0}, 10) self.assertEqual( str(ex.exception), "An observed value is required to be a tensor but is of type float.", ) # You can't make inferences on rv-of-rv with self.assertRaises(TypeError) as ex: BMGInference().infer([norm(flip())], {}, 10) self.assertEqual( str(ex.exception), "The arguments to a query must not be random variables.", ) # You can't make inferences on rv-of-rv with self.assertRaises(TypeError) as ex: BMGInference().infer([flip()], {norm(flip()): tensor(123)}, 10) self.assertEqual( str(ex.exception), "The arguments to an observation must not be random variables.", ) # Observations must be of random variables, not # functionals with self.assertRaises(TypeError) as ex: BMGInference().infer([flip()], {do_it(): tensor(123)}, 10) self.assertEqual( str(ex.exception), "An observation must observe a random_variable, not a functional.", ) # A functional must always return a value that can be represented # in the graph. with self.assertRaises(TypeError) as ex: BMGInference().infer([bad_functional()], {}, 10) self.assertEqual( str(ex.exception), "A functional must return a tensor.", ) # TODO: Verify we handle correctly the case where a queried value is # a constant, because that is not directly supported by BMG but # it would be nice to have. # An rv must return a distribution. with self.assertRaises(TypeError) as ex: BMGInference().infer([no_distribution_rv()], {}, 10) self.assertEqual( str(ex.exception), "A random_variable is required to return a distribution.", ) # An rv must return a supported distribution. with self.assertRaises(TypeError) as ex: BMGInference().infer([unsupported_distribution_rv()], {}, 10) self.assertEqual( str(ex.exception), "Distribution 'Cauchy' is not supported by Bean Machine Graph.", ) def test_bad_tensor_operations(self) -> None: with self.assertRaises(ValueError) as ex: BMGInference().infer([unsupported_tensor_instance_function_1()], {}, 1) expected = """ Function arccos is not supported by Bean Machine Graph. """ self.assertEqual(expected.strip(), str(ex.exception).strip()) with self.assertRaises(ValueError) as ex: BMGInference().infer([unsupported_tensor_instance_function_2()], {}, 1) self.assertEqual(expected.strip(), str(ex.exception).strip()) with self.assertRaises(ValueError) as ex: BMGInference().infer([unsupported_torch_function()], {}, 1) self.assertEqual(expected.strip(), str(ex.exception).strip()) expected = """ Function dot is not supported by Bean Machine Graph. """ with self.assertRaises(ValueError) as ex: BMGInference().infer([unsupported_tensor_instance_function_3()], {}, 1) self.assertEqual(expected.strip(), str(ex.exception).strip()) # I have no idea why torch gives the name of torch.special.erf as # "special_erf" rather than "erf", but it does. expected = """ Function special_erf is not supported by Bean Machine Graph. """ with self.assertRaises(ValueError) as ex: BMGInference().infer([unsupported_torch_submodule_function()], {}, 1) self.assertEqual(expected.strip(), str(ex.exception).strip()) with self.assertRaises(ValueError) as ex: BMGInference().infer([missing_tensor_instance_function()], {}, 1) expected = """ Function not_a_real_function is not supported by Bean Machine Graph. """ self.assertEqual(expected.strip(), str(ex.exception).strip()) with self.assertRaises(ValueError) as ex: BMGInference().infer([missing_distribution_function()], {}, 1) expected = """ Function no_such_function is not supported by Bean Machine Graph. """ self.assertEqual(expected.strip(), str(ex.exception).strip()) with self.assertRaises(ValueError) as ex: BMGInference().infer([unsupported_distribution_function()], {}, 1) expected = """ Function entropy is not supported by Bean Machine Graph. """ self.assertEqual(expected.strip(), str(ex.exception).strip())
beanmachine-main
tests/ppl/compiler/bmg_bad_models_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.error_report import BadMatrixMultiplication from beanmachine.ppl.compiler.size_assessment import SizeAssessment from beanmachine.ppl.compiler.sizer import Size, Sizer class SizeAssessmentTests(unittest.TestCase): def test_matrix_mult(self): bmg = BMGraphBuilder() assessor = SizeAssessment(Sizer()) probs = bmg.add_real_matrix( torch.tensor([[0.5, 0.125, 0.125], [0.0625, 0.0625, 0.875]]) ) tensor_elements = [] for row in range(0, 2): row_node = bmg.add_natural(row) row_prob = bmg.add_column_index(probs, row_node) for column in range(0, 3): col_index = bmg.add_natural(column) prob = bmg.add_vector_index(row_prob, col_index) bernoulli = bmg.add_bernoulli(prob) sample = bmg.add_sample(bernoulli) tensor_elements.append(sample) matrix2by3_rhs = bmg.add_tensor(Size([2, 3]), *tensor_elements) # invalid matrix2by3 = bmg.add_real_matrix( torch.tensor([[0.21, 0.27, 0.3], [0.5, 0.6, 0.1]]) ) matrix1by3 = bmg.add_real_matrix(torch.tensor([[0.1, 0.2, 0.3]])) matrix3 = bmg.add_real_matrix(torch.tensor([0.1, 0.2, 0.9])) scalar = bmg.add_real(4.5) mm_invalid = bmg.add_matrix_multiplication(matrix2by3_rhs, matrix2by3) error_size_mismatch = assessor.size_error(mm_invalid, bmg) self.assertIsInstance(error_size_mismatch, BadMatrixMultiplication) expectation = """ The model uses a matrix multiplication (@) operation unsupported by Bean Machine Graph. The dimensions of the operands are 2x3 and 2x3. """ self.assertEqual(expectation.strip(), error_size_mismatch.__str__().strip()) broadcast_not_supported_yet = bmg.add_matrix_multiplication( matrix2by3_rhs, matrix1by3 ) error_broadcast_not_supported_yet = assessor.size_error( broadcast_not_supported_yet, bmg ) expectation = """ The model uses a matrix multiplication (@) operation unsupported by Bean Machine Graph. The dimensions of the operands are 2x3 and 1x3. """ self.assertEqual( expectation.strip(), error_broadcast_not_supported_yet.__str__().strip() ) errors = [ assessor.size_error(bmg.add_matrix_multiplication(matrix2by3_rhs, mm), bmg) for mm in [matrix3, scalar] ] for error in errors: self.assertIsNone(error)
beanmachine-main
tests/ppl/compiler/size_assessment_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.compiler.runtime import BMGRuntime from torch import tensor from torch.distributions import Bernoulli, Beta # We need to be able to tell what size the tensor is # when a model operates on multi-valued tensors. @bm.random_variable def coin(): return Beta(tensor([[1.0, 2.0]]), 3.0) @bm.random_variable def flip(): return Bernoulli(coin()) class SizerTest(unittest.TestCase): def test_sizer_1(self) -> None: self.maxDiff = None queries = [flip()] observations = {} bmg = BMGRuntime().accumulate_graph(queries, observations) observed = to_dot(bmg, node_sizes=True) expected = """ digraph "graph" { N0[label="[[1.0,2.0]]:[1,2]"]; N1[label="[[3.0,3.0]]:[1,2]"]; N2[label="Beta:[1,2]"]; N3[label="Sample:[1,2]"]; N4[label="Bernoulli:[1,2]"]; N5[label="Sample:[1,2]"]; N6[label="Query:[1,2]"]; N0 -> N2[label=alpha]; N1 -> N2[label=beta]; N2 -> N3[label=operand]; N3 -> N4[label=probability]; N4 -> N5[label=operand]; N5 -> N6[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/sizer_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm import torch import torch.distributions as dist from beanmachine.ppl.inference import BMGInference trials = torch.tensor([29854.0, 2016.0]) pos = torch.tensor([4.0, 0.0]) buck_rep = torch.tensor([0.0006, 0.01]) n_buckets = len(trials) def log1mexp(x): return (1 - x.exp()).log() @bm.random_variable def eta(): # k reals return dist.Normal(0.0, 1.0).expand((n_buckets,)) @bm.random_variable def alpha(): # atomic R+ return dist.half_normal.HalfNormal(5.0) @bm.random_variable def sigma(): # atomic R+ return dist.half_normal.HalfNormal(1.0) @bm.random_variable def length_scale(): # R+ return dist.half_normal.HalfNormal(0.1) @bm.functional def cholesky(): # k by k reals delta = 1e-3 alpha_sq = alpha() * alpha() rho_sq = length_scale() * length_scale() cov = (buck_rep - buck_rep.unsqueeze(-1)) ** 2 cov = alpha_sq * torch.exp(-cov / (2 * rho_sq)) cov += torch.eye(buck_rep.size(0)) * delta return torch.linalg.cholesky(cov) @bm.random_variable def prev(): # k reals return dist.Normal(torch.matmul(cholesky(), eta()), sigma()) @bm.random_variable def bucket_prob(): # atomic bool phi_prev = dist.Normal(0, 1).cdf(prev()) # k probs log_prob = pos * torch.log(phi_prev) log_prob += (trials - pos) * torch.log1p(-phi_prev) joint_log_prob = log_prob.sum() # Convert the joint log prob to a log-odds. logit_prob = joint_log_prob - log1mexp(joint_log_prob) return dist.Bernoulli(logits=logit_prob) class GEPTest(unittest.TestCase): def test_gep_model_compilation(self) -> None: self.maxDiff = None queries = [prev()] observations = {bucket_prob(): torch.tensor([1.0])} # Demonstrate that compiling to an actual BMG graph # generates a graph which type checks. g, _ = BMGInference().to_graph(queries, observations) observed = g.to_dot() # TODO: We're not generating matrix complement or matrix log here after the matrix phi. # It seems like we could be; what's going wrong in the tensorizer? expected = """ digraph "graph" { N0[label="5"]; N1[label="HalfNormal"]; N2[label="~"]; N3[label="0.1"]; N4[label="HalfNormal"]; N5[label="~"]; N6[label="0"]; N7[label="1"]; N8[label="Normal"]; N9[label="~"]; N10[label="Normal"]; N11[label="~"]; N12[label="HalfNormal"]; N13[label="~"]; N14[label="*"]; N15[label="2"]; N16[label="*"]; N17[label="-1"]; N18[label="^"]; N19[label="ToReal"]; N20[label="matrix"]; N21[label="MatrixScale"]; N22[label="MatrixExp"]; N23[label="MatrixScale"]; N24[label="matrix"]; N25[label="MatrixAdd"]; N26[label="Cholesky"]; N27[label="2"]; N28[label="1"]; N29[label="ToMatrix"]; N30[label="MatrixMultiply"]; N31[label="0"]; N32[label="Index"]; N33[label="Normal"]; N34[label="~"]; N35[label="Index"]; N36[label="Normal"]; N37[label="~"]; N38[label="matrix"]; N39[label="ToMatrix"]; N40[label="MatrixPhi"]; N41[label="MatrixLog"]; N42[label="ToReal"]; N43[label="ElementwiseMultiply"]; N44[label="matrix"]; N45[label="Index"]; N46[label="Complement"]; N47[label="Log"]; N48[label="Index"]; N49[label="Complement"]; N50[label="Log"]; N51[label="ToMatrix"]; N52[label="ToReal"]; N53[label="ElementwiseMultiply"]; N54[label="MatrixAdd"]; N55[label="MatrixSum"]; N56[label="ToNegReal"]; N57[label="Log1mExp"]; N58[label="Negate"]; N59[label="ToReal"]; N60[label="+"]; N61[label="BernoulliLogit"]; N62[label="~"]; N0 -> N1; N1 -> N2; N2 -> N14; N2 -> N14; N3 -> N4; N4 -> N5; N5 -> N16; N5 -> N16; N6 -> N8; N6 -> N10; N7 -> N8; N7 -> N10; N7 -> N12; N8 -> N9; N9 -> N29; N10 -> N11; N11 -> N29; N12 -> N13; N13 -> N33; N13 -> N36; N14 -> N23; N15 -> N16; N16 -> N18; N17 -> N18; N18 -> N19; N19 -> N21; N20 -> N21; N21 -> N22; N22 -> N23; N23 -> N25; N24 -> N25; N25 -> N26; N26 -> N30; N27 -> N29; N27 -> N39; N27 -> N51; N28 -> N29; N28 -> N35; N28 -> N39; N28 -> N48; N28 -> N51; N29 -> N30; N30 -> N32; N30 -> N35; N31 -> N32; N31 -> N45; N32 -> N33; N33 -> N34; N34 -> N39; N35 -> N36; N36 -> N37; N37 -> N39; N38 -> N43; N39 -> N40; N40 -> N41; N40 -> N45; N40 -> N48; N41 -> N42; N42 -> N43; N43 -> N54; N44 -> N53; N45 -> N46; N46 -> N47; N47 -> N51; N48 -> N49; N49 -> N50; N50 -> N51; N51 -> N52; N52 -> N53; N53 -> N54; N54 -> N55; N55 -> N56; N55 -> N60; N56 -> N57; N57 -> N58; N58 -> N59; N59 -> N60; N60 -> N61; N61 -> N62; O0[label="Observation"]; N62 -> O0; Q0[label="Query"]; N39 -> Q0; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/gep_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Test performance of multiary multiplication optimization """ import platform import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference import BMGInference from torch.distributions import Normal @bm.random_variable def norm(x): return Normal(0.0, 1.0) @bm.functional def prod_1(counter): prod = 1.0 for i in range(counter): prod = prod * norm(i) return prod @bm.functional def prod_2(): return prod_1(100) * prod_1(50) def get_report(skip_optimizations): observations = {} queries = [prod_2()] number_samples = 1000 _, perf_report = BMGInference()._infer( queries, observations, number_samples, skip_optimizations=skip_optimizations ) return perf_report class BinaryVsMultiaryMultiplicationPerformanceTest(unittest.TestCase): def test_perf_num_nodes_edges(self) -> None: """ Test to check if Multiary multiplication optimization reduces the number of nodes and number of edges using the performance report returned by BMGInference. """ if platform.system() == "Windows": self.skipTest("Disabling *_perf_test.py until flakiness is resolved") self.maxDiff = None skip_optimizations = { "beta_bernoulli_conjugate_fixer", "beta_binomial_conjugate_fixer", "normal_normal_conjugate_fixer", } report_w_optimization = get_report(skip_optimizations) self.assertEqual(report_w_optimization.node_count, 105) self.assertEqual(report_w_optimization.edge_count, 204) skip_optimizations = { "multiary_multiplication_fixer", "beta_bernoulli_conjugate_fixer", "beta_binomial_conjugate_fixer", "normal_normal_conjugate_fixer", } report_wo_optimization = get_report(skip_optimizations) self.assertEqual(report_wo_optimization.node_count, 203) self.assertEqual(report_wo_optimization.edge_count, 302)
beanmachine-main
tests/ppl/compiler/binary_vs_multiary_multiplication_perf_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tests for bmg_types.py""" import unittest from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.bmg_types import ( _lookup, Boolean, BooleanMatrix, bottom, Natural, NaturalMatrix, NegativeReal, NegativeRealMatrix, One, OneHotMatrix, PositiveReal, PositiveRealMatrix, Probability, ProbabilityMatrix, Real, RealMatrix, SimplexMatrix, supremum, Tensor, type_of_value, Zero, ZeroMatrix, ) from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.model.rv_identifier import RVIdentifier from torch import tensor def _rv_id() -> RVIdentifier: return RVIdentifier(lambda a, b: a, (1, 1)) class BMGTypesTest(unittest.TestCase): def test_lookup_table(self) -> None: """Tests _lookup_table for some basic properties""" # This is a simple example of "property based testing" # Since the search space is finite, we can do it exhaustively lookup_table = _lookup() keys = lookup_table.keys() for key in keys: a, b = key class_1 = lookup_table[(a, b)] class_2 = lookup_table[(b, a)] # symmertry self.assertEqual( class_1(1, 1), class_2(1, 1), msg="Table breaks symmertry when inputs are (" + str(a) + "," + str(b) + ")", ) return def test_supremum(self) -> None: """test_supremum""" # Degenerate case -- supremum of no types is bottom because it # is the smallest type that is larger than every type in the # empty list. self.assertEqual(bottom, supremum()) # Supremum of one type is that type self.assertEqual(Probability, supremum(Probability)) # A few cases for single-valued types. self.assertEqual(PositiveReal, supremum(Probability, Natural)) self.assertEqual(Real, supremum(Natural, Probability, Real)) self.assertEqual(Tensor, supremum(Real, Tensor, Natural, Boolean)) self.assertEqual(Real, supremum(NegativeReal, PositiveReal)) self.assertEqual(Boolean, supremum(One, Zero)) # Supremum of any two types with different matrix dimensions is Tensor self.assertEqual(Tensor, supremum(RealMatrix(1, 2), RealMatrix(2, 1))) # A few cases for matrices self.assertEqual( ProbabilityMatrix(1, 2), supremum(BooleanMatrix(1, 2), SimplexMatrix(1, 2)) ) self.assertEqual( PositiveRealMatrix(1, 2), supremum(NaturalMatrix(1, 2), SimplexMatrix(1, 2)) ) def test_type_of_value(self) -> None: """test_type_of_value""" self.assertEqual(One, type_of_value(True)) self.assertEqual(Zero, type_of_value(False)) self.assertEqual(Zero, type_of_value(0)) self.assertEqual(One, type_of_value(1)) self.assertEqual(Zero, type_of_value(0.0)) self.assertEqual(One, type_of_value(1.0)) self.assertEqual(Zero, type_of_value(tensor(False))) self.assertEqual(Zero, type_of_value(tensor(0))) self.assertEqual(One, type_of_value(tensor(1))) self.assertEqual(Zero, type_of_value(tensor(0.0))) self.assertEqual(One, type_of_value(tensor(1.0))) self.assertEqual(One, type_of_value(tensor([[True]]))) self.assertEqual(Zero, type_of_value(tensor([[False]]))) self.assertEqual(Zero, type_of_value(tensor([[0]]))) self.assertEqual(One, type_of_value(tensor([[1]]))) self.assertEqual(Zero, type_of_value(tensor([[0.0]]))) self.assertEqual(One, type_of_value(tensor([[1.0]]))) self.assertEqual(Natural, type_of_value(2)) self.assertEqual(Natural, type_of_value(2.0)) self.assertEqual(Natural, type_of_value(tensor(2))) self.assertEqual(Natural, type_of_value(tensor(2.0))) self.assertEqual(Natural, type_of_value(tensor([[2]]))) self.assertEqual(Natural, type_of_value(tensor([[2.0]]))) self.assertEqual(Probability, type_of_value(0.5)) self.assertEqual(Probability, type_of_value(tensor(0.5))) self.assertEqual(Probability, type_of_value(tensor([[0.5]]))) self.assertEqual(PositiveReal, type_of_value(1.5)) self.assertEqual(PositiveReal, type_of_value(tensor(1.5))) self.assertEqual(PositiveReal, type_of_value(tensor([[1.5]]))) self.assertEqual(NegativeReal, type_of_value(-1.5)) self.assertEqual(NegativeReal, type_of_value(tensor(-1.5))) self.assertEqual(NegativeReal, type_of_value(tensor([[-1.5]]))) # 1-d tensor is matrix # Tensors are row-major in torch but column-major in BMG. We give # the BMG type of the tensor as though it were column-major. # This is treated as if it were [[0],[0]], a 2-column 1-row tensor # because that's what we're going to emit into BMG. self.assertEqual(ZeroMatrix(2, 1), type_of_value(tensor([0, 0]))) self.assertEqual(BooleanMatrix(3, 1), type_of_value(tensor([0, 1, 1]))) self.assertEqual(BooleanMatrix(2, 1), type_of_value(tensor([1, 1]))) # 2-d tensor is matrix self.assertEqual(OneHotMatrix(2, 2), type_of_value(tensor([[1, 0], [1, 0]]))) self.assertEqual(BooleanMatrix(2, 2), type_of_value(tensor([[1, 1], [1, 0]]))) self.assertEqual(NaturalMatrix(2, 2), type_of_value(tensor([[1, 3], [1, 0]]))) self.assertEqual( SimplexMatrix(2, 2), type_of_value(tensor([[0.5, 0.5], [0.5, 0.5]])) ) self.assertEqual( ProbabilityMatrix(2, 2), type_of_value(tensor([[0.75, 0.5], [0.5, 0.5]])) ) self.assertEqual( PositiveRealMatrix(2, 2), type_of_value(tensor([[1.75, 0.5], [0.5, 0.5]])) ) self.assertEqual( RealMatrix(2, 2), type_of_value(tensor([[1.75, 0.5], [0.5, -0.5]])) ) self.assertEqual( NegativeRealMatrix(2, 2), type_of_value(tensor([[-1.75, -0.5], [-0.5, -0.5]])), ) # 3-d tensor is Tensor self.assertEqual( Tensor, type_of_value(tensor([[[0, 0], [0, 0]], [[0, 0], [0, 0]]])) ) # Empty tensor is Tensor self.assertEqual(Tensor, type_of_value(tensor([]))) def test_types_in_dot(self) -> None: """test_types_in_dot""" self.maxDiff = None bmg = BMGraphBuilder() one = bmg.add_constant(tensor(1.0)) two = bmg.add_constant(tensor(2.0)) half = bmg.add_constant(tensor(0.5)) beta = bmg.add_beta(two, two) betas = bmg.add_sample(beta) mult = bmg.add_multiplication(half, betas) norm = bmg.add_normal(mult, one) bern = bmg.add_bernoulli(mult) bmg.add_sample(norm) bmg.add_sample(bern) bmg.add_query(mult, _rv_id()) observed = to_dot( bmg, node_types=True, edge_requirements=True, ) expected = """ digraph "graph" { N00[label="0.5:P"]; N01[label="2.0:N"]; N02[label="Beta:P"]; N03[label="Sample:P"]; N04[label="*:P"]; N05[label="1.0:OH"]; N06[label="Normal:R"]; N07[label="Sample:R"]; N08[label="Bernoulli:B"]; N09[label="Sample:B"]; N10[label="Query:P"]; N00 -> N04[label="left:P"]; N01 -> N02[label="alpha:R+"]; N01 -> N02[label="beta:R+"]; N02 -> N03[label="operand:P"]; N03 -> N04[label="right:P"]; N04 -> N06[label="mu:R"]; N04 -> N08[label="probability:P"]; N04 -> N10[label="operator:any"]; N05 -> N06[label="sigma:R+"]; N06 -> N07[label="operand:R"]; N08 -> N09[label="operand:B"]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_matrix_types(self) -> None: """test_matrix_types""" b22 = BooleanMatrix(2, 2) b33 = BooleanMatrix(3, 3) # Reference equality self.assertEqual(b22, BooleanMatrix(2, 2)) self.assertNotEqual(b22, b33) self.assertEqual(b22.short_name, "MB[2,2]") self.assertEqual(b22.long_name, "2 x 2 bool matrix")
beanmachine-main
tests/ppl/compiler/bmg_types_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tests for rules.py""" import ast import re import unittest from typing import Any import astor from beanmachine.ppl.compiler.ast_patterns import ( add, ast_domain, ast_false, ast_true, attribute, binop, expr, function_def, name, num, ) from beanmachine.ppl.compiler.patterns import ( anyPattern as _default, ListAny, match_every, PredicatePattern, ) from beanmachine.ppl.compiler.rules import ( AllOf, at_least_once, either_or_both, fail, if_then, ignore_div_zero, ignore_runtime_error, list_member_children, ListEdit, make_logger, pattern_rules, PatternRule, projection_rule, remove_from_list, SomeOf, TryMany as many, TryOnce as once, ) def tidy(s: str) -> str: return re.sub(" +", " ", s.replace("\n", " ")).strip() def first_expr(s: str) -> ast.AST: return ast.parse(s).body[0].value _all = ast_domain.all_children some = ast_domain.some_children one = ast_domain.one_child top_down = ast_domain.top_down bottom_up = ast_domain.bottom_up descend_until = ast_domain.descend_until specific_child = ast_domain.specific_child class RulesTest(unittest.TestCase): def test_rules_1(self) -> None: """Tests for rules.py""" remove_plus_zero = pattern_rules( [ (binop(op=add, left=num(n=0)), lambda b: b.right), (binop(op=add, right=num(n=0)), lambda b: b.left), ], "remove_plus_zero", ) self.maxDiff = None m = ast.parse("0; 1; 1+1; 0+1; 1+0; 0+1+0; 0+(1+0); (0+1)+(1+0)") # z = m.body[0].value o = m.body[1].value oo = m.body[2].value zo = m.body[3].value oz = m.body[4].value zo_z = m.body[5].value z_oz = m.body[6].value zo_oz = m.body[7].value rpz_once = once(remove_plus_zero) rpz_many = many(remove_plus_zero) observed = str(rpz_once) expected = """ try_once( first_match( remove_plus_zero( (isinstance(test, BinOp) and isinstance(test.op, Add) and (isinstance(test.left, Num) and test.left.n==0)), remove_plus_zero( (isinstance(test, BinOp) and isinstance(test.op, Add) and (isinstance(test.right, Num) and test.right.n==0)) ) ) """ self.assertEqual(tidy(observed), tidy(expected)) # Note that _all on this list does not recurse down to the # children of the list elements. It runs the rule once on # each list element, adn that's it. result = _all(rpz_once)([oo, zo_z, z_oz, zo_oz]).expect_success() self.assertEqual(ast.dump(result[0]), ast.dump(oo)) self.assertEqual(ast.dump(result[1]), ast.dump(zo)) self.assertEqual(ast.dump(result[2]), ast.dump(oz)) self.assertEqual(ast.dump(result[3]), ast.dump(zo_oz)) # Again, this does not recurse to the children. Rather, it keeps # running the rule until the pattern fails; that is different than # recursing down into the children! result = _all(rpz_many)([oo, zo_z, z_oz, zo_oz]).expect_success() self.assertEqual(ast.dump(result[0]), ast.dump(oo)) self.assertEqual(ast.dump(result[1]), ast.dump(o)) self.assertEqual(ast.dump(result[2]), ast.dump(o)) self.assertEqual(ast.dump(result[3]), ast.dump(zo_oz)) # Now instead of running the rule on all elements of a list, let's # run the rule once on all *children* of a node. Again, this applies the # rule just to the children; it does not recurse down into their # children, and it does not re-run the rule on the result. result = _all(rpz_once)(z_oz).expect_success() self.assertEqual(ast.dump(result), ast.dump(zo)) result = _all(rpz_once)(zo_z).expect_success() self.assertEqual(ast.dump(result), ast.dump(oz)) result = _all(rpz_once)(zo_oz).expect_success() self.assertEqual(ast.dump(result), ast.dump(oo)) # Above we had a test for _all(many(rpz))([oo, zo_z, z_oz, zo_oz]); # we can get the same results with: result = many(some(remove_plus_zero))([oo, zo_z, z_oz, zo_oz]).expect_success() self.assertEqual(ast.dump(result[0]), ast.dump(oo)) self.assertEqual(ast.dump(result[1]), ast.dump(o)) self.assertEqual(ast.dump(result[2]), ast.dump(o)) self.assertEqual(ast.dump(result[3]), ast.dump(zo_oz)) # Both attain a fixpoint. # OneChild applies a rule to members of a list or the children of a node, # until the first success, and then it stops. result = one(remove_plus_zero)([oo, zo_z, z_oz, zo_oz]).expect_success() self.assertEqual(ast.dump(result[0]), ast.dump(oo)) # Rule fails self.assertEqual(ast.dump(result[1]), ast.dump(zo)) # Rule succeeds self.assertEqual(ast.dump(result[2]), ast.dump(z_oz)) # Rule does not run self.assertEqual(ast.dump(result[3]), ast.dump(zo_oz)) # Rule does not run # Testing list editing: # Let's start with a simple test: remove all the zeros from a list # of integers: remove_zeros = PatternRule(0, lambda b: remove_from_list, "remove_zeros") result = _all(once(remove_zeros))( [0, 1, 0, 2, 0, 0, 3, 4, 0, 0] ).expect_success() self.assertEqual(result, [1, 2, 3, 4]) # Let's try some deeper combinations. Here we apply a rule to all # children of a module -- that is, the body. That rule then applies # remove_num_statements once to all members of the body list. remove_num_statements = PatternRule( expr(num()), lambda b: remove_from_list, "remove_num_statements" ) t = ast.parse("0; 1; 2 + 3; 4 + 5 + 6; 7 + 8 * 9;") result = _all(_all(once(remove_num_statements)))(t).expect_success() self.assertEqual( ast.dump(result), ast.dump(ast.parse("2 + 3; 4 + 5 + 6; 7 + 8 * 9;")) ) # Split every statement that is a binop into two statements, # and keep going until you can split no more: split_binops = PatternRule( expr(binop()), lambda b: ListEdit([ast.Expr(b.value.left), ast.Expr(b.value.right)]), "split_binops", ) # This correctly implements those semantics. # The "some" fails when no more work can be done, so the "many" # repeats until a fixpoint is reached for the statement list. result = _all(many(some(split_binops)))(t).expect_success() self.assertEqual( ast.dump(result), ast.dump(ast.parse("0; 1; 2; 3; 4; 5; 6; 7; 8; 9;")) ) # TODO: Unfortunately, this does not attain a fixpoint. # TODO: This seems like it should have the same behaviour as the # TODO: previous, but what happens is: split_binops returns a ListEdit. # TODO: TryMany then checks whether split_binops applies again; # TODO: it does not because a ListEdit is not an Expr(BinOp); it is a # TODO: ListEdit possibly containing an Expr(BinOp). It then returns the # TODO: ListEdit to AllChildren, which splices in the result and goes on # TODO: to the next item in the list. # TODO: # TODO: We have a problem here: should rules which return ListEdits to other # TODO: rules have those other rules automatically distribute their behaviour # TODO: across the ListEdit? Should we disallow a rule from returning a # TODO: ListEdit to anything other than All/Some/One, which are the only # TODO: combinators that know how to splice in the edit? Give this some # TODO: thought. result = _all(_all(many(split_binops)))(t).expect_success() self.assertEqual( ast.dump(result), ast.dump(ast.parse("0; 1; 2; 3; 4 + 5; 6; 7; 8 * 9;")) ) # Test top-down and bottom-up combinators: # The top-down and bottom-up combinators recursively apply a rule to every # node in a tree; top-down rewrites the root and then rewrites all the new # children; bottom-up rewrites the leaves and then the new parents. # What is the difference between bottom-up and top-down traversals? # Consider this example. test = ast.parse("m(0, 1, 2+3, [0+4, 5+0, 0+6+0], {0+(7+0): (0+8)+(9+0)})") expected = ast.parse("m(0, 1, 2+3, [4, 5, 6], {7: 8+9})") result = bottom_up(rpz_once)(test).expect_success() self.assertEqual(ast.dump(result), ast.dump(expected)) # As we'd expect, the bottom-up traversal eliminates all the +0 operations # from the tree. But top-down does not! result = top_down(rpz_once)(test).expect_success() expected = ast.parse("m(0, 1, 2+3, [4, 5, 0+6], {7+0: 8+9})") self.assertEqual(ast.dump(result), ast.dump(expected)) # Why are 0+6+0 and 0+(7+0) not simplified to 6 and 7 by top_down? # Well, think about what top-down does when it encounters 0+6+0. # First it notes that 0+6+0 has the form x+0 and simplifies it to x, # so we have 0+6. Then we recurse on the children, but the children # are not of the form 0+x or x+0, so we're done. I said rpz_once, # not rpz_many, which would keep trying to simplify until proceding # to the children: result = top_down(rpz_many)(test).expect_success() expected = ast.parse("m(0, 1, 2+3, [4, 5, 6], {7: 8+9})") self.assertEqual(ast.dump(result), ast.dump(expected)) # The at_least_once combinator requires that a rule succeed at least # once, and then runs it until it fails. alorpz = at_least_once(remove_plus_zero) self.assertFalse(alorpz(o)) self.assertTrue(alorpz(z_oz)) def test_infinite_loop_detection(self) -> None: # While working on a previous test case I accidentally created a pattern # that has an infinite loop; one of the benefits of a combinator-based # approach to rewriting is we can often detect statically when a particular # combination of rules must produce an infinite loop, and raise an error. # In particular, we know several of the rules always succeed (TryMany, # TryOnce, identity) and if any of these rules are ever passed to TryMany, # we've got an infinite loop right there. # Here's an example. fail always fails, but once always succeeds. Since # once always succeeds, _all(once(anything)) always succeeds, which means # that we've given something that always succeeds to many, and we'll loop # forever. with self.assertRaises(ValueError): _all = ast_domain.all_children _all(many(_all(once(fail)))) def disabled_test_rules_2(self) -> None: """Tests for rules.py""" # PYTHON VERSIONING ISSUE # TODO: This test does not pass in newer versions of Python; for # unknown reasons the two parse trees differ in small details. # Once we understand why, re-enable this test. self.maxDiff = None _all = ast_domain.all_children num_stmt = expr(num()) even = PatternRule( match_every(num_stmt, PredicatePattern(lambda e: e.value.n % 2 == 0)) ) add_one = projection_rule(lambda e: ast.Expr(ast.Num(e.value.n + 1))) t = ast.parse("0; 1; 2; 3; 4; 5 + 6") result = _all(_all(if_then(even, add_one)))(t).expect_success() self.assertEqual(ast.dump(result), ast.dump(ast.parse("1; 1; 3; 3; 5; 5 + 6"))) def disabled_test_find_random_variables(self) -> None: """Find all the functions that have a decorator, delete everything else.""" # PYTHON VERSIONING ISSUE # TODO: This test does not pass in newer versions of Python; for # unknown reasons the two parse trees differ in small details. # Once we understand why, re-enable this test. self.maxDiff = None _all = ast_domain.all_children rule = pattern_rules( [ ( function_def( decorator_list=ListAny(attribute(attr="random_variable")) ), lambda f: ast.FunctionDef( name=f.name, args=f.args, body=[ast.Pass()], returns=None, decorator_list=[], ), ), (_default, lambda x: remove_from_list), ] ) source = """ # foo.py @bm.random_variable def bias() -> Beta: return Beta(1, 1) @bm.random_variable def toss(i) -> Bernoulli: return Bernoulli(bias()) def foo(): return 123 """ expected = """ def bias(): pass def toss(i): pass """ m = ast.parse(source) result = _all(_all(rule))(m).expect_success() self.assertEqual(ast.dump(result), ast.dump(ast.parse(expected))) def test_rules_3(self) -> None: """Tests for rules.py""" self.maxDiff = None # Some nodes, like BoolOp, have the interesting property that they # have both regular children and children in a list, which makes it # inconvenient to apply a rule to all the "logical" children. This # combinator helps with that. t = ast.NameConstant(True) f = ast.NameConstant(False) swap_bools = pattern_rules( [(ast_true, lambda n: f), (ast_false, lambda n: t)], "swap_bools" ) # First we'll try it without the combinator: _all = ast_domain.all_children # "True < False < 1" has this structure: c = ast.Compare(ops=[ast.Lt(), ast.Lt()], left=t, comparators=[f, ast.Num(1)]) result = _all(once(swap_bools))(c).expect_success() # all applies the rule to op, left and comparators; since op and comparators # do not match the pattern, they're unchanged. But we do not recurse # into values, so we only change the first one: expected = "(False < False < 1)" observed = astor.to_source(result) self.assertEqual(observed.strip(), expected.strip()) # This version treats all the ops and values as children, and as # we intend, the rule operates on all the children: result = _all(list_member_children(once(swap_bools)))(c).expect_success() expected = "(False < True < 1)" observed = astor.to_source(result) self.assertEqual(observed.strip(), expected.strip()) def test_rules_4(self) -> None: """Tests for rules.py""" self.maxDiff = None # either_or_both logically takes two rules A and B, and tries to apply # Compose(A, B), A, or B, in that order. The first that succeeds is # the result. zero_to_one = PatternRule(0, lambda n: 1) one_to_two = PatternRule(1, lambda n: 2) eob = either_or_both(zero_to_one, one_to_two) self.assertEqual(eob(0).expect_success(), 2) self.assertEqual(eob(1).expect_success(), 2) self.assertTrue(eob(2).is_fail()) # The some_top_down combinator applies a rule to every node in the tree, # from root to leaves, but ignores nodes for which the rule fails. # It succeeds iff the rule succeeded on any node in the tree. This is # useful because it guarantees that if it succeeds, then it did the most # work it could do applying a rule to a tree. sometd = ast_domain.some_top_down result = sometd(eob)(ast.parse("0 + 1 * 2 + 3")).expect_success() expected = "2 + 2 * 2 + 3" observed = astor.to_source(result) self.assertEqual(observed.strip(), expected.strip()) # If the rule applies to no node, then we fail. self.assertTrue(sometd(eob)(result).is_fail()) # The some_bottom_up combinator is the same as some_top_down but it # works from leaves to root instead of root to leaves. somebu = ast_domain.some_bottom_up result = somebu(eob)(ast.parse("0 + 1 * 2 + 3")).expect_success() expected = "2 + 2 * 2 + 3" observed = astor.to_source(result) self.assertEqual(observed.strip(), expected.strip()) # If the rule applies to no node, then we fail. self.assertTrue(somebu(eob)(result).is_fail()) # SomeOf extends either_or_both to arbitrarily many rules. zero_to_one = PatternRule(0, lambda n: 1) one_to_two = PatternRule(1, lambda n: 2) three_to_four = PatternRule(3, lambda n: 4) so = SomeOf([zero_to_one, one_to_two, three_to_four]) self.assertEqual(so(0).expect_success(), 2) self.assertEqual(so(1).expect_success(), 2) self.assertEqual(so(3).expect_success(), 4) self.assertTrue(so(2).is_fail()) # AllOf extends composition to arbitrarily many rRulesTest two_to_three = PatternRule(2, lambda n: 3) ao1 = AllOf([zero_to_one, one_to_two, two_to_three]) self.assertEqual(ao1(0).expect_success(), 3) self.assertTrue(ao1(1).is_fail()) ao2 = AllOf([zero_to_one, one_to_two, three_to_four]) self.assertTrue(ao2(0).is_fail()) def test_rules_6(self) -> None: """Tests for rules.py""" # Sometimes a rule's projection will fail with an exception through # no fault of our own; it can be expensive or impossible to detect # a coming exception in some cases. In those cases we can use a combinator # which causes rules that throw exceptions to fail rather than throw. def always_throws(x: Any): raise NotImplementedError() self.maxDiff = None d = ignore_div_zero(PatternRule([int, int], lambda l: l[0] / l[1])) self.assertEqual(d([10, 5]).expect_success(), 2) self.assertTrue(d([10, 0]).is_fail()) n = ignore_runtime_error(PatternRule(int, always_throws)) self.assertTrue(n(123).is_fail()) def test_rules_7(self) -> None: """Tests for rules.py""" # descend_until is a handy combinator that descends through the tree, # top down, until a test rule succeeds. It then applies a rule to # the nodes that succeeded but does not further recurse down. It does # this for all matching nodes in the tree starting from the root. self.maxDiff = None # replace all 1 with 2, but only in functions decorated with @frob: t = PatternRule(function_def(decorator_list=ListAny(name(id="frob")))) r = top_down(once(PatternRule(num(1), lambda n: ast.Num(2)))) s = """ 0 1 @frob def f(): 0 1 @frob def g(): 1 1 def h(): 0 1 """ expected = """ 0 1 @frob def f(): 0 2 @frob def g(): 2 2 def h(): 0 1 """ result = descend_until(t, r)(ast.parse(s)).expect_success() observed = astor.to_source(result) self.assertEqual(observed.strip(), expected.strip()) def test_rules_8(self) -> None: """Tests for rules.py""" # specific_child applies a rule to a specified child of the rule # input; the input is required to have such a child. If the rule # succeeds then the output is the input with the rewritten child. self.maxDiff = None # replace all 1 with 2, but only in functions decorated with @frob: log = [] trace = make_logger(log) r = trace( top_down( once( if_then( PatternRule(binop()), trace( specific_child( "left", PatternRule(num(1), lambda n: ast.Num(2)) ) ), ) ) ) ) s = "1 + 1 * 1 + 1" expected = "2 + 2 * 1 + 1" result = r(ast.parse(s)).expect_success() observed = astor.to_source(result) self.assertEqual(observed.strip(), expected.strip()) observed = "\n".join(log) expected = """ Started top_down Started specific_child Finished specific_child Started specific_child Finished specific_child Started specific_child Finished specific_child Finished top_down """ self.assertEqual(observed.strip(), expected.strip()) def test_rules_9(self) -> None: """Tests for rules.py""" # This demonstrates that a rule that produces a list edit will # recursively rewrite that list edit. self.maxDiff = None # Recursively replace any list of the form [True, [another list]] with # the inner list. r = top_down(once(PatternRule([True, list], lambda l: ListEdit(l[1])))) s = [[True, [1]], [True, [[True, [2]], 3]]] expected = "[1, 2, 3]" observed = str(r(s).expect_success()) self.assertEqual(observed.strip(), expected.strip())
beanmachine-main
tests/ppl/compiler/rules_test.py
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm import torch from beanmachine.ppl.inference import BMGInference from torch import tensor from torch.distributions import Normal @bm.random_variable def norm(x): return Normal(0.0, 1.0) @bm.functional def norm_array(): return tensor([[norm(0), norm(1)], [norm(2), norm(3)]]) @bm.functional def transpose_1(): return torch.transpose(norm_array(), 0, 1) @bm.functional def transpose_2(): return torch.transpose(norm_array(), 1, 0) @bm.functional def transpose_3(): return norm_array().transpose(0, 1) # Fails due to invalid dimensions @bm.functional def unsupported_transpose_1(): return torch.transpose(norm_array(), 3, 2) # Fails due to invalid dimension @bm.functional def unsupported_transpose_2(): return norm_array().transpose(3, 1) # Fails due to invalid (non-int) dimension @bm.functional def unsupported_transpose_3(): return norm_array().transpose(3.2, 1) @bm.functional def scalar_transpose(): return torch.transpose(norm(0), 0, 1) @bm.functional def scalar_transpose_2(): return torch.transpose(tensor([norm(0)]), 0, 1) class TransposeTest(unittest.TestCase): dot_from_normal = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=Sample]; N06[label=Sample]; N07[label=2]; N08[label=ToMatrix]; N09[label=Transpose]; N10[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N02 -> N04; N02 -> N05; N02 -> N06; N03 -> N08; N04 -> N08; N05 -> N08; N06 -> N08; N07 -> N08; N07 -> N08; N08 -> N09; N09 -> N10; } """.strip() def test_transpose_1(self) -> None: queries = [transpose_1()] dot = BMGInference().to_dot(queries, {}) self.assertEqual(dot.strip(), self.dot_from_normal) def test_transpose_2(self) -> None: queries = [transpose_2()] dot = BMGInference().to_dot(queries, {}) self.assertEqual(dot.strip(), self.dot_from_normal) def test_transpose_3(self) -> None: queries = [transpose_3()] dot = BMGInference().to_dot(queries, {}) self.assertEqual(dot.strip(), self.dot_from_normal) def test_unsupported_transpose_1(self) -> None: with self.assertRaises(ValueError) as ex: BMGInference().infer([unsupported_transpose_1()], {}, 1) expected = """ Unsupported dimension arguments for transpose: 3 and 2 """ self.assertEqual(expected.strip(), str(ex.exception).strip()) def test_unsupported_transpose_2(self) -> None: with self.assertRaises(ValueError) as ex: BMGInference().infer([unsupported_transpose_2()], {}, 1) expected = """ Unsupported dimension arguments for transpose: 3 and 1 """ self.assertEqual(expected.strip(), str(ex.exception).strip()) def test_unsupported_transpose_3(self) -> None: with self.assertRaises(ValueError) as ex: BMGInference().infer([unsupported_transpose_3()], {}, 1) expected = """ Unsupported dimension arguments for transpose: 3.2 and 1 """ self.assertEqual(expected.strip(), str(ex.exception).strip()) def test_scalar_transpose(self) -> None: queries = [scalar_transpose()] dot = BMGInference().to_dot(queries, {}) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; } """ self.assertEqual(dot.strip(), expected.strip()) def test_1x1_transpose(self) -> None: queries = [scalar_transpose_2()] dot = BMGInference().to_dot(queries, {}) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; } """ self.assertEqual(dot.strip(), expected.strip())
beanmachine-main
tests/ppl/compiler/transpose_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Test performance of multiary addition optimization """ import platform import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference import BMGInference from torch.distributions import Normal @bm.random_variable def norm(x): return Normal(0.0, 1.0) @bm.functional def sum_1(counter): sum = 0.0 for i in range(counter): sum = sum + norm(i) return sum @bm.functional def sum_2(): return sum_1(100) + sum_1(50) def get_report(skip_optimizations): observations = {} queries = [sum_2()] number_samples = 1000 _, perf_report = BMGInference()._infer( queries, observations, number_samples, skip_optimizations=skip_optimizations ) return perf_report class BinaryVsMultiaryAdditionPerformanceTest(unittest.TestCase): def test_perf_num_nodes_edges(self) -> None: """ Test to check if Multiary addition optimization reduces the number of nodes and number of edges using the performance report returned by BMGInference. """ if platform.system() == "Windows": self.skipTest("Disabling *_perf_test.py until flakiness is resolved") self.maxDiff = None skip_optimizations = { "beta_bernoulli_conjugate_fixer", "beta_binomial_conjugate_fixer", "normal_normal_conjugate_fixer", } report_w_optimization = get_report(skip_optimizations) self.assertEqual(report_w_optimization.node_count, 105) self.assertEqual(report_w_optimization.edge_count, 204) skip_optimizations = { "multiary_addition_fixer", "beta_bernoulli_conjugate_fixer", "beta_binomial_conjugate_fixer", "normal_normal_conjugate_fixer", } report_wo_optimization = get_report(skip_optimizations) self.assertEqual(report_wo_optimization.node_count, 203) self.assertEqual(report_wo_optimization.edge_count, 302)
beanmachine-main
tests/ppl/compiler/binary_vs_multiary_addition_perf_test.py
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl.compiler.bmg_nodes as bn from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.bmg_node_types import is_supported_by_bmg from beanmachine.ppl.compiler.typer_base import TyperBase # To test out the typer base class, here is a very simple typer: it assigns # the "type" True to a node if that node and *all* of its ancestors are supported # node types for BMG, and False otherwise. # # The intention here is to demonstrate that the typer behaves as expected as we # modify the graph and update the typer. class SupportedTyper(TyperBase[bool]): def __init__(self) -> None: TyperBase.__init__(self) def _compute_type_inputs_known(self, node: bn.BMGNode) -> bool: return (isinstance(node, bn.ConstantNode) or is_supported_by_bmg(node)) and all( self[i] for i in node.inputs ) class TyperTest(unittest.TestCase): def test_typer(self) -> None: self.maxDiff = None # We start with this graph: # # 0 1 # | | # NORM # | # ~ 2 # | | # DIV 3 # | | # ADD # | | # EXP NEG # # The DIV node is not supported in BMG. bmg = BMGraphBuilder() c0 = bmg.add_constant(0.0) c1 = bmg.add_constant(1.0) c2 = bmg.add_constant(2.0) c3 = bmg.add_constant(3.0) norm = bmg.add_normal(c0, c1) ns = bmg.add_sample(norm) d = bmg.add_division(ns, c2) a = bmg.add_addition(d, c3) e = bmg.add_exp(a) neg = bmg.add_negate(a) typer = SupportedTyper() # When we ask the typer for a judgment of a node, we should get judgments # of all of its ancestor nodes as well, but we skip computing types of # non-ancestors: self.assertTrue(typer[ns]) # Just type the sample and its ancestors. self.assertTrue(norm in typer) self.assertTrue(c0 in typer) self.assertTrue(c1 in typer) self.assertFalse(d in typer) self.assertFalse(a in typer) self.assertFalse(c2 in typer) self.assertFalse(c3 in typer) self.assertFalse(e in typer) self.assertFalse(neg in typer) # If we then type the exp, all of its ancestors become typed. # Division is not supported in BMG, so the division is marked # as not supported. The division is an ancestor of the addition # and exp, so they are typed as False also. self.assertFalse(typer[e]) # The ancestors of the exp are now all typed. self.assertTrue(a in typer) self.assertTrue(d in typer) self.assertTrue(c3 in typer) self.assertTrue(c2 in typer) # But the negate is still not typed. self.assertFalse(neg in typer) # The types of the division, addition and exp are False: self.assertFalse(typer[d]) self.assertFalse(typer[a]) self.assertFalse(typer[e]) self.assertTrue(typer[c2]) self.assertTrue(typer[c3]) # Now let's mutate the graph by adding some new nodes... c4 = bmg.add_constant(0.5) m = bmg.add_multiplication(ns, c4) # ... and mutating the addition: a.inputs[0] = m # The graph now looks like this: # # 0 1 # | | # NORM # | # ~ 2 # | | | # | DIV # | # | 0.5 # | | # MUL 3 # | | # ADD # | | # EXP NEG # # But we have not yet informed the typer that there was an update. self.assertFalse(typer[a]) typer.update_type(a) self.assertTrue(typer[a]) # This should trigger typing on the untyped ancestors of # the addition: self.assertTrue(m in typer) self.assertTrue(c4 in typer) # It should NOT trigger typing the NEG. We have yet to ask for the type of # that branch, so we do not spent time propagating type information down # the NEG branch. self.assertFalse(neg in typer) # The multiplication and exp should now all be marked as supported also. self.assertTrue(typer[m]) self.assertTrue(typer[e])
beanmachine-main
tests/ppl/compiler/typer_base_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import operator import unittest import beanmachine.ppl as bm import torch from beanmachine.ppl.inference import BMGInference from torch import tensor from torch.distributions import Bernoulli, Binomial, Normal m1 = tensor([[12.0, 13.0], [14.0, 15.0]]) m2 = tensor([[22.0, 23.0], [24.0, 25.0]]) @bm.random_variable def norm_1(): return Normal(0.0, 1.0) @bm.functional def norm(): return torch.tensor([[1.0, 0.0], [0.0, norm_1()]]) @bm.functional def mm(): # Use both the instance and static forms. return torch.mm(m1.mm(norm()), m2) @bm.functional def matmul(): return torch.matmul(m1.matmul(norm()), m2) @bm.functional def infix(): return m1 @ norm() @ m2 @bm.functional def op_matmul(): return operator.matmul(operator.matmul(m1, norm()), m2) # Matrix multiplication of single-valued tensors is turned into ordinary multiplication. @bm.random_variable def trivial_norm_matrix(): return Normal(torch.tensor([0.0]), torch.tensor([1.0])) @bm.functional def trivial(): return trivial_norm_matrix() @ trivial_norm_matrix() @bm.functional def matmul_bad_dimensions(): n = norm() # 2x2 m = torch.eye(3) # 3x3 return n @ m @bm.random_variable def bern(): return Bernoulli(0.5) @bm.random_variable def bino(): return Binomial(5, 0.5) @bm.functional def bool_times_nat(): b1 = torch.tensor([[0, 1], [0, 1]]) b2 = torch.tensor([[0, bern()], [bern(), 1]]) n1 = torch.tensor([[1, bino()], [bino(), 2]]) n2 = torch.tensor([[2, 3], [4, 5]]) return b1 @ b2 @ n1 @ n2 class MatMulTest(unittest.TestCase): def test_matrix_multiplication(self) -> None: self.maxDiff = None expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label="[[12.0,13.0],\\\\n[14.0,15.0]]"]; N05[label=2]; N06[label=1.0]; N07[label=ToMatrix]; N08[label="@"]; N09[label="[[22.0,23.0],\\\\n[24.0,25.0]]"]; N10[label="@"]; N11[label=Query]; N00 -> N02; N00 -> N07; N00 -> N07; N01 -> N02; N02 -> N03; N03 -> N07; N04 -> N08; N05 -> N07; N05 -> N07; N06 -> N07; N07 -> N08; N08 -> N10; N09 -> N10; N10 -> N11; }""" observed = BMGInference().to_dot([mm()], {}) self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([matmul()], {}) self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([infix()], {}) self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([op_matmul()], {}) self.assertEqual(expected.strip(), observed.strip()) expected_trivial = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label="*"]; N5[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; N3 -> N4; N4 -> N5; }""" observed = BMGInference().to_dot([trivial()], {}) self.assertEqual(expected_trivial.strip(), observed.strip()) with self.assertRaises(ValueError) as ex: BMGInference().to_dot([matmul_bad_dimensions()], {}) expected = """ The model uses a matrix multiplication (@) operation unsupported by Bean Machine Graph. The dimensions of the operands are 2x2 and 3x3. The unsupported node was created in function call matmul_bad_dimensions().""" self.assertEqual(expected.strip(), str(ex.exception).strip()) expected = """ digraph "graph" { N00[label=0.5]; N01[label=Bernoulli]; N02[label=Sample]; N03[label=5]; N04[label=Binomial]; N05[label=Sample]; N06[label="[[0,1],\\\\n[0,1]]"]; N07[label=2]; N08[label=False]; N09[label=True]; N10[label=ToMatrix]; N11[label=ToRealMatrix]; N12[label="@"]; N13[label=1]; N14[label=ToMatrix]; N15[label=ToRealMatrix]; N16[label="@"]; N17[label="[[2,3],\\\\n[4,5]]"]; N18[label="@"]; N19[label=Query]; N00 -> N01; N00 -> N04; N01 -> N02; N02 -> N10; N02 -> N10; N03 -> N04; N04 -> N05; N05 -> N14; N05 -> N14; N06 -> N12; N07 -> N10; N07 -> N10; N07 -> N14; N07 -> N14; N07 -> N14; N08 -> N10; N09 -> N10; N10 -> N11; N11 -> N12; N12 -> N16; N13 -> N14; N14 -> N15; N15 -> N16; N16 -> N18; N17 -> N18; N18 -> N19; } """ observed = BMGInference().to_dot([bool_times_nat()], {}) self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/matrix_multiplication_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Compare original and conjugate prior transformed Beta-Bernoulli model with operations on Bernoulli samples""" import unittest from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor from .testlib.conjugate_models import BetaBernoulliOpsModel class BetaBernoulliWithOpsConjugateTest(unittest.TestCase): def test_conjugate_graph(self) -> None: self.maxDiff = None model = BetaBernoulliOpsModel(2.0, 2.0) queries = [model.theta(), model.sum_y()] observations = { model.y(0): tensor(0.0), model.y(1): tensor(0.0), model.y(2): tensor(1.0), model.y(3): tensor(0.0), } num_samples = 1000 bmg = BMGInference() # This is the model after beta-bernoulli conjugate rewrite is done skip_optimizations = set() observed_bmg = bmg.to_dot( queries, observations, num_samples, skip_optimizations=skip_optimizations ) expected_bmg = """ digraph "graph" { N00[label=3.0]; N01[label=5.0]; N02[label=Beta]; N03[label=Sample]; N04[label=Bernoulli]; N05[label=Sample]; N06[label=Sample]; N07[label=Sample]; N08[label=Sample]; N09[label=Query]; N10[label=Sample]; N11[label=ToPosReal]; N12[label=ToPosReal]; N13[label=ToPosReal]; N14[label=ToPosReal]; N15[label=ToPosReal]; N16[label="+"]; N17[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N03 -> N04; N03 -> N09; N04 -> N05; N04 -> N06; N04 -> N07; N04 -> N08; N04 -> N10; N05 -> N11; N06 -> N12; N07 -> N13; N08 -> N14; N10 -> N15; N11 -> N16; N12 -> N16; N13 -> N16; N14 -> N16; N15 -> N16; N16 -> N17; } """ self.assertEqual(expected_bmg.strip(), observed_bmg.strip())
beanmachine-main
tests/ppl/compiler/fix_beta_bernoulli_ops_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tests for gen_builder.py""" import unittest import beanmachine.ppl as bm from beanmachine.ppl.compiler.gen_builder import generate_builder from beanmachine.ppl.compiler.runtime import BMGRuntime from torch.distributions import Normal @bm.random_variable def norm(x): return Normal(0.0, 1.0) @bm.functional def norm_sum(): return norm(1) + norm(2) + norm(3) + norm(4) class GenerateBuilderTest(unittest.TestCase): def test_generate_builder_1(self) -> None: self.maxDiff = None bmg = BMGRuntime().accumulate_graph([norm_sum()], {}) observed = generate_builder(bmg) expected = """ import beanmachine.ppl.compiler.bmg_nodes as bn import torch from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from torch import tensor bmg = BMGraphBuilder() n0 = bmg.add_node(bn.UntypedConstantNode(tensor(0.))) n1 = bmg.add_node(bn.UntypedConstantNode(tensor(1.))) n2 = bmg.add_node(bn.NormalNode(n0, n1)) n3 = bmg.add_node(bn.SampleNode(n2)) n4 = bmg.add_node(bn.SampleNode(n2)) n5 = bmg.add_node(bn.AdditionNode(n3, n4)) n6 = bmg.add_node(bn.SampleNode(n2)) n7 = bmg.add_node(bn.AdditionNode(n5, n6)) n8 = bmg.add_node(bn.SampleNode(n2)) n9 = bmg.add_node(bn.AdditionNode(n7, n8)) n10 = bmg.add_node(bn.Query(n9))""" self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/gen_builder_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Compare original and conjugate prior transformed Beta-Binomial model""" import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import log, tensor from torch.distributions import Binomial, Normal @bm.random_variable def binomial(x): return Binomial(100, logits=log(tensor([0.25]))) # equivalent probability: 0.2 @bm.random_variable def normal(x): return Normal(0.0, 1.0) @bm.random_variable def binomial_normal_logit(): return Binomial(100, logits=tensor([normal(0)])) @bm.functional def add(): return binomial(0) + binomial(1) class BinomialLogitTest(unittest.TestCase): def test_constant_binomial_logit_graph(self) -> None: observations = {} queries_observed = [add()] graph_observed = BMGInference().to_dot(queries_observed, observations) graph_expected = """ digraph "graph" { N0[label=100]; N1[label=0.20000000298023224]; N2[label=Binomial]; N3[label=Sample]; N4[label=Sample]; N5[label=ToPosReal]; N6[label=ToPosReal]; N7[label="+"]; N8[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N2 -> N4; N3 -> N5; N4 -> N6; N5 -> N7; N6 -> N7; N7 -> N8; } """ self.assertEqual(graph_observed.strip(), graph_expected.strip()) def test_binomial_normal_logit_graph(self) -> None: observations = {} queries_observed = [binomial_normal_logit()] graph_observed = BMGInference().to_dot(queries_observed, observations) graph_expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=100]; N5[label=Logistic]; N6[label=Binomial]; N7[label=Sample]; N8[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N5; N4 -> N6; N5 -> N6; N6 -> N7; N7 -> N8; } """ self.assertEqual(graph_observed.strip(), graph_expected.strip())
beanmachine-main
tests/ppl/compiler/fix_binomial_logit_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor from torch.distributions import Bernoulli, Beta, Normal @bm.random_variable def beta(): return Beta(2.0, 2.0) @bm.random_variable def flip(n): return Bernoulli(beta()) @bm.random_variable def flip_2(n): return Bernoulli(beta() * 0.5) @bm.random_variable def normal(n): return Normal(flip_2(n), 1.0) class CoinFlipTest(unittest.TestCase): def test_gen_bm_python_simple(self) -> None: self.maxDiff = None queries = [beta()] observations = { flip(0): tensor(0.0), flip(1): tensor(0.0), flip(2): tensor(1.0), flip(3): tensor(0.0), } observed = BMGInference().to_bm_python(queries, observations) expected = """ import beanmachine.ppl as bm import torch v0 = 2.0 @bm.random_variable def rv0(): \treturn torch.distributions.Beta(v0, v0) v1 = rv0() @bm.random_variable def rv1(i): \treturn torch.distributions.Bernoulli(v1.wrapper(*v1.arguments)) v2 = rv1(1) v3 = rv1(2) v4 = rv1(3) v5 = rv1(4) queries = [v1] observations = {v2 : torch.tensor(0.0),v3 : torch.tensor(0.0),v4 : torch.tensor(1.0),v5 : torch.tensor(0.0)} """ self.assertEqual(expected.strip(), observed.strip()) def test_gen_bm_python_rv_operations(self) -> None: self.maxDiff = None queries = [beta(), normal(0), normal(1)] observations = { flip_2(0): tensor(0.0), } observed = BMGInference().to_bm_python(queries, observations) expected = """ import beanmachine.ppl as bm import torch v0 = 2.0 @bm.random_variable def rv0(): \treturn torch.distributions.Beta(v0, v0) v1 = rv0() v2 = 0.5 @bm.functional def f3(): \treturn torch.multiply(v1.wrapper(*v1.arguments), v2) @bm.random_variable def rv1(i): \treturn torch.distributions.Bernoulli(f3()) v4 = rv1(1) @bm.functional def f5(): \treturn (v4.wrapper(*v4.arguments)) v6 = 1.0 @bm.random_variable def rv2(): \treturn torch.distributions.Normal(f5(), v6) v7 = rv2() v8 = rv1(2) @bm.functional def f9(): \treturn (v8.wrapper(*v8.arguments)) @bm.random_variable def rv3(): \treturn torch.distributions.Normal(f9(), v6) v10 = rv3() queries = [v1,v7,v10] observations = {v4 : torch.tensor(0.0)} """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/gen_bm_python_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Compare original and conjugate prior transformed Beta-Bernoulli model with a hyperparameter given by calling a non-random_variable function.""" import unittest from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor from .testlib.conjugate_models import BetaBernoulliScaleHyperParameters class BetaBernoulliWithScaledHPConjugateTest(unittest.TestCase): def test_beta_bernoulli_conjugate_graph(self) -> None: model = BetaBernoulliScaleHyperParameters(0.5, 1.5) queries = [model.theta()] observations = { model.y(0): tensor(0.0), model.y(1): tensor(0.0), model.y(2): tensor(1.0), model.y(3): tensor(0.0), } num_samples = 1000 bmg = BMGInference() # This is the model after beta-bernoulli conjugate rewrite is done skip_optimizations = set() observed_bmg = bmg.to_dot( queries, observations, num_samples, skip_optimizations=skip_optimizations ) expected_bmg = """ digraph "graph" { N0[label=1.5]; N1[label=6.5]; N2[label=Beta]; N3[label=Sample]; N4[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; } """ self.assertEqual(expected_bmg.strip(), observed_bmg.strip())
beanmachine-main
tests/ppl/compiler/fix_beta_bernoulli_const_added_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference import BMGInference from torch.distributions import Normal @bm.random_variable def norm(x): return Normal(0.0, 1.0) @bm.functional def prod_1(counter): prod = 0.0 for i in range(counter): prod = prod * norm(i) return prod @bm.functional def prod_2(): return prod_1(10) class ZeroQueryTypeCheckingBug(unittest.TestCase): def test_query_type_zero(self) -> None: """ Query of a variable of type Zero produces a type checking error. """ self.maxDiff = None # TODO: One of the design principles of BMG is to allow # TODO: for any query, even if you ask it to query constants. # TODO: A potential solution could be to add a warning system so that # TODO: the model's developer becomes aware of the possible error with self.assertRaises(AssertionError) as ex: BMGInference().infer([prod_2()], {}, 1) expected = "" observed = str(ex.exception) self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/query_type_zero_bug_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.gen_bmg_cpp import to_bmg_cpp from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph from beanmachine.ppl.compiler.gen_bmg_python import to_bmg_python from beanmachine.ppl.compiler.gen_dot import to_dot def tidy(s: str) -> str: return "\n".join(c.strip() for c in s.strip().split("\n")).strip() class BMGFactorTest(unittest.TestCase): def test_bmg_factor(self) -> None: bmg = BMGraphBuilder() pos1 = bmg.add_pos_real(2.0) real1 = bmg.add_real(3.0) prob1 = bmg.add_probability(0.4) dist1 = bmg.add_normal(real1, pos1) x = bmg.add_sample(dist1) x_sq = bmg.add_multiplication(x, x) bmg.add_exp_product(x, prob1, x_sq) bmg.add_observation(x, 7.0) observed = to_dot(bmg, label_edges=False) expected = """ digraph "graph" { N0[label=3.0]; N1[label=2.0]; N2[label=Normal]; N3[label=Sample]; N4[label=0.4]; N5[label="*"]; N6[label=ExpProduct]; N7[label="Observation 7.0"]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N5; N3 -> N5; N3 -> N6; N3 -> N7; N4 -> N6; N5 -> N6; } """ self.maxDiff = None self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_graph(bmg).graph.to_string() expected = """ 0: CONSTANT(real 3) (out nodes: 2) 1: CONSTANT(positive real 2) (out nodes: 2) 2: NORMAL(0, 1) (out nodes: 3) 3: SAMPLE(2) (out nodes: 5, 5, 6) observed to be real 7 4: CONSTANT(probability 0.4) (out nodes: 6) 5: MULTIPLY(3, 3) (out nodes: 6) 6: EXP_PRODUCT(3, 4, 5) (out nodes: ) observed to be unknown """ self.assertEqual(tidy(expected), tidy(observed)) observed = to_bmg_python(bmg).code expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_real(3.0) n1 = g.add_constant_pos_real(2.0) n2 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n0, n1], ) n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2]) n4 = g.add_constant_probability(0.4) n5 = g.add_operator(graph.OperatorType.MULTIPLY, [n3, n3]) n6 = g.add_factor( graph.FactorType.EXP_PRODUCT, [n3, n4, n5], ) g.observe(n3, 7.0) """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_cpp(bmg).code expected = """ graph::Graph g; uint n0 = g.add_constant_real(3.0); uint n1 = g.add_constant_pos_real(2.0); uint n2 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n0, n1})); uint n3 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n2})); uint n4 = g.add_constant_probability(0.4); uint n5 = g.add_operator( graph::OperatorType::MULTIPLY, std::vector<uint>({n3, n3})); uint n6 = g.add_factor( graph::FactorType::EXP_PRODUCT, std::vector<uint>({n3, n4, n5})); g.observe(n3, 7.0); """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/bmg_factor_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference import BMGInference from torch import tensor from torch.distributions import Bernoulli # Bean Machine allows queries on functionals that return constants; # BMG does not. It would be nice though if a BM model that queried # a constant worked when using BMGInference the same way that it # does with other inference engines, for several reasons: # # (1) consistency of behaviour across inference engines # (2) testing optimizations; if an optimization ends up producing # a constant, it's nice to be able to query that functional # and see that it does indeed produce a constant. # (3) possible future error reporting; it would be nice to warn the # user that they are querying a constant because this could be # a bug in their model. # (4) model development and debugging; a user might make a dummy functional # that just returns a constant now, intending to replace it with an # actual function later. Or might force a functional to produce a # particular value to see how the model behaves in that case. # # This test verifies that we can query a constant functional. @bm.functional def c(): return tensor(2.5) @bm.functional def c2(): return tensor([1.5, -2.5]) # Two RVIDs but they both refer to the same query node: @bm.random_variable def flip(): return Bernoulli(0.5) @bm.functional def flip2(): return flip() @bm.functional def flip3(): return flip() + 0 @bm.functional def flip4(): return 0 + flip() # Here's a weird case. Normally query nodes are deduplicated but it is # possible to end up with two distinct query nodes both referring to the # same constant because of an optimization. @bm.functional def always_false_1(): return 1 < flip() @bm.functional def always_false_2(): # Boolean comparision optimizer turns both of these into False, # even though the queries were originally on different expressions # and therefore were different nodes. return flip() < 0 # BMG supports constant single values or tensors, but the tensors must # be 1 or 2 dimensional; empty tensors and 3+ dimensional tensors # need to produce an error. @bm.functional def invalid_tensor_1(): return tensor([]) @bm.functional def invalid_tensor_2(): return tensor([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]) class BMGQueryTest(unittest.TestCase): def test_constant_functional(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([c(), c2()], {}) expected = """ digraph "graph" { N0[label=2.5]; N1[label=Query]; N2[label="[1.5,-2.5]"]; N3[label=Query]; N0 -> N1; N2 -> N3; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_cpp([c(), c2()], {}) # TODO: Is this valid C++? The API for adding constants # has changed but the code generator has not kept up. # Check if this is wrong and fix it. expected = """ graph::Graph g; Eigen::MatrixXd m0(1, 1); m0 << 2.5; uint n0 = g.add_constant_real_matrix(m0); uint q0 = g.query(n0); Eigen::MatrixXd m1(2, 1); m1 << 1.5, -2.5; uint n1 = g.add_constant_real_matrix(m1); uint q1 = g.query(n1); """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_python([c(), c2()], {}) expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_real(2.5) q0 = g.query(n0) n1 = g.add_constant_real_matrix(tensor([[1.5],[-2.5]])) q1 = g.query(n1) """ self.assertEqual(expected.strip(), observed.strip()) samples = BMGInference().infer([c(), c2()], {}, 1, 1) observed = samples[c()] expected = "tensor([[2.5000]])" self.assertEqual(expected.strip(), str(observed).strip()) observed = samples[c2()] expected = "tensor([[[ 1.5000, -2.5000]]], dtype=torch.float64)" self.assertEqual(expected.strip(), str(observed).strip()) def test_redundant_functionals(self) -> None: self.maxDiff = None # We see from the graph that we have two distinct RVIDs but they # both refer to the same query. We need to make sure that BMG # inference works, and that we get the dictionary out that we expect. observed = BMGInference().to_dot([flip(), flip2()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Query]; N4[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N2 -> N4; } """ self.assertEqual(expected.strip(), str(observed).strip()) samples = BMGInference().infer([flip(), flip2()], {}, 10, 1) f = samples[flip()] f2 = samples[flip2()] self.assertEqual(str(f), str(f2)) # A strange case: two queries on the same constant. observed = BMGInference().to_dot([always_false_1(), always_false_2()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=False]; N4[label=Query]; N5[label=Query]; N0 -> N1; N1 -> N2; N3 -> N4; N3 -> N5; } """ self.assertEqual(expected.strip(), str(observed).strip()) samples = BMGInference().infer([always_false_1(), always_false_2()], {}, 2, 1) af1 = samples[always_false_1()] af2 = samples[always_false_2()] expected = "tensor([[False, False]])" self.assertEqual(expected, str(af1)) self.assertEqual(expected, str(af2)) def test_redundant_functionals_2(self) -> None: self.maxDiff = None # Here's a particularly weird one: we have what is initially two # distinct queries: flip() + 0 and 0 + flip(), but the graph optimizer # deduces that both queries refer to the same non-constant node. observed = BMGInference().to_dot([flip3(), flip4()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Query]; N4[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N2 -> N4; } """ self.assertEqual(expected.strip(), str(observed).strip()) samples = BMGInference().infer([flip3(), flip4()], {}, 10, 1) f3 = samples[flip3()] f4 = samples[flip4()] self.assertEqual(str(f3), str(f4)) def test_invalid_tensors(self) -> None: self.maxDiff = None with self.assertRaises(ValueError) as ex: BMGInference().to_dot([invalid_tensor_1(), invalid_tensor_2()], {}) # TODO: This error message is horrid. Fix it. expected = ( "The model uses a tensor " + "operation unsupported by Bean Machine Graph.\n" + "The unsupported node is the operator of a query.\n" + "The model uses a tensor operation unsupported by Bean Machine Graph.\n" + "The unsupported node is the operator of a query." ) self.assertEqual(expected, str(ex.exception))
beanmachine-main
tests/ppl/compiler/bmg_query_test.py
beanmachine-main
tests/ppl/compiler/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tests for bmg_nodes.py""" import unittest import torch from beanmachine.ppl.compiler.bmg_nodes import ( ConstantRealMatrixNode, ConstantTensorNode, MatrixMultiplicationNode, MultiplicationNode, NormalNode, RealNode, ) from beanmachine.ppl.compiler.sizer import Sizer from beanmachine.ppl.compiler.support import ComputeSupport def support(n): return str(ComputeSupport()[n]) def size(n): return Sizer()[n] class BMGNodesTest(unittest.TestCase): def test_RealNode(self) -> None: r42 = RealNode(42.0) self.assertEqual(r42.value, 42.0) self.assertEqual(size(r42), torch.Size([])) # Note that support always returns a set of tensors, even though this # node is technically scalar valued. In practice we never need to compute # the support of a RealNode, so fixing this minor oddity is unnecessary. self.assertEqual(support(r42), "tensor(42.)") def test_MultiplicationNode(self) -> None: r2 = RealNode(2.0) r3 = RealNode(3.0) rx = MultiplicationNode([r2, r3]) self.assertEqual(size(rx), torch.Size([])) self.assertEqual(support(rx), "tensor(6.)") def test_ConstantTensorNode_1d(self) -> None: v42 = torch.tensor([42, 43]) t42 = ConstantTensorNode(v42) self.assertEqual(t42.value[0], v42[0]) self.assertEqual(t42.value[1], v42[1]) self.assertEqual(v42.size(), torch.Size([2])) self.assertEqual(size(t42), v42.size()) self.assertEqual(support(t42), "tensor([42, 43])") def test_ConstantTensorNode_2d(self) -> None: v42 = torch.tensor([[42, 43], [44, 45]]) t42 = ConstantTensorNode(v42) self.assertEqual(t42.value[0, 0], v42[0, 0]) self.assertEqual(t42.value[1, 0], v42[1, 0]) self.assertEqual(v42.size(), torch.Size([2, 2])) self.assertEqual(size(t42), v42.size()) expected = """ tensor([[42, 43], [44, 45]])""" self.assertEqual(support(t42).strip(), expected.strip()) def test_ConstantRealMatrixNode_2d(self) -> None: v42 = torch.tensor([[42, 43], [44, 45]]) t42 = ConstantRealMatrixNode(v42) self.assertEqual(t42.value[0, 0], v42[0, 0]) self.assertEqual(t42.value[1, 0], v42[1, 0]) self.assertEqual(v42.size(), torch.Size([2, 2])) self.assertEqual(size(t42), v42.size()) expected = """ tensor([[42, 43], [44, 45]])""" self.assertEqual(support(t42).strip(), expected.strip()) def test_MatrixMultiplicationNode(self) -> None: v42 = torch.tensor([[42, 43], [44, 45]]) mv = torch.mm(v42, v42) t42 = ConstantRealMatrixNode(v42) mt = MatrixMultiplicationNode(t42, t42) self.assertEqual(v42.size(), torch.Size([2, 2])) self.assertEqual(size(mt), mv.size()) expected = """ tensor([[3656, 3741], [3828, 3917]]) """ self.assertEqual(support(mt).strip(), expected.strip()) def test_inputs_and_outputs(self) -> None: # We must maintain the invariant that the output set and the # input set of every node are consistent even when the graph # is edited. r1 = RealNode(1.0) self.assertEqual(len(r1.outputs.items), 0) n = NormalNode(r1, r1) # r1 has two outputs, both equal to n self.assertEqual(r1.outputs.items[n], 2) r2 = RealNode(2.0) n.inputs[0] = r2 # r1 and r2 now each have one output self.assertEqual(r1.outputs.items[n], 1) self.assertEqual(r2.outputs.items[n], 1)
beanmachine-main
tests/ppl/compiler/bmg_nodes_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.gen_bmg_cpp import to_bmg_cpp from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph from beanmachine.ppl.compiler.gen_bmg_python import to_bmg_python from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.compiler.runtime import BMGRuntime from beanmachine.ppl.model.rv_identifier import RVIdentifier from torch import tensor from torch.distributions import Normal def _rv_id() -> RVIdentifier: return RVIdentifier(lambda a, b: a, (1, 1)) @bm.random_variable def norm(): return Normal(tensor(0.0), tensor(1.0)) @bm.functional def f1by2(): # a 1x2 tensor in Python becomes a 2x1 matrix in BMG return tensor([norm().exp(), norm()]) @bm.functional def f2by1(): # a 2x1 tensor in Python becomes a 1x2 matrix in BMG return tensor([[norm().exp()], [norm()]]) @bm.functional def f2by3(): # a 2x3 tensor in Python becomes a 3x2 matrix in BMG return tensor([[norm().exp(), 10, 20], [norm(), 30, 40]]) @bm.functional def f1by2by3(): # A 1x2x3 tensor in Python is an error in BMG. return tensor([[[norm().exp(), 10, 20], [norm(), 30, 40]]]) class ToMatrixTest(unittest.TestCase): def test_to_matrix_1by2(self) -> None: self.maxDiff = None bmg = BMGRuntime().accumulate_graph([f1by2()], {}) observed = to_dot( bmg, node_types=True, edge_requirements=True, after_transform=True, label_edges=True, ) expected = """ digraph "graph" { N0[label="0.0:R"]; N1[label="1.0:R+"]; N2[label="Normal:R"]; N3[label="Sample:R"]; N4[label="2:N"]; N5[label="1:N"]; N6[label="Exp:R+"]; N7[label="ToReal:R"]; N8[label="ToMatrix:MR[2,1]"]; N9[label="Query:MR[2,1]"]; N0 -> N2[label="mu:R"]; N1 -> N2[label="sigma:R+"]; N2 -> N3[label="operand:R"]; N3 -> N6[label="operand:R"]; N3 -> N8[label="1:R"]; N4 -> N8[label="rows:N"]; N5 -> N8[label="columns:N"]; N6 -> N7[label="operand:<=R"]; N7 -> N8[label="0:R"]; N8 -> N9[label="operator:any"]; } """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_cpp(bmg).code expected = """ graph::Graph g; uint n0 = g.add_constant_real(0.0); uint n1 = g.add_constant_pos_real(1.0); uint n2 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n0, n1})); uint n3 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n2})); uint n4 = g.add_constant_natural(2); uint n5 = g.add_constant_natural(1); uint n6 = g.add_operator( graph::OperatorType::EXP, std::vector<uint>({n3})); uint n7 = g.add_operator( graph::OperatorType::TO_REAL, std::vector<uint>({n6})); uint n8 = g.add_operator( graph::OperatorType::TO_MATRIX, std::vector<uint>({n4, n5, n7, n3})); uint q0 = g.query(n8); """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_python(bmg).code expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_real(0.0) n1 = g.add_constant_pos_real(1.0) n2 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n0, n1], ) n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2]) n4 = g.add_constant_natural(2) n5 = g.add_constant_natural(1) n6 = g.add_operator(graph.OperatorType.EXP, [n3]) n7 = g.add_operator(graph.OperatorType.TO_REAL, [n6]) n8 = g.add_operator( graph.OperatorType.TO_MATRIX, [n4, n5, n7, n3], ) q0 = g.query(n8) """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_graph(bmg).graph.to_dot() expected = """ digraph "graph" { N0[label="0"]; N1[label="1"]; N2[label="Normal"]; N3[label="~"]; N4[label="2"]; N5[label="1"]; N6[label="exp"]; N7[label="ToReal"]; N8[label="ToMatrix"]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N6; N3 -> N8; N4 -> N8; N5 -> N8; N6 -> N7; N7 -> N8; Q0[label="Query"]; N8 -> Q0; } """ self.assertEqual(expected.strip(), observed.strip()) def test_to_matrix_2by1(self) -> None: self.maxDiff = None bmg = BMGRuntime().accumulate_graph([f2by1()], {}) observed = to_dot( bmg, node_types=True, edge_requirements=True, after_transform=True, label_edges=True, ) expected = """ digraph "graph" { N0[label="0.0:R"]; N1[label="1.0:R+"]; N2[label="Normal:R"]; N3[label="Sample:R"]; N4[label="1:N"]; N5[label="2:N"]; N6[label="Exp:R+"]; N7[label="ToReal:R"]; N8[label="ToMatrix:MR[1,2]"]; N9[label="Query:MR[1,2]"]; N0 -> N2[label="mu:R"]; N1 -> N2[label="sigma:R+"]; N2 -> N3[label="operand:R"]; N3 -> N6[label="operand:R"]; N3 -> N8[label="1:R"]; N4 -> N8[label="rows:N"]; N5 -> N8[label="columns:N"]; N6 -> N7[label="operand:<=R"]; N7 -> N8[label="0:R"]; N8 -> N9[label="operator:any"]; } """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_cpp(bmg).code expected = """ graph::Graph g; uint n0 = g.add_constant_real(0.0); uint n1 = g.add_constant_pos_real(1.0); uint n2 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n0, n1})); uint n3 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n2})); uint n4 = g.add_constant_natural(1); uint n5 = g.add_constant_natural(2); uint n6 = g.add_operator( graph::OperatorType::EXP, std::vector<uint>({n3})); uint n7 = g.add_operator( graph::OperatorType::TO_REAL, std::vector<uint>({n6})); uint n8 = g.add_operator( graph::OperatorType::TO_MATRIX, std::vector<uint>({n4, n5, n7, n3})); uint q0 = g.query(n8); """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_python(bmg).code expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_real(0.0) n1 = g.add_constant_pos_real(1.0) n2 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n0, n1], ) n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2]) n4 = g.add_constant_natural(1) n5 = g.add_constant_natural(2) n6 = g.add_operator(graph.OperatorType.EXP, [n3]) n7 = g.add_operator(graph.OperatorType.TO_REAL, [n6]) n8 = g.add_operator( graph.OperatorType.TO_MATRIX, [n4, n5, n7, n3], ) q0 = g.query(n8) """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_graph(bmg).graph.to_dot() expected = """ digraph "graph" { N0[label="0"]; N1[label="1"]; N2[label="Normal"]; N3[label="~"]; N4[label="1"]; N5[label="2"]; N6[label="exp"]; N7[label="ToReal"]; N8[label="ToMatrix"]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N6; N3 -> N8; N4 -> N8; N5 -> N8; N6 -> N7; N7 -> N8; Q0[label="Query"]; N8 -> Q0; } """ self.assertEqual(expected.strip(), observed.strip()) def test_to_matrix_2by3(self) -> None: self.maxDiff = None bmg = BMGRuntime().accumulate_graph([f2by3()], {}) observed = to_dot( bmg, node_types=True, edge_requirements=True, after_transform=True, label_edges=True, ) expected = """ digraph "graph" { N00[label="0.0:R"]; N01[label="1.0:R+"]; N02[label="Normal:R"]; N03[label="Sample:R"]; N04[label="3:N"]; N05[label="2:N"]; N06[label="Exp:R+"]; N07[label="ToReal:R"]; N08[label="10.0:R"]; N09[label="20.0:R"]; N10[label="30.0:R"]; N11[label="40.0:R"]; N12[label="ToMatrix:MR[3,2]"]; N13[label="Query:MR[3,2]"]; N00 -> N02[label="mu:R"]; N01 -> N02[label="sigma:R+"]; N02 -> N03[label="operand:R"]; N03 -> N06[label="operand:R"]; N03 -> N12[label="3:R"]; N04 -> N12[label="rows:N"]; N05 -> N12[label="columns:N"]; N06 -> N07[label="operand:<=R"]; N07 -> N12[label="0:R"]; N08 -> N12[label="1:R"]; N09 -> N12[label="2:R"]; N10 -> N12[label="4:R"]; N11 -> N12[label="5:R"]; N12 -> N13[label="operator:any"]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_to_matrix_2(self) -> None: # Test TO_MATRIX, TO_REAL_MATRIX, TO_POS_REAL_MATRIX and # TO_NEG_REAL_MATRIX. The first composes a matrix from elements; # the latter convert a matrix of one type (probability in this case) # to a matrix of another type. # # Notice that we do not explicitly insert a ToRealMatrix # node here; the problem fixer detects that we have a 2x1 # probability matrix from the column index but the # LogSumExpVector needs a real, positive real or negative # real matrix, and inserts a ToRealMatrix node on that edge. self.maxDiff = None bmg = BMGraphBuilder() zero = bmg.add_constant(0) one = bmg.add_constant(1) two = bmg.add_natural(2) three = bmg.add_constant(3) neg_three = bmg.add_neg_real(-3.0) beta = bmg.add_beta(three, three) b0 = bmg.add_sample(beta) b1 = bmg.add_sample(beta) b2 = bmg.add_sample(beta) b3 = bmg.add_sample(beta) pm = bmg.add_to_matrix(two, two, b0, b1, b2, b3) nm = bmg.add_to_matrix(two, two, neg_three, neg_three, neg_three, neg_three) c0 = bmg.add_column_index(pm, zero) c1 = bmg.add_column_index(pm, one) nc0 = bmg.add_column_index(nm, zero) tpr = bmg.add_to_positive_real_matrix(c1) tnr = bmg.add_to_negative_real_matrix(nc0) lse0 = bmg.add_logsumexp_vector(c0) lse1 = bmg.add_logsumexp_vector(tpr) lse2 = bmg.add_logsumexp_vector(tnr) bmg.add_query(lse0, _rv_id()) bmg.add_query(lse1, _rv_id()) bmg.add_query(lse2, _rv_id()) observed = to_dot( bmg, node_types=True, edge_requirements=True, after_transform=True, label_edges=True, ) expected = """ digraph "graph" { N00[label="3.0:R+"]; N01[label="Beta:P"]; N02[label="Sample:P"]; N03[label="Sample:P"]; N04[label="Sample:P"]; N05[label="Sample:P"]; N06[label="2:N"]; N07[label="ToMatrix:MP[2,2]"]; N08[label="0:N"]; N09[label="ColumnIndex:MP[2,1]"]; N10[label="ToRealMatrix:MR[2,1]"]; N11[label="LogSumExp:R"]; N12[label="Query:R"]; N13[label="1:N"]; N14[label="ColumnIndex:MP[2,1]"]; N15[label="ToPosRealMatrix:MR+[2,1]"]; N16[label="LogSumExp:R"]; N17[label="Query:R"]; N18[label="-3.0:R-"]; N19[label="ToMatrix:MR-[2,2]"]; N20[label="ColumnIndex:MR-[2,1]"]; N21[label="ToNegRealMatrix:MR-[2,1]"]; N22[label="LogSumExp:R"]; N23[label="Query:R"]; N00 -> N01[label="alpha:R+"]; N00 -> N01[label="beta:R+"]; N01 -> N02[label="operand:P"]; N01 -> N03[label="operand:P"]; N01 -> N04[label="operand:P"]; N01 -> N05[label="operand:P"]; N02 -> N07[label="0:P"]; N03 -> N07[label="1:P"]; N04 -> N07[label="2:P"]; N05 -> N07[label="3:P"]; N06 -> N07[label="columns:N"]; N06 -> N07[label="rows:N"]; N06 -> N19[label="columns:N"]; N06 -> N19[label="rows:N"]; N07 -> N09[label="left:MP[2,2]"]; N07 -> N14[label="left:MP[2,2]"]; N08 -> N09[label="right:N"]; N08 -> N20[label="right:N"]; N09 -> N10[label="operand:any"]; N10 -> N11[label="operand:MR[2,1]"]; N11 -> N12[label="operator:any"]; N13 -> N14[label="right:N"]; N14 -> N15[label="operand:any"]; N15 -> N16[label="operand:MR+[2,1]"]; N16 -> N17[label="operator:any"]; N18 -> N19[label="0:R-"]; N18 -> N19[label="1:R-"]; N18 -> N19[label="2:R-"]; N18 -> N19[label="3:R-"]; N19 -> N20[label="left:MR-[2,2]"]; N20 -> N21[label="UNKNOWN:any"]; N21 -> N22[label="operand:MR-[2,1]"]; N22 -> N23[label="operator:any"]; } """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_cpp(bmg).code expected = """ graph::Graph g; uint n0 = g.add_constant_pos_real(3.0); uint n1 = g.add_distribution( graph::DistributionType::BETA, graph::AtomicType::PROBABILITY, std::vector<uint>({n0, n0})); uint n2 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n3 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n4 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n5 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n6 = g.add_constant_natural(2); uint n7 = g.add_operator( graph::OperatorType::TO_MATRIX, std::vector<uint>({n6, n6, n2, n3, n4, n5})); uint n8 = g.add_constant_natural(0); uint n9 = g.add_operator( graph::OperatorType::COLUMN_INDEX, std::vector<uint>({n7, n8})); uint n10 = g.add_operator( graph::OperatorType::TO_REAL_MATRIX, std::vector<uint>({n9})); uint n11 = g.add_operator( graph::OperatorType::LOGSUMEXP_VECTOR, std::vector<uint>({n10})); uint q0 = g.query(n11); uint n12 = g.add_constant_natural(1); uint n13 = g.add_operator( graph::OperatorType::COLUMN_INDEX, std::vector<uint>({n7, n12})); uint n14 = g.add_operator( graph::OperatorType::TO_POS_REAL_MATRIX, std::vector<uint>({n13})); uint n15 = g.add_operator( graph::OperatorType::LOGSUMEXP_VECTOR, std::vector<uint>({n14})); uint q1 = g.query(n15); uint n16 = g.add_constant_neg_real(-3.0); uint n17 = g.add_operator( graph::OperatorType::TO_MATRIX, std::vector<uint>({n6, n6, n16, n16, n16, n16})); uint n18 = g.add_operator( graph::OperatorType::COLUMN_INDEX, std::vector<uint>({n17, n8})); uint n19 = g.add_operator( graph::OperatorType::TO_NEG_REAL_MATRIX, std::vector<uint>({n18})); uint n20 = g.add_operator( graph::OperatorType::LOGSUMEXP_VECTOR, std::vector<uint>({n19})); uint q2 = g.query(n20); """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_graph(bmg).graph.to_dot() expected = """ digraph "graph" { N0[label="3"]; N1[label="Beta"]; N2[label="~"]; N3[label="~"]; N4[label="~"]; N5[label="~"]; N6[label="2"]; N7[label="ToMatrix"]; N8[label="0"]; N9[label="ColumnIndex"]; N10[label="ToReal"]; N11[label="LogSumExp"]; N12[label="1"]; N13[label="ColumnIndex"]; N14[label="ToPosReal"]; N15[label="LogSumExp"]; N16[label="-3"]; N17[label="ToMatrix"]; N18[label="ColumnIndex"]; N19[label="ToNegReal"]; N20[label="LogSumExp"]; N0 -> N1; N0 -> N1; N1 -> N2; N1 -> N3; N1 -> N4; N1 -> N5; N2 -> N7; N3 -> N7; N4 -> N7; N5 -> N7; N6 -> N7; N6 -> N7; N6 -> N17; N6 -> N17; N7 -> N9; N7 -> N13; N8 -> N9; N8 -> N18; N9 -> N10; N10 -> N11; N12 -> N13; N13 -> N14; N14 -> N15; N16 -> N17; N16 -> N17; N16 -> N17; N16 -> N17; N17 -> N18; N18 -> N19; N19 -> N20; Q0[label="Query"]; N11 -> Q0; Q1[label="Query"]; N15 -> Q1; Q2[label="Query"]; N20 -> Q2; } """ self.assertEqual(expected.strip(), observed.strip()) def test_to_matrix_1by2by3(self) -> None: self.maxDiff = None bmg = BMGRuntime().accumulate_graph([f1by2by3()], {}) # TODO: Error message could be more specific here than "a tensor". # We could say what is wrong: its size. expected = """ The model uses a tensor operation unsupported by Bean Machine Graph. The unsupported node was created in function call f1by2by3().""" with self.assertRaises(ValueError) as ex: to_dot( bmg, node_types=True, edge_requirements=True, after_transform=True, label_edges=True, ) self.assertEqual(expected.strip(), str(ex.exception).strip())
beanmachine-main
tests/ppl/compiler/to_matrix_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """End-to-end test for tutorial on GMM with Poisson number of components""" # This file is a manual replica of the Bento tutorial with the same name # TODO: The disabled test generates the following error: # E TypeError: Distribution 'Poisson' is not supported by Bean Machine Graph. # This will need to be fixed for OSS readiness task import logging import unittest # Comments after imports suggest alternative comment style (for original tutorial) import beanmachine.ppl as bm import torch # from torch import manual_seed, tensor import torch.distributions as dist # from torch.distributions import Bernoulli, Normal, Uniform from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor # This makes the results deterministic and reproducible. logging.getLogger("beanmachine").setLevel(50) torch.manual_seed(42) # Model class GaussianMixtureModel(object): @bm.random_variable def mu(self, c): return dist.Normal(0.0, 10.0) @bm.random_variable def sigma(self, c): return dist.Gamma(1, 1) @bm.random_variable def component(self, i): return dist.Bernoulli(probs=0.5) @bm.random_variable def y(self, i): c = self.component(i) return dist.Normal(self.mu(c), self.sigma(c)) # Creating sample data n = 12 # num observations k = 2 # true number of clusters gmm = GaussianMixtureModel() ground_truth = { **{gmm.mu(i): tensor(i % 2).float() for i in range(k)}, **{gmm.sigma(i): tensor(0.1) for i in range(k)}, **{gmm.component(i): tensor(i % k).float() for i in range(n)}, } # [Visualization code in tutorial skipped] # Inference parameters num_samples = ( 1 ###00 Sample size should not affect (the ability to find) compilation issues. ) queries = ( [gmm.component(j) for j in range(n)] + [gmm.mu(i) for i in range(k)] + [gmm.sigma(i) for i in range(k)] ) observations = { gmm.y(i): ground_truth[gmm.mu(ground_truth[gmm.component(i)].item())] for i in range(n) } class tutorialGMM1Dimension2Components(unittest.TestCase): def test_tutorial_GMM_1_dimension_2_components(self) -> None: """Check BM and BMG inference both terminate""" self.maxDiff = None # Inference with BM torch.manual_seed( 42 ) # Note: Second time we seed. Could be a good tutorial style mh = bm.CompositionalInference() mh.infer( queries, observations, num_samples=num_samples, num_chains=1, ) bmg = BMGInference() bmg.infer( queries=queries, observations=observations, num_samples=num_samples, num_chains=1, ) self.assertTrue(True, msg="We just want to check this point is reached") def test_tutorial_GMM_1_dimension_2_components_to_dot_cpp_python( self, ) -> None: self.maxDiff = None observed = BMGInference().to_dot(queries, observations) expected = """digraph "graph" { N00[label=0.5]; N01[label=Bernoulli]; N02[label=Sample]; N03[label=0.0]; N04[label=10.0]; N05[label=Normal]; N06[label=Sample]; N07[label=Sample]; N08[label=1.0]; N09[label=Gamma]; N10[label=Sample]; N11[label=Sample]; N12[label=if]; N13[label=if]; N14[label=Normal]; N15[label=Sample]; N16[label="Observation 0.0"]; N17[label=Sample]; N18[label=if]; N19[label=if]; N20[label=Normal]; N21[label=Sample]; N22[label="Observation 1.0"]; N23[label=Sample]; N24[label=if]; N25[label=if]; N26[label=Normal]; N27[label=Sample]; N28[label="Observation 0.0"]; N29[label=Sample]; N30[label=if]; N31[label=if]; N32[label=Normal]; N33[label=Sample]; N34[label="Observation 1.0"]; N35[label=Sample]; N36[label=if]; N37[label=if]; N38[label=Normal]; N39[label=Sample]; N40[label="Observation 0.0"]; N41[label=Sample]; N42[label=if]; N43[label=if]; N44[label=Normal]; N45[label=Sample]; N46[label="Observation 1.0"]; N47[label=Sample]; N48[label=if]; N49[label=if]; N50[label=Normal]; N51[label=Sample]; N52[label="Observation 0.0"]; N53[label=Sample]; N54[label=if]; N55[label=if]; N56[label=Normal]; N57[label=Sample]; N58[label="Observation 1.0"]; N59[label=Sample]; N60[label=if]; N61[label=if]; N62[label=Normal]; N63[label=Sample]; N64[label="Observation 0.0"]; N65[label=Sample]; N66[label=if]; N67[label=if]; N68[label=Normal]; N69[label=Sample]; N70[label="Observation 1.0"]; N71[label=Sample]; N72[label=if]; N73[label=if]; N74[label=Normal]; N75[label=Sample]; N76[label="Observation 0.0"]; N77[label=Sample]; N78[label=if]; N79[label=if]; N80[label=Normal]; N81[label=Sample]; N82[label="Observation 1.0"]; N83[label=Query]; N84[label=Query]; N85[label=Query]; N86[label=Query]; N87[label=Query]; N88[label=Query]; N89[label=Query]; N90[label=Query]; N91[label=Query]; N92[label=Query]; N93[label=Query]; N94[label=Query]; N95[label=Query]; N96[label=Query]; N97[label=Query]; N98[label=Query]; N00 -> N01; N01 -> N02; N01 -> N17; N01 -> N23; N01 -> N29; N01 -> N35; N01 -> N41; N01 -> N47; N01 -> N53; N01 -> N59; N01 -> N65; N01 -> N71; N01 -> N77; N02 -> N12; N02 -> N13; N02 -> N83; N03 -> N05; N04 -> N05; N05 -> N06; N05 -> N07; N06 -> N12; N06 -> N18; N06 -> N24; N06 -> N30; N06 -> N36; N06 -> N42; N06 -> N48; N06 -> N54; N06 -> N60; N06 -> N66; N06 -> N72; N06 -> N78; N06 -> N95; N07 -> N12; N07 -> N18; N07 -> N24; N07 -> N30; N07 -> N36; N07 -> N42; N07 -> N48; N07 -> N54; N07 -> N60; N07 -> N66; N07 -> N72; N07 -> N78; N07 -> N96; N08 -> N09; N08 -> N09; N09 -> N10; N09 -> N11; N10 -> N13; N10 -> N19; N10 -> N25; N10 -> N31; N10 -> N37; N10 -> N43; N10 -> N49; N10 -> N55; N10 -> N61; N10 -> N67; N10 -> N73; N10 -> N79; N10 -> N97; N11 -> N13; N11 -> N19; N11 -> N25; N11 -> N31; N11 -> N37; N11 -> N43; N11 -> N49; N11 -> N55; N11 -> N61; N11 -> N67; N11 -> N73; N11 -> N79; N11 -> N98; N12 -> N14; N13 -> N14; N14 -> N15; N15 -> N16; N17 -> N18; N17 -> N19; N17 -> N84; N18 -> N20; N19 -> N20; N20 -> N21; N21 -> N22; N23 -> N24; N23 -> N25; N23 -> N85; N24 -> N26; N25 -> N26; N26 -> N27; N27 -> N28; N29 -> N30; N29 -> N31; N29 -> N86; N30 -> N32; N31 -> N32; N32 -> N33; N33 -> N34; N35 -> N36; N35 -> N37; N35 -> N87; N36 -> N38; N37 -> N38; N38 -> N39; N39 -> N40; N41 -> N42; N41 -> N43; N41 -> N88; N42 -> N44; N43 -> N44; N44 -> N45; N45 -> N46; N47 -> N48; N47 -> N49; N47 -> N89; N48 -> N50; N49 -> N50; N50 -> N51; N51 -> N52; N53 -> N54; N53 -> N55; N53 -> N90; N54 -> N56; N55 -> N56; N56 -> N57; N57 -> N58; N59 -> N60; N59 -> N61; N59 -> N91; N60 -> N62; N61 -> N62; N62 -> N63; N63 -> N64; N65 -> N66; N65 -> N67; N65 -> N92; N66 -> N68; N67 -> N68; N68 -> N69; N69 -> N70; N71 -> N72; N71 -> N73; N71 -> N93; N72 -> N74; N73 -> N74; N74 -> N75; N75 -> N76; N77 -> N78; N77 -> N79; N77 -> N94; N78 -> N80; N79 -> N80; N80 -> N81; N81 -> N82; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_cpp(queries, observations) expected = """graph::Graph g; uint n0 = g.add_constant_probability(0.5); uint n1 = g.add_distribution( graph::DistributionType::BERNOULLI, graph::AtomicType::BOOLEAN, std::vector<uint>({n0})); uint n2 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n3 = g.add_constant_real(0.0); uint n4 = g.add_constant_pos_real(10.0); uint n5 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n3, n4})); uint n6 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n5})); uint n7 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n5})); uint n8 = g.add_constant_pos_real(1.0); uint n9 = g.add_distribution( graph::DistributionType::GAMMA, graph::AtomicType::POS_REAL, std::vector<uint>({n8, n8})); uint n10 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n9})); uint n11 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n9})); uint n12 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n2, n7, n6})); uint n13 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n2, n11, n10})); uint n14 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n12, n13})); uint n15 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n14})); g.observe(n15, 0.0); uint n16 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n17 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n16, n7, n6})); uint n18 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n16, n11, n10})); uint n19 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n17, n18})); uint n20 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n19})); g.observe(n20, 1.0); uint n21 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n22 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n21, n7, n6})); uint n23 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n21, n11, n10})); uint n24 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n22, n23})); uint n25 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n24})); g.observe(n25, 0.0); uint n26 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n27 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n26, n7, n6})); uint n28 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n26, n11, n10})); uint n29 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n27, n28})); uint n30 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n29})); g.observe(n30, 1.0); uint n31 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n32 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n31, n7, n6})); uint n33 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n31, n11, n10})); uint n34 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n32, n33})); uint n35 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n34})); g.observe(n35, 0.0); uint n36 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n37 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n36, n7, n6})); uint n38 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n36, n11, n10})); uint n39 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n37, n38})); uint n40 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n39})); g.observe(n40, 1.0); uint n41 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n42 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n41, n7, n6})); uint n43 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n41, n11, n10})); uint n44 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n42, n43})); uint n45 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n44})); g.observe(n45, 0.0); uint n46 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n47 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n46, n7, n6})); uint n48 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n46, n11, n10})); uint n49 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n47, n48})); uint n50 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n49})); g.observe(n50, 1.0); uint n51 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n52 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n51, n7, n6})); uint n53 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n51, n11, n10})); uint n54 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n52, n53})); uint n55 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n54})); g.observe(n55, 0.0); uint n56 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n57 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n56, n7, n6})); uint n58 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n56, n11, n10})); uint n59 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n57, n58})); uint n60 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n59})); g.observe(n60, 1.0); uint n61 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n62 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n61, n7, n6})); uint n63 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n61, n11, n10})); uint n64 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n62, n63})); uint n65 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n64})); g.observe(n65, 0.0); uint n66 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n67 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n66, n7, n6})); uint n68 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n66, n11, n10})); uint n69 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n67, n68})); uint n70 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n69})); g.observe(n70, 1.0); uint q0 = g.query(n2); uint q1 = g.query(n16); uint q2 = g.query(n21); uint q3 = g.query(n26); uint q4 = g.query(n31); uint q5 = g.query(n36); uint q6 = g.query(n41); uint q7 = g.query(n46); uint q8 = g.query(n51); uint q9 = g.query(n56); uint q10 = g.query(n61); uint q11 = g.query(n66); uint q12 = g.query(n6); uint q13 = g.query(n7); uint q14 = g.query(n10); uint q15 = g.query(n11); """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_python(queries, observations) expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_probability(0.5) n1 = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [n0], ) n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n3 = g.add_constant_real(0.0) n4 = g.add_constant_pos_real(10.0) n5 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n3, n4], ) n6 = g.add_operator(graph.OperatorType.SAMPLE, [n5]) n7 = g.add_operator(graph.OperatorType.SAMPLE, [n5]) n8 = g.add_constant_pos_real(1.0) n9 = g.add_distribution( graph.DistributionType.GAMMA, graph.AtomicType.POS_REAL, [n8, n8], ) n10 = g.add_operator(graph.OperatorType.SAMPLE, [n9]) n11 = g.add_operator(graph.OperatorType.SAMPLE, [n9]) n12 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n2, n7, n6], ) n13 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n2, n11, n10], ) n14 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n12, n13], ) n15 = g.add_operator(graph.OperatorType.SAMPLE, [n14]) g.observe(n15, 0.0) n16 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n17 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n16, n7, n6], ) n18 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n16, n11, n10], ) n19 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n17, n18], ) n20 = g.add_operator(graph.OperatorType.SAMPLE, [n19]) g.observe(n20, 1.0) n21 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n22 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n21, n7, n6], ) n23 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n21, n11, n10], ) n24 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n22, n23], ) n25 = g.add_operator(graph.OperatorType.SAMPLE, [n24]) g.observe(n25, 0.0) n26 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n27 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n26, n7, n6], ) n28 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n26, n11, n10], ) n29 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n27, n28], ) n30 = g.add_operator(graph.OperatorType.SAMPLE, [n29]) g.observe(n30, 1.0) n31 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n32 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n31, n7, n6], ) n33 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n31, n11, n10], ) n34 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n32, n33], ) n35 = g.add_operator(graph.OperatorType.SAMPLE, [n34]) g.observe(n35, 0.0) n36 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n37 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n36, n7, n6], ) n38 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n36, n11, n10], ) n39 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n37, n38], ) n40 = g.add_operator(graph.OperatorType.SAMPLE, [n39]) g.observe(n40, 1.0) n41 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n42 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n41, n7, n6], ) n43 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n41, n11, n10], ) n44 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n42, n43], ) n45 = g.add_operator(graph.OperatorType.SAMPLE, [n44]) g.observe(n45, 0.0) n46 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n47 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n46, n7, n6], ) n48 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n46, n11, n10], ) n49 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n47, n48], ) n50 = g.add_operator(graph.OperatorType.SAMPLE, [n49]) g.observe(n50, 1.0) n51 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n52 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n51, n7, n6], ) n53 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n51, n11, n10], ) n54 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n52, n53], ) n55 = g.add_operator(graph.OperatorType.SAMPLE, [n54]) g.observe(n55, 0.0) n56 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n57 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n56, n7, n6], ) n58 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n56, n11, n10], ) n59 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n57, n58], ) n60 = g.add_operator(graph.OperatorType.SAMPLE, [n59]) g.observe(n60, 1.0) n61 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n62 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n61, n7, n6], ) n63 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n61, n11, n10], ) n64 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n62, n63], ) n65 = g.add_operator(graph.OperatorType.SAMPLE, [n64]) g.observe(n65, 0.0) n66 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n67 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n66, n7, n6], ) n68 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [n66, n11, n10], ) n69 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n67, n68], ) n70 = g.add_operator(graph.OperatorType.SAMPLE, [n69]) g.observe(n70, 1.0) q0 = g.query(n2) q1 = g.query(n16) q2 = g.query(n21) q3 = g.query(n26) q4 = g.query(n31) q5 = g.query(n36) q6 = g.query(n41) q7 = g.query(n46) q8 = g.query(n51) q9 = g.query(n56) q10 = g.query(n61) q11 = g.query(n66) q12 = g.query(n6) q13 = g.query(n7) q14 = g.query(n10) q15 = g.query(n11) """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/gmm_1d_2comp_test.py
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm import graphviz from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import exp from torch.distributions import Normal @bm.random_variable def X(): return Normal(0.0, 3.0) @bm.random_variable def Y(): return Normal(loc=0.0, scale=exp(X() * 0.5)) class NealsFunnelTest(unittest.TestCase): def test_neals_funnel(self) -> None: self.maxDiff = None observations = {} queries = [X(), Y()] observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label=0.0]; N01[label=3.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Query]; N05[label=0.5]; N06[label="*"]; N07[label=Exp]; N08[label=Normal]; N09[label=Sample]; N10[label=Query]; N00 -> N02; N00 -> N08; N01 -> N02; N02 -> N03; N03 -> N04; N03 -> N06; N05 -> N06; N06 -> N07; N07 -> N08; N08 -> N09; N09 -> N10; } """ self.assertEqual(expected.strip(), observed.strip()) def test_to_graphviz_type(self) -> None: self.maxDiff = None observations = {} queries = [X(), Y()] observed = type(BMGInference().to_graphviz(queries, observations)) expected = graphviz.Source self.assertEqual(expected, observed)
beanmachine-main
tests/ppl/compiler/neals_funnel_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Comparison operators are not supported in BMG yet but we need to be able to detect use of them and give an error. Here we verify that we can rewrite code containing them correctly.""" import unittest import astor import beanmachine.ppl as bm from beanmachine.ppl.compiler.bm_to_bmg import _bm_function_to_bmg_ast from torch.distributions import Normal, StudentT @bm.random_variable def x(): return Normal(0.0, 1.0) @bm.random_variable def y(): z = 0.0 < x() < 2.0 return StudentT(3.0, z, 4.0) class ComparisonRewritingTest(unittest.TestCase): def test_comparison_rewriting_1(self) -> None: self.maxDiff = None # The key thing to note here is that we eliminate Python's weird # comparison logic entirely; we reduce # # z = 0.0 < x() < 2.0 # # to the equivalent of: # # tx = x() # comp = 0.0 < tx # if comp: # z = tx < 2.0 # else: # z = comp # # which has the same semantics but has only simple comparisons and # simple control flows. self.assertTrue(y.is_random_variable) bmgast = _bm_function_to_bmg_ast(y().function, "y_helper") observed = astor.to_source(bmgast) expected = """ def y_helper(bmg): import operator def y(): a1 = 0.0 r6 = [] r10 = {} a4 = bmg.handle_function(x, r6, r10) a7 = bmg.handle_function(operator.lt, [a1, a4]) bmg.handle_if(a7) if a7: a8 = 2.0 z = bmg.handle_function(operator.lt, [a4, a8]) else: z = a7 a16 = 3.0 a13 = [a16] a17 = [z] a12 = bmg.handle_function(operator.add, [a13, a17]) a18 = 4.0 a14 = [a18] r11 = bmg.handle_function(operator.add, [a12, a14]) r15 = {} r2 = bmg.handle_function(StudentT, r11, r15) return r2 a3 = bmg.handle_dot_get(bm, 'random_variable') r5 = [y] r9 = {} y = bmg.handle_function(a3, r5, r9) return y """ self.assertEqual(observed.strip(), expected.strip())
beanmachine-main
tests/ppl/compiler/comparison_rewriting_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest # The item() member function should be treated as an identity by the compiler # for the purposes of graph generation. import beanmachine.ppl as bm from beanmachine.ppl.inference.bmg_inference import BMGInference from torch.distributions import Bernoulli, Beta @bm.random_variable def beta(): return Beta(2.0, 2.0) @bm.random_variable def flip(): return Bernoulli(beta().item()) class ItemTest(unittest.TestCase): def test_item_member_function(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([flip()], {}, after_transform=False) expected = """ digraph "graph" { N0[label=2.0]; N1[label=Beta]; N2[label=Sample]; N3[label=Item]; N4[label=Bernoulli]; N5[label=Sample]; N6[label=Query]; N0 -> N1; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N4; N4 -> N5; N5 -> N6; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([flip()], {}, after_transform=True) expected = """ digraph "graph" { N0[label=2.0]; N1[label=Beta]; N2[label=Sample]; N3[label=Bernoulli]; N4[label=Sample]; N5[label=Query]; N0 -> N1; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N4; N4 -> N5; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/item_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from beanmachine.ppl.compiler.profiler import ProfilerData class ProfilerTest(unittest.TestCase): def test_profiler(self) -> None: self.maxDiff = None pd = ProfilerData() pd.begin("A", 1000000000) pd.begin("B", 1100000000) pd.begin("C", 1200000000) pd.finish("C", 1300000000) pd.begin("C", 1400000000) pd.finish("C", 1500000000) pd.finish("B", 1600000000) pd.finish("A", 1700000000) pd.begin("D", 1800000000) pd.finish("D", 1900000000) expected = """ begin A 1000000000 begin B 1100000000 begin C 1200000000 finish C 1300000000 begin C 1400000000 finish C 1500000000 finish B 1600000000 finish A 1700000000 begin D 1800000000 finish D 1900000000""" self.assertEqual(expected.strip(), str(pd).strip()) # B accounts for 500 ms of A; the two Cs account for 200 ms of B; # the rest is unattributed report = pd.to_report() expected = """ A:(1) 700 ms B:(1) 500 ms C:(2) 200 ms unattributed: 300 ms unattributed: 200 ms D:(1) 100 ms Total time: 800 ms """ self.assertEqual(expected.strip(), str(report).strip()) self.assertEqual(700000000, report.A.total_time) self.assertEqual(500000000, report.A.B.total_time) self.assertEqual(200000000, report.A.B.C.total_time)
beanmachine-main
tests/ppl/compiler/profiler_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference.bmg_inference import BMGInference from torch.distributions import Bernoulli # The dependency graph here is x -> y -> z -> x @bm.random_variable def bad_cycle_1_x(): return Bernoulli(bad_cycle_1_y()) @bm.random_variable def bad_cycle_1_y(): return Bernoulli(bad_cycle_1_z()) @bm.random_variable def bad_cycle_1_z(): return Bernoulli(bad_cycle_1_x()) # The dependency graph here is z -> x(2) -> y(0) -> x(1) -> y(0) @bm.random_variable def bad_cycle_2_x(n): return Bernoulli(bad_cycle_2_y(0)) @bm.random_variable def bad_cycle_2_y(n): return Bernoulli(bad_cycle_2_x(n + 1)) @bm.random_variable def bad_cycle_2_z(): return Bernoulli(bad_cycle_2_x(2)) class CycleDetectorTest(unittest.TestCase): def test_bad_cyclic_model_1(self) -> None: with self.assertRaises(RecursionError): BMGInference().infer([bad_cycle_1_x()], {}, 1) def test_bad_cyclic_model_2(self) -> None: with self.assertRaises(RecursionError): BMGInference().infer([bad_cycle_2_z()], {}, 1)
beanmachine-main
tests/ppl/compiler/cycle_detector_test.py
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm import torch import torch.distributions as dist from beanmachine.ppl.inference import BMGInference # https://github.com/facebookresearch/beanmachine/issues/1312 @bm.random_variable def unif(): return dist.Uniform(0, 1) @bm.random_variable def beta(): return dist.Beta(unif() + 0.1, unif() + 0.1) @bm.random_variable def flip(): return dist.Bernoulli(1 - beta()) class BugRegressionTest(unittest.TestCase): def test_regress_1312(self) -> None: self.maxDiff = None # There were two problems exposed by this user-supplied repro. Both are # now fixed. # # The first was that a typo in the code which propagated type mutations # through the graph during the problem-fixing phase was causing some types # to not be updated correctly, which was then causing internal compiler # errors down the line. # # The second was that due to the order in which the problem fixers ran, # the 1-beta operation was generated as: # # ToProb(Add(1.0, ToReal(Negate(ToPosReal(Sample(Beta(...)))))) # # Which is not wrong but is quite inefficient. It is now generated as # you would expect: # # Complement(Sample(Beta(...))) queries = [unif()] observations = {flip(): torch.tensor(1)} observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label=Flat]; N01[label=Sample]; N02[label=ToPosReal]; N03[label=0.1]; N04[label="+"]; N05[label=Beta]; N06[label=Sample]; N07[label=complement]; N08[label=Bernoulli]; N09[label=Sample]; N10[label="Observation True"]; N11[label=Query]; N00 -> N01; N01 -> N02; N01 -> N11; N02 -> N04; N03 -> N04; N04 -> N05; N04 -> N05; N05 -> N06; N06 -> N07; N07 -> N08; N08 -> N09; N09 -> N10; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/bug_regression_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # """End-to-end test for n-schools model based on the one in PPL Bench""" # See for example https://github.com/facebookresearch/pplbench/blob/main/pplbench/models/n_schools.py import logging import unittest from typing import Tuple import beanmachine.ppl as bm import numpy as np import torch.distributions as dist import xarray as xr from beanmachine import graph from beanmachine.ppl.inference.bmg_inference import BMGInference from scipy.stats import norm from torch import tensor LOGGER = logging.getLogger(__name__) # Planned additions: # TODO: It would be great to have another example based on Sepehr's bma model here: # https://www.internalfb.com/intern/diffusion/FBS/browsefile/master/fbcode/applications/fb/bma/bma_model.py # TODO: It would be great to also have a test case based on the PPL bench version of n-schools that follows: # Start n-schools model """ N Schools This is a generalization of a classical 8 schools model to n schools. The model posits that the effect of a school on a student's performance can be explained by the a baseline effect of all schools plus an additive effect of the state, the school district and the school type. Hyper Parameters: n - total number of schools num_states - number of states num_districts_per_state - number of school districts in each state num_types - number of school types scale_state - state effect scale scale_district - district effect scale scale_type - school type effect scale Model: beta_baseline = StudentT(dof_baseline, 0.0, scale_baseline) sigma_state ~ HalfCauchy(0, scale_state) sigma_district ~ HalfCauchy(0, scale_district) sigma_type ~ HalfCauchy(0, scale_type) for s in 0 .. num_states - 1 beta_state[s] ~ Normal(0, sigma_state) for d in 0 .. num_districts_per_state - 1 beta_district[s, d] ~ Normal(0, sigma_district) for t in 0 .. num_types - 1 beta_type[t] ~ Normal(0, sigma_type) for i in 0 ... n - 1 Assume we are given state[i], district[i], type[i] Y_hat[i] = beta_baseline + beta_state[state[i]] + beta_district[state[i], district[i]] + beta_type[type[i]] sigma[i] ~ Uniform(0.5, 1.5) Y[i] ~ Normal(Y_hat[i], sigma[i]) The dataset consists of the following Y[school] - float sigma[school] - float and it includes the attributes n - number of schools num_states num_districts_per_state num_types dof_baseline scale_baseline scale_state scale_district scale_type state_idx[school] - 0 .. num_states - 1 district_idx[school] - 0 .. num_districts_per_state - 1 type_idx[school] - 0 .. num_types - 1 The posterior samples include the following, sigma_state[draw] - float sigma_district[draw] - float sigma_type[draw] - float beta_baseline[draw] - float beta_state[draw, state] - float beta_district[draw, state, district] - float beta_type[draw, type] - float """ def generate_data( # type: ignore seed: int, n: int = 2000, num_states: int = 8, num_districts_per_state: int = 5, num_types: int = 5, dof_baseline: float = 3.0, scale_baseline: float = 10.0, scale_state: float = 1.0, scale_district: float = 1.0, scale_type: float = 1.0, ) -> Tuple[xr.Dataset, xr.Dataset]: """ See the class documentation for an explanation of the parameters. :param seed: random number generator seed """ if n % 2 != 0: LOGGER.warn(f"n should be a multiple of 2. Actual values = {n}") # In this model we will generate exactly equal amounts of training # and test data with the same number of training and test schools # in each state, district, and type combination n = n // 2 rng = np.random.default_rng(seed) beta_baseline = rng.standard_t(dof_baseline) * scale_baseline sigma_state = np.abs(rng.standard_cauchy()) * scale_state sigma_district = np.abs(rng.standard_cauchy()) * scale_district sigma_type = np.abs(rng.standard_cauchy()) * scale_type beta_state = rng.normal(loc=0, scale=sigma_state, size=num_states) beta_district = rng.normal( loc=0, scale=sigma_district, size=(num_states, num_districts_per_state) ) beta_type = rng.normal(loc=0, scale=sigma_type, size=num_types) # we will randomly assign the schools to states, district, and types state_idx = rng.integers(low=0, high=num_states, size=n) district_idx = rng.integers(low=0, high=num_districts_per_state, size=n) type_idx = rng.integers(low=0, high=num_types, size=n) y_hat = ( beta_baseline + beta_state[state_idx] + beta_district[state_idx, district_idx] + beta_type[type_idx] ) train_sigma = rng.uniform(0.5, 1.5, size=n) train_y = rng.normal(loc=y_hat, scale=train_sigma) test_sigma = rng.uniform(0.5, 1.5, size=n) test_y = rng.normal(loc=y_hat, scale=test_sigma) return tuple( # type: ignore xr.Dataset( {"Y": (["school"], y), "sigma": (["school"], sigma)}, coords={"school": np.arange(n)}, attrs={ "n": n, "num_states": num_states, "num_districts_per_state": num_districts_per_state, "num_types": num_types, "dof_baseline": dof_baseline, "scale_baseline": scale_baseline, "scale_state": scale_state, "scale_district": scale_district, "scale_type": scale_type, "state_idx": state_idx, "district_idx": district_idx, "type_idx": type_idx, }, ) for y, sigma in [(train_y, train_sigma), (test_y, test_sigma)] ) def evaluate_posterior_predictive(samples: xr.Dataset, test: xr.Dataset) -> np.ndarray: """ Computes the predictive likelihood of all the test items w.r.t. each sample. See the class documentation for the `samples` and `test` parameters. :returns: a numpy array of the same size as the sample dimension. """ # transpose the datasets to be in a convenient format samples = samples.transpose("draw", "state", "district", "type") y_hat = ( samples.beta_baseline.values[:, np.newaxis] + samples.beta_state.values[:, test.attrs["state_idx"]] + samples.beta_district.values[ :, test.attrs["state_idx"], test.attrs["district_idx"] ] + samples.beta_type.values[:, test.attrs["type_idx"]] ) # size = (iterations, n_test) loglike = norm.logpdf( test.Y.values[np.newaxis, :], loc=y_hat, scale=test.sigma.values[np.newaxis, :], ) # size = (iterations, n_test) return loglike.sum(axis=1) # size = (iterations,) ## OLD # DATA NUM_CLASS = 2 # num_classes (For Dirichlet may be need at least two) # TODO: Clarify number of lablers is implicit NUM_ITEMS = 1 # number of items PREV_PRIOR = tensor([1.0, 0.0]) # prior on prevalence # PREV_PRIOR is a list of length NUM_CLASSES CONF_MATRIX_PRIOR = tensor([1.0, 0.0]) # prior on confusion matrix # CONF_MATRIX_PRIOR is a list of length NUM_CLASS # TODO: Does Dirichlet support 2d matrices? # TODO: Is it really necessary to reject dirichlet on tensor([1])? IDX_RATINGS = [[0]] # indexed ratings that labelers assigned to items IDX_LABELERS = [[0]] # indexed list of labelers who labeled items EXPERT_CONF_MATRIX = tensor( [[0.99, 0.01], [0.01, 0.99]] ) # confusion matrix of an expert (if we have true ratings) # EXPERT_CONF_MATRIX is of size NUM_CLASS x NUM_CLASS # Row (first) index is true class, and column (second) index is observed IDX_TRUE_RATINGS = [0] # Of size NUM_ITEMS # Represents true class of items by a perfect labler # When information is missing, use value NUM_CLASS # MODEL @bm.random_variable def prevalence(): # Dirichlet distribution support is implemented in Beanstalk but not yet landed. return dist.Dirichlet(PREV_PRIOR) @bm.random_variable def confusion_matrix(labeler, true_class): return dist.Dirichlet(CONF_MATRIX_PRIOR) # size: NUM_CLASSES # log of the unnormalized item probs # log P(true label of item i = k | labels) # shape: [NUM_ITEMS, NUM_CLASSES] @bm.functional def log_item_prob(i, k): # Indexing into a simplex with a constant is implemented # but not yet landed prob = prevalence()[k].log() for r in range(len(IDX_RATINGS[i])): label = IDX_RATINGS[i][r] labeler = IDX_LABELERS[i][r] prob = prob + confusion_matrix(labeler, k)[label].log() if IDX_TRUE_RATINGS[i] != NUM_CLASS: # Value NUM_CLASS means missing value prob = prob + EXPERT_CONF_MATRIX[k, IDX_TRUE_RATINGS[i]].log() return prob # log of joint prob of labels, prev, conf_matrix @bm.random_variable def target(): joint_log_prob = 0 for i in range(NUM_ITEMS): # logsumexp on a newly-constructed tensor with stochastic # elements has limited support but this should work: log_probs = tensor( # TODO: Hard-coded k in {0,1} # [log_item_prob(i, 0), log_item_prob(i, 1), log_item_prob(i, 2)] # [log_item_prob(i, 0), log_item_prob(i, 1)] [log_item_prob(i, k) for k in range(NUM_CLASS)] ) joint_log_prob = joint_log_prob + log_probs.logsumexp(0) return dist.Bernoulli(joint_log_prob.exp()) observations = {target(): tensor(1.0)} queries = [ log_item_prob(0, 0), # Ideally, all the other elements too prevalence(), confusion_matrix(0, 0), # Ideally, all the other elements too ] ssrw = "SingleSiteRandomWalk" bmgi = "BMG inference" both = {ssrw, bmgi} # TODO: Replace 4th param of expecteds by more methodical calculation expecteds = [ (prevalence(), both, 0.5000, 0.001), (confusion_matrix(0, 0), both, 0.5000, 0.001), (log_item_prob(0, 0), {ssrw}, -1.3863, 0.5), (log_item_prob(0, 0), {bmgi}, -1.0391, 0.5), ] class NSchoolsTest(unittest.TestCase): def test_eight_schools_e2e(self): # see https://www.jstatsoft.org/article/view/v012i03/v12i03.pdf # For each school, the average treatment effect and the standard deviation DATA = [ (28.39, 14.9), (7.94, 10.2), (-2.75, 16.3), (6.82, 11.0), (-0.64, 9.4), (0.63, 11.4), (18.01, 10.4), (12.16, 17.6), ] # the expected mean and standard deviation of each random variable EXPECTED = [ (11.1, 9.1), (7.6, 6.6), (5.7, 8.4), (7.1, 7.0), (5.1, 6.8), (5.7, 7.3), (10.4, 7.3), (8.3, 8.4), (7.6, 5.9), # overall mean (6.7, 5.6), # overall std ] g = graph.Graph() zero = g.add_constant(0.0) thousand = g.add_constant_pos_real(1000.0) # overall_mean ~ Normal(0, 1000) overall_mean_dist = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [zero, thousand] ) overall_mean = g.add_operator(graph.OperatorType.SAMPLE, [overall_mean_dist]) # overall_std ~ HalfCauchy(1000) # [note: the original paper had overall_std ~ Uniform(0, 1000)] overall_std_dist = g.add_distribution( graph.DistributionType.HALF_CAUCHY, graph.AtomicType.POS_REAL, [thousand] ) overall_std = g.add_operator(graph.OperatorType.SAMPLE, [overall_std_dist]) # for each school we will add two random variables, # but first we need to define a distribution school_effect_dist = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [overall_mean, overall_std], ) for treatment_mean_value, treatment_std_value in DATA: # school_effect ~ Normal(overall_mean, overall_std) school_effect = g.add_operator( graph.OperatorType.SAMPLE, [school_effect_dist] ) g.query(school_effect) # treatment_mean ~ Normal(school_effect, treatment_std) treatment_std = g.add_constant_pos_real(treatment_std_value) treatment_mean_dist = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [school_effect, treatment_std], ) treatment_mean = g.add_operator( graph.OperatorType.SAMPLE, [treatment_mean_dist] ) g.observe(treatment_mean, treatment_mean_value) g.query(overall_mean) g.query(overall_std) observed = g.to_dot() expected = """ digraph "graph" { N0[label="0"]; N1[label="1000"]; N2[label="Normal"]; N3[label="~"]; N4[label="HalfCauchy"]; N5[label="~"]; N6[label="Normal"]; N7[label="~"]; N8[label="14.9"]; N9[label="Normal"]; N10[label="~"]; N11[label="~"]; N12[label="10.2"]; N13[label="Normal"]; N14[label="~"]; N15[label="~"]; N16[label="16.3"]; N17[label="Normal"]; N18[label="~"]; N19[label="~"]; N20[label="11"]; N21[label="Normal"]; N22[label="~"]; N23[label="~"]; N24[label="9.4"]; N25[label="Normal"]; N26[label="~"]; N27[label="~"]; N28[label="11.4"]; N29[label="Normal"]; N30[label="~"]; N31[label="~"]; N32[label="10.4"]; N33[label="Normal"]; N34[label="~"]; N35[label="~"]; N36[label="17.6"]; N37[label="Normal"]; N38[label="~"]; N0 -> N2; N1 -> N2; N1 -> N4; N2 -> N3; N3 -> N6; N4 -> N5; N5 -> N6; N6 -> N7; N6 -> N11; N6 -> N15; N6 -> N19; N6 -> N23; N6 -> N27; N6 -> N31; N6 -> N35; N7 -> N9; N8 -> N9; N9 -> N10; N11 -> N13; N12 -> N13; N13 -> N14; N15 -> N17; N16 -> N17; N17 -> N18; N19 -> N21; N20 -> N21; N21 -> N22; N23 -> N25; N24 -> N25; N25 -> N26; N27 -> N29; N28 -> N29; N29 -> N30; N31 -> N33; N32 -> N33; N33 -> N34; N35 -> N37; N36 -> N37; N37 -> N38; O0[label="Observation"]; N10 -> O0; O1[label="Observation"]; N14 -> O1; O2[label="Observation"]; N18 -> O2; O3[label="Observation"]; N22 -> O3; O4[label="Observation"]; N26 -> O4; O5[label="Observation"]; N30 -> O5; O6[label="Observation"]; N34 -> O6; O7[label="Observation"]; N38 -> O7; Q0[label="Query"]; N7 -> Q0; Q1[label="Query"]; N11 -> Q1; Q2[label="Query"]; N15 -> Q2; Q3[label="Query"]; N19 -> Q3; Q4[label="Query"]; N23 -> Q4; Q5[label="Query"]; N27 -> Q5; Q6[label="Query"]; N31 -> Q6; Q7[label="Query"]; N35 -> Q7; Q8[label="Query"]; N3 -> Q8; Q9[label="Query"]; N5 -> Q9; }""" self.assertTrue(expected, observed) means = g.infer_mean(3000, graph.InferenceType.NMC) for idx, (mean, std) in enumerate(EXPECTED): self.assertTrue( abs(means[idx] - mean) < std * 0.5, f"index {idx} expected {mean} +- {std*0.5} actual {means[idx]}", ) # TODO: The following tests should be turned into working tests focused on # n-schools (rather than the CLARA examples they are templated on.) def disabled_test_nschools_tensor_cgm_no_update_inference(self) -> None: """Check BM and BMG inference both terminate""" self.maxDiff = None num_samples = 10 # First, let's see how the model fairs with Random Walk inference inference = bm.SingleSiteRandomWalk() # or NUTS mcsamples = inference.infer(queries, observations, num_samples) for rand_var, inferences, value, delta in expecteds: if ssrw in inferences: samples = mcsamples[rand_var] observed = samples.mean() expected = tensor([value]) self.assertAlmostEqual(first=observed, second=expected, delta=delta) # Second, let's see how it fairs with the bmg inference inference = BMGInference() mcsamples = inference.infer(queries, observations, num_samples) for rand_var, inferences, value, delta in expecteds: if bmgi in inferences: samples = mcsamples[rand_var] observed = samples.mean() expected = tensor([value]) self.assertAlmostEqual(first=observed, second=expected, delta=delta) def disabled_test_nschools_tensor_cgm_no_update_to_dot_cpp_python(self) -> None: self.maxDiff = None observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label="[1.0,0.0]"]; N01[label=Dirichlet]; N02[label=Sample]; N03[label=Sample]; N04[label=Sample]; N05[label=0]; N06[label=index]; N07[label=Log]; N08[label=index]; N09[label=Log]; N10[label="+"]; N11[label=-0.010050326585769653]; N12[label="+"]; N13[label=1]; N14[label=index]; N15[label=Log]; N16[label=index]; N17[label=Log]; N18[label="+"]; N19[label=-4.605170249938965]; N20[label="+"]; N21[label=LogSumExp]; N22[label=ToReal]; N23[label=Exp]; N24[label=ToProb]; N25[label=Bernoulli]; N26[label=Sample]; N27[label="Observation True"]; N28[label=Query]; N29[label=Query]; N30[label=Query]; N00 -> N01; N01 -> N02; N01 -> N03; N01 -> N04; N02 -> N06; N02 -> N14; N02 -> N29; N03 -> N08; N03 -> N30; N04 -> N16; N05 -> N06; N05 -> N08; N05 -> N16; N06 -> N07; N07 -> N10; N08 -> N09; N09 -> N10; N10 -> N12; N11 -> N12; N12 -> N21; N12 -> N28; N13 -> N14; N14 -> N15; N15 -> N18; N16 -> N17; N17 -> N18; N18 -> N20; N19 -> N20; N20 -> N21; N21 -> N22; N22 -> N23; N23 -> N24; N24 -> N25; N25 -> N26; N26 -> N27; } """ self.assertEqual(observed.strip(), expected.strip()) observed = BMGInference().to_cpp(queries, observations) expected = """ graph::Graph g; Eigen::MatrixXd m0(2, 1); m0 << 1.0, 0.0; uint n0 = g.add_constant_pos_matrix(m0); uint n1 = g.add_distribution( graph::DistributionType::DIRICHLET, graph::ValueType( graph::VariableType::COL_SIMPLEX_MATRIX, graph::AtomicType::PROBABILITY, 2, 1 ) std::vector<uint>({n0})); uint n2 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n3 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n4 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); uint n5 = g.add_constant(0); uint n6 = g.add_operator( graph::OperatorType::INDEX, std::vector<uint>({n2, n5})); uint n7 = g.add_operator( graph::OperatorType::LOG, std::vector<uint>({n6})); uint n8 = g.add_operator( graph::OperatorType::INDEX, std::vector<uint>({n3, n5})); uint n9 = g.add_operator( graph::OperatorType::LOG, std::vector<uint>({n8})); uint n10 = g.add_operator( graph::OperatorType::ADD, std::vector<uint>({n7, n9})); uint n11 = g.add_constant_neg_real(-0.010050326585769653); uint n12 = g.add_operator( graph::OperatorType::ADD, std::vector<uint>({n10, n11})); uint n13 = g.add_constant(1); uint n14 = g.add_operator( graph::OperatorType::INDEX, std::vector<uint>({n2, n13})); uint n15 = g.add_operator( graph::OperatorType::LOG, std::vector<uint>({n14})); uint n16 = g.add_operator( graph::OperatorType::INDEX, std::vector<uint>({n4, n5})); uint n17 = g.add_operator( graph::OperatorType::LOG, std::vector<uint>({n16})); uint n18 = g.add_operator( graph::OperatorType::ADD, std::vector<uint>({n15, n17})); uint n19 = g.add_constant_neg_real(-4.605170249938965); uint n20 = g.add_operator( graph::OperatorType::ADD, std::vector<uint>({n18, n19})); n21 = g.add_operator( graph::OperatorType::LOGSUMEXP, std::vector<uint>({n12, n20})); uint n22 = g.add_operator( graph::OperatorType::TO_REAL, std::vector<uint>({n21})); uint n23 = g.add_operator( graph::OperatorType::EXP, std::vector<uint>({n22})); uint n24 = g.add_operator( graph::OperatorType::TO_PROBABILITY, std::vector<uint>({n23})); uint n25 = g.add_distribution( graph::DistributionType::BERNOULLI, graph::AtomicType::BOOLEAN, std::vector<uint>({n24})); uint n26 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n25})); g.observe(n26, true); g.query(n12); g.query(n2); g.query(n3); """ self.assertEqual(observed.strip(), expected.strip()) observed = BMGInference().to_python(queries, observations) expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_pos_matrix(tensor([[1.0],[0.0]])) n1 = g.add_distribution( graph.DistributionType.DIRICHLET, graph.ValueType( graph.VariableType.COL_SIMPLEX_MATRIX, graph.AtomicType.PROBABILITY, 2, 1, ), [n0], ) n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n3 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n4 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) n5 = g.add_constant(0) n6 = g.add_operator(graph.OperatorType.INDEX, [n2, n5]) n7 = g.add_operator(graph.OperatorType.LOG, [n6]) n8 = g.add_operator(graph.OperatorType.INDEX, [n3, n5]) n9 = g.add_operator(graph.OperatorType.LOG, [n8]) n10 = g.add_operator(graph.OperatorType.ADD, [n7, n9]) n11 = g.add_constant_neg_real(-0.010050326585769653) n12 = g.add_operator(graph.OperatorType.ADD, [n10, n11]) n13 = g.add_constant(1) n14 = g.add_operator(graph.OperatorType.INDEX, [n2, n13]) n15 = g.add_operator(graph.OperatorType.LOG, [n14]) n16 = g.add_operator(graph.OperatorType.INDEX, [n4, n5]) n17 = g.add_operator(graph.OperatorType.LOG, [n16]) n18 = g.add_operator(graph.OperatorType.ADD, [n15, n17]) n19 = g.add_constant_neg_real(-4.605170249938965) n20 = g.add_operator(graph.OperatorType.ADD, [n18, n19]) n21 = g.add_operator( graph.OperatorType.LOGSUMEXP, [n12, n20]) n22 = g.add_operator(graph.OperatorType.TO_REAL, [n21]) n23 = g.add_operator(graph.OperatorType.EXP, [n22]) n24 = g.add_operator(graph.OperatorType.TO_PROBABILITY, [n23]) n25 = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [n24]) n26 = g.add_operator(graph.OperatorType.SAMPLE, [n25]) g.observe(n26, True) g.query(n12) g.query(n2) g.query(n3) """ self.assertEqual(observed.strip(), expected.strip())
beanmachine-main
tests/ppl/compiler/n-schools_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tests for single_assignment.py""" import ast import unittest import astor from beanmachine.ppl.compiler.ast_patterns import ast_domain from beanmachine.ppl.compiler.rules import ( FirstMatch as first, TryMany as many, TryOnce as once, ) from beanmachine.ppl.compiler.single_assignment import SingleAssignment _some_top_down = ast_domain.some_top_down class SingleAssignmentTest(unittest.TestCase): s = SingleAssignment() default_rule = s._rule default_rules = s._rules def test_single_assignment_sanity_check(self) -> None: """If you manually change one of the two numbers in the test it should fail""" self.assertEqual(3, 3) def test_single_assignment_unique_id_preserves_prefix(self) -> None: """The method unique_id preserves name prefix""" s = SingleAssignment() root = "root" name = s._unique_id(root) self.assertEqual(root, name[0 : len(root)]) def check_rewrite( self, source, expected, rules=default_rules, msg=None, reset=True ): """Applying rules to source yields expected""" self.maxDiff = None if reset: self.s._count = 0 self.s._rules = rules m = ast.parse(source) result = self.s.single_assignment(m) self.assertEqual(astor.to_source(result).strip(), expected.strip(), msg=msg) def check_rewrites(self, sources, rule=default_rule, reset=True): """Applying rules to each element of sources yelds the next one""" self.assertIsInstance(sources, list, msg="\nSources should be list of strings.") self.assertGreater(len(sources), 0, msg="\nSources should be a non-empty list.") if len(sources) == 1: return self.check_rewrite( sources[0], sources[0], once(_some_top_down(rule)), msg="\nExpected the term to be a normal form for rule.", reset=reset, ) source, *rest = sources expected, *_ = rest self.check_rewrite( source, expected, _some_top_down(rule), msg="\nExpected rule to rewrite one term to the other", reset=reset, ) self.check_rewrites(rest, rule, reset=False) def test_check_rewrites(self) -> None: """The method check_rewrites performs several rewrites for it in one shot. This method illustrates these functions.""" # The tests use a running example consisting of three terms that are the first, # intermediate, and final terms in a sequence of rewrites by the rule # self.s._handle_boolop_binarize() # The three terms are simple as follows: source1 = """ def f(x): x = a and b and c and d """ source2 = """ def f(x): x = (a and b) and c and d """ source3 = """ def f(x): x = ((a and b) and c) and d """ # First, check that it raises errors on bad inputs with self.assertRaises( AssertionError, msg="The following line should raise an error!" ): self.check_rewrites(42, self.s._handle_boolop_binarize()) with self.assertRaises( AssertionError, msg="The following line should raise an error!" ): self.check_rewrites([], self.s._handle_boolop_binarize()) # Second, make sure it it does what is expected on normal forms self.check_rewrites([source3], self.s._handle_boolop_binarize()) with self.assertRaises( AssertionError, msg="The following line should raise an error!" ): self.check_rewrites([source1], self.s._handle_boolop_binarize()) # Third, normal forms are unchanged if we have one "many" too many self.check_rewrites([source3], many(self.s._handle_boolop_binarize())) with self.assertRaises( AssertionError, msg="The following line should raise an error!" ): self.check_rewrites([source1], many(self.s._handle_boolop_binarize())) # Fourth, it will recognize valid rewrites self.check_rewrites( [source1, source2, source3], self.s._handle_boolop_binarize() ) # In common use, it is expect that the intermediate expressions are # all gathered in a list (if we would like to test the sequence in) # multiple ways, or they may be inlined directly. To get a sense of # the way the automatic formatting renders such uses, we include both # here: sources = [ """ def f(x): x = a and b and c and d """, """ def f(x): x = (a and b) and c and d """, """ def f(x): x = ((a and b) and c) and d """, ] self.check_rewrites(sources, self.s._handle_boolop_binarize()) # and self.check_rewrites( [ """ def f(x): x = a and b and c and d """, """ def f(x): x = (a and b) and c and d """, """ def f(x): x = ((a and b) and c) and d """, ], self.s._handle_boolop_binarize(), ) # Both forms are a bit verbose, but the first is somewhat more passable # Fifth, the above call is essentially the following reduction but # with the intermediate term(s) spelled out: self.check_rewrite( source1, source3, many(_some_top_down(self.s._handle_boolop_binarize())) ) # Sixth, we can use the default rules to document full reduction sequences sources_continued = [ source3, """ def f(x): a1 = (a and b) and c x = a1 and d """, """ def f(x): a2 = a and b a1 = a2 and c if a1: x = d else: x = a1 """, """ def f(x): if a: a2 = b else: a2 = a if a2: a1 = c else: a1 = a2 if a1: x = d else: x = a1 """, ] self.check_rewrites(sources_continued) # TODO: Remarks based on the sequence above: # At some point we may decide to use top_down rather than some_top_down def check_rewrite_as_ast(self, source, expected, rules=default_rules): """Applying rules to source yields expected -- checked as ASTs""" self.maxDiff = None self.s._count = 0 m = ast.parse(source) result = self.s.single_assignment(m) self.assertEqual(ast.dump(result), ast.dump(ast.parse(expected))) def test_single_assignment_pre_unassigned_expressions(self) -> None: """Tests for state before adding rule to handle unassigned expressions""" source = """ def f(x): g(x)+x """ expected = """ def f(x): g(x) + x """ self.check_rewrite( source, expected, many( # Custom wire rewrites to rewrites existing before this diff _some_top_down( first( [ self.s._handle_return(), self.s._handle_for(), self.s._handle_assign(), ] ) ) ), ) def test_single_assignment_unassigned_expressions(self) -> None: """Test unassiged expressions rewrite""" # Check that the unassigned expressions rule (unExp) works alone source = """ def f(x): g(x)+x """ expected = """ def f(x): u1 = g(x) + x """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_unassigned()) ) # Check that the unassigned expressions rule (unExp) works in context source = """ def f(x): g(x)+x """ expected = """ def f(x): r3 = [x] r4 = {} a2 = g(*r3, **r4) u1 = a2 + x """ self.check_rewrite( source, expected, many( _some_top_down( first( [ self.s._handle_unassigned(), self.s._handle_return(), self.s._handle_for(), self.s._handle_assign(), ] ) ) ), ) def test_single_assignment_if(self) -> None: """Test if rewrite""" # Check that rule will leave uninteresting expressions alone source = """ def f(x): if x: c=a+b+c else: b=c+a+b """ expected = """ def f(x): if x: c = a + b + c else: b = c + a + b """ self.check_rewrite( source, expected, many(_some_top_down(first([self.s._handle_if()]))) ) # Check that the if rule works (alone) on an elementary expression source = """ def f(x): if x+x>x: c=a+b+c else: b=c+a+b """ expected = """ def f(x): r1 = x + x > x if r1: c = a + b + c else: b = c + a + b """ self.check_rewrite(source, expected, _some_top_down(self.s._handle_if())) # Check that the if rule works (alone) with elif clauses source = """ def f(x): if x+x>x: c=a+b+c elif y+y>y: a=c+b+a else: b=c+a+b """ expected = """ def f(x): r1 = x + x > x if r1: c = a + b + c else: r2 = y + y > y if r2: a = c + b + a else: b = c + a + b """ self.check_rewrite(source, expected, many(_some_top_down(self.s._handle_if()))) # Check that the if rule works (with others) on an elementary expression source = """ def f(x): if gt(x+x,x): c=a+b+c else: b=c+a+b """ expected = """ def f(x): a6 = x + x a5 = [a6] a7 = [x] r4 = a5 + a7 r8 = {} r1 = gt(*r4, **r8) if r1: a2 = a + b c = a2 + c else: a3 = c + a b = a3 + b """ self.check_rewrite( source, expected, many( _some_top_down( first( [ self.s._handle_if(), self.s._handle_unassigned(), self.s._handle_return(), self.s._handle_for(), self.s._handle_assign(), ] ) ) ), ) def test_single_assignment_while(self) -> None: """Test while rewrite""" # Check that while_not_True rule works (alone) on simple cases source = """ def f(x): while c: x=x+1 """ expected = """ def f(x): while True: w1 = c if w1: x = x + 1 else: break """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_while_not_True()) ) # Check that the while_not_True rewrite reaches normal form self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_while_not_True())) ) # Check that while_not_True_else rule works (alone) on simple cases source = """ def f(x): while c: x=x+1 else: x=x-1 """ expected = """ def f(x): while True: w1 = c if w1: x = x + 1 else: break if not w1: x = x - 1 """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_while_not_True_else()) ) # Check that the while_not_True_else rewrite reaches normal form self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_while_not_True_else())) ) # Check that while_True_else rule works (alone) on simple cases source = """ def f(x): while True: x=x+1 else: x=x-1 """ expected = """ def f(x): while True: x = x + 1 """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_while_True_else()) ) # Check that while_True_else rule, alone, on simple cases, reaches a normal form self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_while_True_else())) ) # Check that (combined) while rule works (alone) on simple cases source = """ def f(x): while c: x=x+1 while d: y=y+1 else: y=y-1 while True: z=z+1 else: z=z-1 """ expected = """ def f(x): while True: w1 = c if w1: x = x + 1 else: break while True: w2 = d if w2: y = y + 1 else: break if not w2: y = y - 1 while True: z = z + 1 """ self.check_rewrite(source, expected, _some_top_down(self.s._handle_while())) # Extra check: Make sure they are idempotent self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_while())) ) # Check that the while rewrite works with everything else self.maxDiff = None source = """ def f(x): while c: x=(x+1)-s else: x=(x-1)+s while True: y=(y+1)-s else: y=(y-1)+s """ expected = """ def f(x): while True: w1 = c if w1: a5 = 1 a2 = x + a5 x = a2 - s else: break r3 = not w1 if r3: a8 = 1 a6 = x - a8 x = a6 + s while True: a7 = 1 a4 = y + a7 y = a4 - s """ self.check_rewrite(source, expected) def test_single_assignment_boolop_binarize(self) -> None: """Test the rule for converting boolean operators into binary operators""" source = """ def f(x): x = a and b and c and d """ expected = """ def f(x): x = ((a and b) and c) and d """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_boolop_binarize())) ) source = """ def f(x): x = a and b and c or d or e """ expected = """ def f(x): x = ((a and b) and c or d) or e """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_boolop_binarize())) ) def test_single_assignment_boolop_linearize(self) -> None: """Test the assign rule for linearizing binary boolean ops""" source = """ def f(x): x = (a and b) and c """ expected = """ def f(x): a1 = a and b x = a1 and c """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_boolop_linearize())), ) source = """ def f(x): x = ((a and b) and c) and d """ expected = """ def f(x): a2 = a and b a1 = a2 and c x = a1 and d """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_boolop_linearize())), ) def test_single_assignment_and2if(self) -> None: """Test the assign rule for converting a binary and into an if statement""" source = """ def f(x): x = a and b """ expected = """ def f(x): if a: x = b else: x = a """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_and2if())) ) def test_single_assignment_or2if(self) -> None: """Test the assign rule for converting a binary or into an if statement""" source = """ def f(x): x = a or b """ expected = """ def f(x): if a: x = a else: x = b """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_or2if())) ) def test_single_assignment_boolop_all(self) -> None: """Test the combined rules for boolean operators""" source = """ def f(x): x = a and b and c and d """ expected = """ def f(x): if a: a2 = b else: a2 = a if a2: a1 = c else: a1 = a2 if a1: x = d else: x = a1 """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_boolop_all())) ) source = """ def f(x): x = a and b and c or d or e """ expected = """ def f(x): if a: a3 = b else: a3 = a if a3: a2 = c else: a2 = a3 if a2: a1 = a2 else: a1 = d if a1: x = a1 else: x = e """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_boolop_all())) ) def test_single_assignment_handle_compare_binarize(self) -> None: """Test the rule for converting n-way comparisons into binary ones""" source = """ def f(x): x = a < b > c == d """ expected = """ def f(x): x = a < b and (b > c and c == d) """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_compare_binarize())) ) source = """ def f(x): x = a < 1 + b > c == d """ expected = """ def f(x): x = a < 1 + b > c == d """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_compare_binarize())) ) source = """ def f(x): x = a + 1 < b > c + 1 == d """ expected = """ def f(x): x = a + 1 < b and b > c + 1 == d """ # Note that the term after the "and" is not reduced self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_compare_binarize())) ) def test_single_assignment_handle_assign_compare_lefthandside(self) -> None: """Test the rule for lifting first argument of n-way comparison""" source = """ def f(x): x = 1 + a < 1 + b > c == d """ expected = """ def f(x): a1 = 1 + a x = a1 < 1 + b > c == d """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_compare_lefthandside())), ) def test_single_assignment_handle_assign_compare_righthandside(self) -> None: """Test the rule for lifting second argument of n-way comparison""" source = """ def f(x): z = 1 + a x = z < 1 + b > c == d """ expected = """ def f(x): z = 1 + a a1 = 1 + b x = z < a1 > c == d """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_compare_righthandside())), ) def test_single_assignment_handle_assign_compare_bothhandsides(self) -> None: """Test the rules for lifting first and second args of n-way comparison""" source = """ def f(x): x = 1 + a < 1 + b > c == d """ expected = """ def f(x): a1 = 1 + a a2 = 1 + b x = a1 < a2 > c == d """ self.check_rewrite( source, expected, many( _some_top_down( first( [ self.s._handle_assign_compare_lefthandside(), self.s._handle_assign_compare_righthandside(), ] ) ) ), ) def test_single_assignment_handle_assign_compare_all(self) -> None: """Test alls rules for n-way comparisons""" source = """ def f(x): x = 1 + a < 1 + b > c == d """ expected = """ def f(x): a1 = 1 + a a2 = 1 + b x = a1 < a2 and (a2 > c and c == d) """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_compare_all())) ) def test_single_assignment_handle_assign_compare_all_combined(self) -> None: """Test alls rules for n-way comparisons combined with rest""" source = """ def f(x): x = 1 + a < 1 + b > c == d """ expected = """ def f(x): a2 = 1 a1 = a2 + a a4 = 1 a3 = a4 + b a5 = a1 < a3 if a5: a6 = a3 > c if a6: x = c == d else: x = a6 else: x = a5 """ self.check_rewrite(source, expected) def test_single_assignment_lists(self) -> None: """Test the assign rule for lists""" source = """ def f(x): x = [1+a,a+b,c+d] """ expected = """ def f(x): a2 = 1 a1 = a2 + a a3 = a + b a4 = c + d x = [a1, a3, a4] """ self.check_rewrite(source, expected) def test_single_assignment_dict(self) -> None: """Test the assign rule for dictionaries""" source = """ def f(x): x = {"a"+"b":x+x} """ expected = """ def f(x): a2 = 'a' a4 = 'b' a1 = a2 + a4 a3 = x + x x = {a1: a3} """ self.check_rewrite(source, expected) source = """ def f(x): x = {"a"+"b":x+x, "c"+"d":x-x} """ expected = """ def f(x): a2 = 'a' a4 = 'b' a1 = a2 + a4 a3 = x + x a6 = 'c' a8 = 'd' a5 = a6 + a8 a7 = x - x x = {a1: a3, a5: a7}""" self.check_rewrite(source, expected) def test_single_assignment_tuple(self) -> None: """Test the assign rule for tuples""" source = """ def f(x): x = 1+a,a+b,c+d """ expected = """ def f(x): a2 = 1 a1 = a2 + a a3 = a + b a4 = c + d x = a1, a3, a4 """ self.check_rewrite(source, expected) def test_single_assignment_1(self) -> None: """Tests for single_assignment.py""" self.maxDiff = None source = """ def f(): aab = a + b if aab: return 1 + ~x + 2 + g(5, y=6) z = torch.tensor([1.0 + 2.0, 4.0]) for x in [[10, 20], [30, 40]]: for y in x: _1 = x+y _2 = print(_1) return 8 * y / (4 * z) """ expected = """ def f(): aab = a + b if aab: a16 = 1 a22 = ~x a8 = a16 + a22 a17 = 2 a5 = a8 + a17 a28 = 5 r23 = [a28] a33 = 6 r31 = dict(y=a33) a9 = g(*r23, **r31) r1 = a5 + a9 return r1 a2 = torch.tensor a29 = 1.0 a32 = 2.0 a24 = a29 + a32 a30 = 4.0 a18 = [a24, a30] r10 = [a18] r25 = {} z = a2(*r10, **r25) a11 = 10 a19 = 20 a6 = [a11, a19] a20 = 30 a26 = 40 a12 = [a20, a26] f3 = [a6, a12] for x in f3: for y in x: _1 = x + y r13 = [_1] r27 = {} _2 = print(*r13, **r27) a14 = 8 a7 = a14 * y a21 = 4 a15 = a21 * z r4 = a7 / a15 return r4 """ self.check_rewrite(source, expected) def test_single_assignment_2(self) -> None: """Tests for single_assignment.py""" self.maxDiff = None source = "b = c(d + e).f(g + h)" expected = """ a6 = d + e r4 = [a6] r8 = {} a2 = c(*r4, **r8) a1 = a2.f a5 = g + h r3 = [a5] r7 = {} b = a1(*r3, **r7) """ self.check_rewrite(source, expected) def test_single_assignment_3(self) -> None: """Tests for single_assignment.py""" self.maxDiff = None source = "a = (b+c)[f(d+e)]" expected = """ a1 = b + c a4 = d + e r3 = [a4] r5 = {} a2 = f(*r3, **r5) a = a1[a2] """ self.check_rewrite(source, expected) def test_single_assignment_call_single_star_arg(self) -> None: """Test the assign rule final step in rewriting regular call arguments""" source = """ x = f(*([1]+[2])) """ expected = """ r1 = [1] + [2] x = f(*r1) """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_call_single_star_arg()), ) self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_call_single_star_arg())), ) expected = """ a3 = 1 a2 = [a3] a6 = 2 a4 = [a6] r1 = a2 + a4 r5 = {} x = f(*r1, **r5) """ self.check_rewrite(source, expected) def test_single_assignment_call_single_double_star_arg(self) -> None: """Test the assign rule final step in rewriting keyword call arguments""" source = """ x = f(*d, **({x: 5})) """ expected = """ r1 = {x: 5} x = f(*d, **r1) """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_call_single_double_star_arg()), ) self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_call_single_double_star_arg())), ) expected = """ a2 = 5 r1 = {x: a2} x = f(*d, **r1)""" self.check_rewrite(source, expected) def test_single_assignment_call_two_star_args(self) -> None: """Test the assign rule for merging starred call arguments""" source = """ x = f(*[1],*[2]) """ expected = """ x = f(*([1] + [2])) """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_call_two_star_args()) ) self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_call_two_star_args())), ) expected = """ a3 = 1 a2 = [a3] a6 = 2 a4 = [a6] r1 = a2 + a4 r5 = {} x = f(*r1, **r5) """ self.check_rewrite(source, expected) def test_single_assignment_call_two_double_star_args(self) -> None: """Test the assign rule for merging double starred call arguments""" source = """ x = f(*d,**a, **b, **c) """ expected = """ x = f(*d, **dict(**a, **b), **c) """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_call_two_double_star_args()), ) expected = """ x = f(*d, **dict(**dict(**a, **b), **c)) """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_call_two_double_star_args())), ) source = expected expected = """ r1 = dict(**dict(**a, **b), **c) x = f(*d, **r1) """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_call_single_double_star_arg())), ) expected = """ a2 = dict(**a, **b) r1 = dict(**a2, **c) x = f(*d, **r1) """ self.check_rewrite(source, expected) source = """ x= f(**{a:1},**{b:3}) """ expected = """ x = f(**dict(**{a: 1}, **{b: 3})) """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_call_two_double_star_args()), ) def test_single_assignment_call_regular_arg(self) -> None: """Test the assign rule for starring an unstarred regular arg""" source = """ x = f(*[1], 2) """ expected = """ x = f(*[1], *[2]) """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_call_regular_arg()) ) self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_call_regular_arg())), ) expected = """ a3 = 1 a2 = [a3] a6 = 2 a4 = [a6] r1 = a2 + a4 r5 = {} x = f(*r1, **r5) """ self.check_rewrite(source, expected) def test_single_assignment_call_keyword_arg(self) -> None: """Test the assign rule for starring an unstarred keyword arg""" source = """ x = f(**dict(**d), k=42, **dict(**e)) """ expected = """ x = f(**dict(**d), **dict(k=42), **dict(**e)) """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_call_keyword_arg()) ) self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_call_keyword_arg())), ) # TODO: This just for debugging a non-terminating loop expected = """ x = f(*[], **dict(**d), k=42, **dict(**e)) """ self.check_rewrite(source, expected, _some_top_down(self.s._handle_assign())) source = expected expected = """ r1 = [] x = f(*r1, **dict(**d), k=42, **dict(**e)) """ self.check_rewrite(source, expected, _some_top_down(self.s._handle_assign())) source = expected expected = """ r1 = [] x = f(*r1, **dict(**d), **dict(k=42), **dict(**e)) """ self.check_rewrite(source, expected, _some_top_down(self.s._handle_assign())) source = expected expected = """ r1 = [] x = f(*r1, **dict(**dict(**d), **dict(k=42)), **dict(**e)) """ self.check_rewrite(source, expected, _some_top_down(self.s._handle_assign())) source = expected expected = """ r1 = [] x = f(*r1, **dict(**dict(**dict(**d), **dict(k=42)), **dict(**e))) """ self.check_rewrite(source, expected, _some_top_down(self.s._handle_assign())) source = expected expected = """ r1 = [] a3 = dict(**d) a6 = 42 a5 = dict(k=a6) a2 = dict(**a3, **a5) a4 = dict(**e) r1 = dict(**a2, **a4) x = f(*r1, **r1) """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign())) ) source = """ x = f(**dict(**d), k=42, **dict(**e)) """ expected = """ r1 = [] a4 = dict(**d) a7 = 42 a6 = dict(k=a7) a3 = dict(**a4, **a6) a5 = dict(**e) r2 = dict(**a3, **a5) x = f(*r1, **r2) """ self.check_rewrite(source, expected) source = """ x = f() """ expected = """ r1 = [] r2 = {} x = f(*r1, **r2) """ self.check_rewrite(source, expected) def test_single_assignment_call_empty_regular_arg(self) -> None: """Test the assign rule for starring an empty regular arg""" source = """ x = f() """ expected = """ x = f(*[]) """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_call_empty_regular_arg()), ) self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_call_empty_regular_arg())), ) expected = """ r1 = [] r2 = {} x = f(*r1, **r2) """ self.check_rewrite(source, expected) def test_single_assignment_call_three_arg(self) -> None: """Test the assign rule for starring an unstarred regular arg""" source = """ x = f(1, 2, 3) """ expected = """ a6 = 1 a3 = [a6] a9 = 2 a7 = [a9] a2 = a3 + a7 a8 = 3 a4 = [a8] r1 = a2 + a4 r5 = {} x = f(*r1, **r5) """ self.check_rewrite(source, expected) def disabled_test_crashing_case(self) -> None: """Debugging a crash in an external test""" # PYTHON VERSIONING ISSUE # TODO: There is some difference in the parse trees in the new version of # Python that we are not expecting. Until we understand what is going on, # disable this test. source = """ def flip_logit_constant(): return Bernoulli(logits=tensor(-2.0)) """ expected = """ def flip_logit_constant(): r2 = [] a7 = 2.0 a6 = -a7 r5 = [a6] r8 = {} a4 = tensor(*r5, **r8) r3 = dict(logits=a4) r1 = Bernoulli(*r2, **r3) return r1 """ self.check_rewrite(source, expected) self.check_rewrite_as_ast(source, expected) def test_single_assignment_listComp(self) -> None: """Test the assign rule for desugaring listComps""" # TODO: We should add some tests to check that we # handle nested function definitions correctly self.maxDiff = None source = """ x = [i for i in range(0,j) if even(i+j)] """ expected = """ def p1(): r2 = [] for i in range(0, j): if even(i + j): r2.append(i) return r2 x = p1() """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_listComp()) ) self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_listComp())) ) expected = """ def p1(): r2 = [] a15 = 0 a12 = [a15] a16 = [j] r10 = a12 + a16 r17 = {} f3 = range(*r10, **r17) for i in f3: a11 = i + j r7 = [a11] r13 = {} r4 = even(*r7, **r13) if r4: a8 = r2.append r14 = [i] r18 = {} u6 = a8(*r14, **r18) return r2 r5 = [] r9 = {} x = p1(*r5, **r9) """ self.check_rewrite(source, expected) source = """ y = [(x,y) for x in range(0,10) for y in range (x,10) if y == 2*x] """ expected = """ def p1(): r2 = [] for x in range(0, 10): for y in range(x, 10): if y == 2 * x: r2.append((x, y)) return r2 y = p1() """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_listComp()) ) self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_listComp())) ) expected = """ def p1(): r2 = [] a15 = 0 a13 = [a15] a20 = 10 a16 = [a20] r10 = a13 + a16 r17 = {} f3 = range(*r10, **r17) for x in f3: a18 = [x] a24 = 10 a21 = [a24] r14 = a18 + a21 r22 = {} f4 = range(*r14, **r22) for y in f4: a11 = 2 a7 = a11 * x r6 = y == a7 if r6: a12 = r2.append a23 = x, y r19 = [a23] r25 = {} u8 = a12(*r19, **r25) return r2 r5 = [] r9 = {} y = p1(*r5, **r9) """ self.check_rewrite(source, expected) source = """ y = [(x,y) for x in range(0,10) if x>0 for y in range (x,10) if y == 2*x] """ expected = """ def p1(): r2 = [] for x in range(0, 10): if x > 0: for y in range(x, 10): if y == 2 * x: r2.append((x, y)) return r2 y = p1() """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_listComp()) ) self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_listComp())) ) expected = """ def p1(): r2 = [] a16 = 0 a13 = [a16] a20 = 10 a17 = [a20] r10 = a13 + a17 r18 = {} f3 = range(*r10, **r18) for x in f3: a6 = 0 r4 = x > a6 if r4: a21 = [x] a26 = 10 a23 = [a26] r19 = a21 + a23 r24 = {} f7 = range(*r19, **r24) for y in f7: a14 = 2 a11 = a14 * x r8 = y == a11 if r8: a15 = r2.append a25 = x, y r22 = [a25] r27 = {} u12 = a15(*r22, **r27) return r2 r5 = [] r9 = {} y = p1(*r5, **r9) """ self.check_rewrite(source, expected) def test_single_assignment_setComp(self) -> None: """Test the assign rule for desugaring setComps""" # TODO: We should add some tests to check that we # handle nested function definitions correctly self.maxDiff = None source = """ x = {i for i in range(0,j) if even(i+j)} """ expected = """ def p1(): r2 = set() for i in range(0, j): if even(i + j): r2.add(i) return r2 x = p1() """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_setComp()) ) self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_setComp())) ) expected = """ def p1(): r2 = set() a15 = 0 a12 = [a15] a16 = [j] r10 = a12 + a16 r17 = {} f3 = range(*r10, **r17) for i in f3: a11 = i + j r7 = [a11] r13 = {} r4 = even(*r7, **r13) if r4: a8 = r2.add r14 = [i] r18 = {} u6 = a8(*r14, **r18) return r2 r5 = [] r9 = {} x = p1(*r5, **r9) """ self.check_rewrite(source, expected) source = """ y = {(x,y) for x in range(0,10) for y in range (x,10) if y == 2*x} """ expected = """ def p1(): r2 = set() for x in range(0, 10): for y in range(x, 10): if y == 2 * x: r2.add((x, y)) return r2 y = p1() """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_setComp()) ) self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_setComp())) ) expected = """ def p1(): r2 = set() a15 = 0 a13 = [a15] a20 = 10 a16 = [a20] r10 = a13 + a16 r17 = {} f3 = range(*r10, **r17) for x in f3: a18 = [x] a24 = 10 a21 = [a24] r14 = a18 + a21 r22 = {} f4 = range(*r14, **r22) for y in f4: a11 = 2 a7 = a11 * x r6 = y == a7 if r6: a12 = r2.add a23 = x, y r19 = [a23] r25 = {} u8 = a12(*r19, **r25) return r2 r5 = [] r9 = {} y = p1(*r5, **r9) """ self.check_rewrite(source, expected) source = """ y = {(x,y) for x in range(0,10) if x>0 for y in range (x,10) if y == 2*x} """ expected = """ def p1(): r2 = set() for x in range(0, 10): if x > 0: for y in range(x, 10): if y == 2 * x: r2.add((x, y)) return r2 y = p1() """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_setComp()) ) self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_setComp())) ) expected = """ def p1(): r2 = set() a16 = 0 a13 = [a16] a20 = 10 a17 = [a20] r10 = a13 + a17 r18 = {} f3 = range(*r10, **r18) for x in f3: a6 = 0 r4 = x > a6 if r4: a21 = [x] a26 = 10 a23 = [a26] r19 = a21 + a23 r24 = {} f7 = range(*r19, **r24) for y in f7: a14 = 2 a11 = a14 * x r8 = y == a11 if r8: a15 = r2.add a25 = x, y r22 = [a25] r27 = {} u12 = a15(*r22, **r27) return r2 r5 = [] r9 = {} y = p1(*r5, **r9) """ self.check_rewrite(source, expected) def test_single_assignment_dictComp(self) -> None: """Test the assign rule for desugaring dictComps""" # TODO: We should add some tests to check that we # handle nested function definitions correctly self.maxDiff = None source = """ x = {i:i for i in range(0,j) if even(i+j)} """ expected = """ def p1(): r2 = {} for i in range(0, j): if even(i + j): r2.__setitem__(i, i) return r2 x = p1() """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_dictComp()) ) self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_dictComp())) ) expected = """ def p1(): r2 = {} a14 = 0 a12 = [a14] a15 = [j] r10 = a12 + a15 r16 = {} f3 = range(*r10, **r16) for i in f3: a11 = i + j r7 = [a11] r13 = {} r4 = even(*r7, **r13) if r4: a8 = r2.__setitem__ a18 = [i] a19 = [i] r17 = a18 + a19 r20 = {} u6 = a8(*r17, **r20) return r2 r5 = [] r9 = {} x = p1(*r5, **r9) """ self.check_rewrite(source, expected) source = """ y = {x:y for x in range(0,10) for y in range (x,10) if y == 2*x} """ expected = """ def p1(): r2 = {} for x in range(0, 10): for y in range(x, 10): if y == 2 * x: r2.__setitem__(x, y) return r2 y = p1() """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_dictComp()) ) self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_dictComp())) ) expected = """ def p1(): r2 = {} a15 = 0 a13 = [a15] a19 = 10 a16 = [a19] r10 = a13 + a16 r17 = {} f3 = range(*r10, **r17) for x in f3: a18 = [x] a22 = 10 a20 = [a22] r14 = a18 + a20 r21 = {} f4 = range(*r14, **r21) for y in f4: a11 = 2 a7 = a11 * x r6 = y == a7 if r6: a12 = r2.__setitem__ a24 = [x] a25 = [y] r23 = a24 + a25 r26 = {} u8 = a12(*r23, **r26) return r2 r5 = [] r9 = {} y = p1(*r5, **r9) """ self.check_rewrite(source, expected) source = """ y = {x:y for x in range(0,10) if x>0 for y in range (x,10) if y == 2*x} """ expected = """ def p1(): r2 = {} for x in range(0, 10): if x > 0: for y in range(x, 10): if y == 2 * x: r2.__setitem__(x, y) return r2 y = p1() """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_dictComp()) ) self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign_dictComp())) ) expected = """ def p1(): r2 = {} a16 = 0 a13 = [a16] a20 = 10 a17 = [a20] r10 = a13 + a17 r18 = {} f3 = range(*r10, **r18) for x in f3: a6 = 0 r4 = x > a6 if r4: a21 = [x] a24 = 10 a22 = [a24] r19 = a21 + a22 r23 = {} f7 = range(*r19, **r23) for y in f7: a14 = 2 a11 = a14 * x r8 = y == a11 if r8: a15 = r2.__setitem__ a26 = [x] a27 = [y] r25 = a26 + a27 r28 = {} u12 = a15(*r25, **r28) return r2 r5 = [] r9 = {} y = p1(*r5, **r9) """ self.check_rewrite(source, expected) def test_single_assignment_nested_call_named_arg(self) -> None: self.maxDiff = None # This test originally pointed out a bug in the rewriting logic. # We should be pulling the invocation of c() out into # it's own top-level function call. # # The code below should be rewritten as something like: # # t1 = [] # t2 = {} # t3 = c(*t1, **t2) # t4 = [] # t5 = {'n' : t3} # t6 = b(*t4, **t5) # return t6 source = """ def f(): return b(n=c()) """ expected = """ def f(): r2 = [] r3 = dict(n=c()) r1 = b(*r2, **r3) return r1 """ # The previous "expected" was the undesirable output, which we got at the time of the bug report # The following "expected" is after the bug fix expected = """ def f(): r2 = [] r5 = [] r6 = {} a4 = c(*r5, **r6) r3 = dict(n=a4) r1 = b(*r2, **r3) return r1 """ self.check_rewrite(source, expected) # Helper tests to fix the bug identified above # Interestingly, regular arguments are OK source = """ def f(): return b(c()) """ expected = """ def f(): r5 = [] r6 = {} a3 = c(*r5, **r6) r2 = [a3] r4 = {} r1 = b(*r2, **r4) return r1 """ self.check_rewrite(source, expected) # It was further noted that the following expression was # also not handled well source = """ def f(): return b(n1=c1(),n2=c2(),n3=c3()) """ # In particular, it produced the following output, which # has nested "dict" calls that are should be removed expected = """ def f(): r2 = [] r3 = dict(**dict(**dict(n1=c1()), **dict(n2=c2())), **dict(n3=c3())) r1 = b(*r2, **r3) return r1 """ # To fix this, first we introduced the rewrite "binary_dict_left" # With the introduction of that rule we get expected = """ def f(): r2 = [] r7 = [] r8 = {} a6 = c1(*r7, **r8) a5 = dict(n1=a6) a4 = dict(**a5, **dict(n2=c2())) r3 = dict(**a4, **dict(n3=c3())) r1 = b(*r2, **r3) return r1 """ # Next, we introduced "binary_dict_right" and then we get expected = """ def f(): r2 = [] r11 = [] r14 = {} a7 = c1(*r11, **r14) a5 = dict(n1=a7) r13 = [] r16 = {} a10 = c2(*r13, **r16) a8 = dict(n2=a10) a4 = dict(**a5, **a8) r12 = [] r15 = {} a9 = c3(*r12, **r15) a6 = dict(n3=a9) r3 = dict(**a4, **a6) r1 = b(*r2, **r3) return r1 """ self.check_rewrite(source, expected) # It was useful to note that there was no similar problem with # calls that have regular arguments source = """ def f(): return b(*[c()]) """ expected = """ def f(): r5 = [] r6 = {} a3 = c(*r5, **r6) r2 = [a3] r4 = {} r1 = b(*r2, **r4) return r1 """ self.check_rewrite(source, expected) # No similar problem with multiple regular arguments also: source = """ def f(): return b(c1(),c2()) """ expected = """ def f(): r8 = [] r10 = {} a4 = c1(*r8, **r10) a3 = [a4] r9 = [] r11 = {} a7 = c2(*r9, **r11) a5 = [a7] r2 = a3 + a5 r6 = {} r1 = b(*r2, **r6) return r1 """ self.check_rewrite(source, expected) def test_single_assignment_assign_unary_dict(self) -> None: """Test the first special rule for dict (the unary case)""" self.maxDiff = None source = """ x = dict(n=c()) """ expected = """ a1 = c() x = dict(n=a1) """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_unary_dict()) ) def test_single_assignment_assign_binary_dict_left(self) -> None: """Test the first special rule for dict (the binary left case)""" self.maxDiff = None source = """ x = dict(**c(),**d()) """ expected = """ a1 = c() x = dict(**a1, **d()) """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_binary_dict_left()) ) def test_single_assignment_assign_binary_dict_right(self) -> None: """Test the first special rule for dict (the binary right case)""" self.maxDiff = None source = """ x = dict(**c,**d()) """ expected = """ a1 = d() x = dict(**c, **a1) """ self.check_rewrite( source, expected, _some_top_down(self.s._handle_assign_binary_dict_right()) ) def test_left_value_all(self) -> None: """General tests for the full set of assignment left value rules""" # First, some "most general" normal forms. These are terms that are not # reduced by this set of rewrites, nor by all the other rules for that matter. normal_forms = [ """ def f(x): a = z a.b = z a[b] = z a[b:c] = z a[b:] = z a[:c] = z a[b:c:d] = z a[b::d] = z a[:c:d] = z a[::d] = z [] = z [a] = z [*a] = z """ ] # These terms are normal forms for this specific set self.check_rewrites(normal_forms, self.s._handle_left_value_all()) # They are also "in most general form" because they are also normal forms for all sets self.check_rewrites(normal_forms) # It would be nice of course if we could check that we have captured (at least # representatives) of all normal form productions, but no idea how to do this yet. # Second, some terms that are only in normal form for this set (but could be # reducible by other rules). This type of terms helps us check the rules in # this set do not rewrite terms prematurely (which could alter order of evaluation). # Note: It's good for such terms to actually contain a reduction that can be done # once the subterm that is "waited upon" is released. This means that if we want # to systematically derive waiting terms from normal forms, two subterms would # typically need to be changed. waiting_forms = [ """ def f(x): x.y.a = z + 1 x.y[b] = z + 1 a[x.y] = z + 1 x.y[b:c] = z + 1 x.y[b:] = z + 1 x.y[:c] = z + 1 a[x.y:c] = z + 1 a[x.y:] = z + 1 a[b:x.y] = z + 1 a[:x.y] = z + 1 x.y[:c:d] = z + 1 x.y[b::d] = z + 1 x.y[::d] = z + 1 x.y[b:c:d] = z + 1 a[x.y:c:d] = z + 1 a[x.y::d] = z + 1 a[x.y:] = z + 1 a[b:x.y:d] = z + 1 a[b:x.y:d] = z + 1 a[b:x.y] = z + 1 a[:x.y:d] = z + 1 a[:c:x.y] = z + 1 a[::x.y] = z + 1 [x.y.a] = z + 1 [*x.y.a] = z + 1 """ ] self.check_rewrites(waiting_forms, self.s._handle_left_value_all()) # Third, an example that involves several of the rewrite rules in this # set # TODO: The following reduction sequence is incomplete (and does not # reach a normal form) because we need rewrite rules for splicing # terms such as z[1:]. That functionality is in place we should # be able to continue this rewrite until the whole LHS pattern has been # converted into SSA form. terms = [ """ def f(x): [[a],b,*[c],d,(e.f[g:h[i]].j)] = z """, """ def f(x): [a] = z[0] [b, *[c], d, e.f[g:h[i]].j] = z[1:] """, """ def f(x): a1 = 0 [a] = z[a1] a2 = 1 [b, *[c], d, e.f[g:h[i]].j] = z[a2:] """, # Name RHS: """ def f(x): a1 = 0 a3 = z[a1] [a] = a3 a2 = 1 a4 = z[a2:] [b, *[c], d, e.f[g:h[i]].j] = a4 """, # Process first element: """ def f(x): a1 = 0 a3 = z[a1] [a] = a3 a2 = 1 a4 = z[a2:] b = a4[0] [*[c], d, e.f[g:h[i]].j] = a4[1:] """, # Name constants: """ def f(x): a1 = 0 a3 = z[a1] [a] = a3 a2 = 1 a4 = z[a2:] a5 = 0 b = a4[a5] a6 = 1 [*[c], d, e.f[g:h[i]].j] = a4[a6:] """, # Name RHS: """ def f(x): a1 = 0 a3 = z[a1] [a] = a3 a2 = 1 a4 = z[a2:] a5 = 0 b = a4[a5] a6 = 1 a7 = a4[a6:] [*[c], d, e.f[g:h[i]].j] = a7 """, # Process last element: """ def f(x): a1 = 0 a3 = z[a1] [a] = a3 a2 = 1 a4 = z[a2:] a5 = 0 b = a4[a5] a6 = 1 a7 = a4[a6:] [*[c], d] = a7[:-1] e.f[g:h[i]].j = a7[-1] """, # Name constants: """ def f(x): a1 = 0 a3 = z[a1] [a] = a3 a2 = 1 a4 = z[a2:] a5 = 0 b = a4[a5] a6 = 1 a7 = a4[a6:] a8 = -1 [*[c], d] = a7[:a8] a9 = -1 e.f[g:h[i]].j = a7[a9] """, # Name RHS: """ def f(x): a1 = 0 a3 = z[a1] [a] = a3 a2 = 1 a4 = z[a2:] a5 = 0 b = a4[a5] a6 = 1 a7 = a4[a6:] a10 = 1 a8 = -a10 a11 = a7[:a8] [*[c], d] = a11 a12 = 1 a9 = -a12 a13 = a7[a9] e.f[g:h[i]].j = a13 """, # Process last element and name LHS expression: """ def f(x): a1 = 0 a3 = z[a1] [a] = a3 a2 = 1 a4 = z[a2:] a5 = 0 b = a4[a5] a6 = 1 a7 = a4[a6:] a10 = 1 a8 = -a10 a11 = a7[:a8] [*[c]] = a11[:-1] d = a11[-1] a12 = 1 a9 = -a12 a13 = a7[a9] x14 = e.f[g:h[i]] x14.j = a13 """, # Name RHS expression: """ def f(x): a1 = 0 a3 = z[a1] [a] = a3 a2 = 1 a4 = z[a2:] a5 = 0 b = a4[a5] a6 = 1 a7 = a4[a6:] a10 = 1 a8 = -a10 a11 = a7[:a8] a15 = -1 [*[c]] = a11[:a15] a16 = -1 d = a11[a16] a12 = 1 a9 = -a12 a13 = a7[a9] a17 = e.f x14 = a17[g:h[i]] x14.j = a13 """, # Name RHS expression: """ def f(x): a1 = 0 a3 = z[a1] [a] = a3 a2 = 1 a4 = z[a2:] a5 = 0 b = a4[a5] a6 = 1 a7 = a4[a6:] a10 = 1 a8 = -a10 a11 = a7[:a8] a18 = 1 a15 = -a18 a19 = a11[:a15] [*[c]] = a19 a20 = 1 a16 = -a20 d = a11[a16] a12 = 1 a9 = -a12 a13 = a7[a9] a17 = e.f a21 = h[i] x14 = a17[g:a21] x14.j = a13 """, # Name LHS expression: """ def f(x): a1 = 0 a3 = z[a1] [a] = a3 a2 = 1 a4 = z[a2:] a5 = 0 b = a4[a5] a6 = 1 a7 = a4[a6:] a10 = 1 a8 = -a10 a11 = a7[:a8] a18 = 1 a15 = -a18 a19 = a11[:a15] [*x22] = a19 [c] = x22 a20 = 1 a16 = -a20 d = a11[a16] a12 = 1 a9 = -a12 a13 = a7[a9] a17 = e.f a21 = h[i] x14 = a17[g:a21] x14.j = a13 """, ] self.check_rewrites(terms) def test_left_value_attributeref(self) -> None: """Test rewrites like a.b.c = z → x = a.b; x.c = z""" terms = [ """ def f(x): a.b.c = z""", """ def f(x): x1 = a.b x1.c = z""", ] self.check_rewrites(terms, self.s._handle_left_value_attributeref()) self.check_rewrites(terms, self.s._handle_left_value_all()) self.check_rewrites(terms) def test_left_value_subscript_value(self) -> None: """Test rewrites like a.b[c] = z → x = a.b; x[c] = z. It also handles [c], [c:d], and [c:d:e] in the same way.""" terms = [ """ def f(x): a.b[c] = z""", """ def f(x): x1 = a.b x1[c] = z""", ] self.check_rewrites(terms, self.s._handle_left_value_subscript_value()) self.check_rewrites(terms, self.s._handle_left_value_all()) self.check_rewrites(terms) terms = [ """ def f(x): a.b[c:d] = z""", """ def f(x): x1 = a.b x1[c:d] = z""", ] self.check_rewrites(terms, self.s._handle_left_value_subscript_value()) self.check_rewrites(terms, self.s._handle_left_value_all()) self.check_rewrites(terms) terms = [ """ def f(x): a.b[c:d:e] = z""", """ def f(x): x1 = a.b x1[c:d:e] = z""", ] self.check_rewrites(terms, self.s._handle_left_value_subscript_value()) self.check_rewrites(terms, self.s._handle_left_value_all()) self.check_rewrites(terms) def test_left_value_subscript_slice_index(self) -> None: """Test rewrites like a[b.c] = z → x = b.c; a[x] = z.""" terms = [ """ def f(x): a[b.c] = z""", """ def f(x): x1 = b.c a[x1] = z""", ] self.check_rewrites(terms, self.s._handle_left_value_subscript_slice_index()) self.check_rewrites(terms, self.s._handle_left_value_all()) self.check_rewrites(terms) def test_left_value_subscript_slice_lower(self) -> None: """Test rewrites like a[b.c:] = z → x = b.c; a[x:] = z.""" terms = [ """ def f(x): a[b.c:] = z a[b.c:d] = z a[b.c:d:e] = z a[:d:e] = z""", """ def f(x): x1 = b.c a[x1:] = z x2 = b.c a[x2:d] = z x3 = b.c a[x3:d:e] = z a[:d:e] = z""", ] self.check_rewrites(terms, self.s._handle_left_value_subscript_slice_lower()) self.check_rewrites(terms, self.s._handle_left_value_all()) self.check_rewrites(terms) def test_left_value_subscript_slice_upper(self) -> None: """Test rewrites like a[:b.c] = z → x = b.c; a[:x] = z.""" terms = [ """ def f(x): a[:c.d] = z a[b:c.d] = z a[b:c.d:e] = z a[a::e] = z a[::e] = z a[:] = z""", """ def f(x): x1 = c.d a[:x1] = z x2 = c.d a[b:x2] = z x3 = c.d a[b:x3:e] = z a[a::e] = z a[::e] = z a[:] = z""", ] self.check_rewrites(terms, self.s._handle_left_value_subscript_slice_upper()) self.check_rewrites(terms, self.s._handle_left_value_all()) self.check_rewrites(terms) def test_left_value_subscript_slice_step(self) -> None: """Test rewrites like a[:b:c.d] = z → x = c.d; a[b:c:x] = z.""" terms = [ """ def f(x): a[::d.e] = z a[b::d.e] = z a[b:c:d.e] = z a[b::e] = z a[::e] = z a[:] = z""", """ def f(x): x1 = d.e a[::x1] = z x2 = d.e a[b::x2] = z x3 = d.e a[b:c:x3] = z a[b::e] = z a[::e] = z a[:] = z""", ] self.check_rewrites(terms, self.s._handle_left_value_subscript_slice_step()) self.check_rewrites(terms, self.s._handle_left_value_all()) self.check_rewrites(terms) def test_left_value_list_star(self) -> None: """Test rewrites like [*a.b] = z → [*y] = z; a.b = y.""" terms = [ """ def f(x): [*a.b] = z [*a[b]] = z (*a.b,) = z (*a[b],) = z""", """ def f(x): [*x1] = z a.b = x1 [*x2] = z a[b] = x2 [*x3] = z a.b = x3 [*x4] = z a[b] = x4""", ] self.check_rewrites(terms, self.s._handle_left_value_list_star()) self.check_rewrites(terms, self.s._handle_left_value_all()) self.check_rewrites(terms) def test_left_value_list_list(self) -> None: """Test rewrites like [[a.b]] = z → [y] = z; [a.b] = y. Note that this should also work for things where a.b is simply c. It also pattern matches both tuples and lists as if they are the same.""" terms = [ """ def f(x): [[a.b]] = z [[a[b]]] = z ([a.b],) = z ([a[b]],) = z [[a]] = z [[a],b] = z""", """ def f(x): [x1] = z [a.b] = x1 [x2] = z [a[b]] = x2 [x3] = z [a.b] = x3 [x4] = z [a[b]] = x4 [x5] = z [a] = x5 [[a], b] = z""", ] self.check_rewrites(terms, self.s._handle_left_value_list_list()) # The last line above is simplified by another rule in the set, so, terms[ 1 ] = """ def f(x): [x1] = z [a.b] = x1 [x2] = z [a[b]] = x2 [x3] = z [a.b] = x3 [x4] = z [a[b]] = x4 [x5] = z [a] = x5 [a] = z[0] [b] = z[1:]""" self.check_rewrites(terms, self.s._handle_left_value_all()) # As a result of the change in the last line, further simplifications # are triggered by other rewrites outside the set terms += [ """ def f(x): [x1] = z [a.b] = x1 [x2] = z [a[b]] = x2 [x3] = z [a.b] = x3 [x4] = z [a[b]] = x4 [x5] = z [a] = x5 a6 = 0 [a] = z[a6] a7 = 1 [b] = z[a7:]""", # Name RHS: """ def f(x): [x1] = z [a.b] = x1 [x2] = z [a[b]] = x2 [x3] = z [a.b] = x3 [x4] = z [a[b]] = x4 [x5] = z [a] = x5 a6 = 0 a8 = z[a6] [a] = a8 a7 = 1 a9 = z[a7:] [b] = a9""", ] self.check_rewrites(terms) def test_left_value_list_not_starred(self) -> None: """Test rewrites like [a.b.c, d] = z → a.b.c = z[0]; [d] = z[1:]. Note that this should also work for things where a.b is simply c. It also pattern matches both tuples and lists as if they are the same.""" terms = [ """ def f(x): [a.b.c, d] = z""", """ def f(x): a.b.c = z[0] [d] = z[1:]""", ] self.check_rewrites(terms, self.s._handle_left_value_list_not_starred()) self.check_rewrites(terms, self.s._handle_left_value_all()) # TODO: To fully process such terms, we need to support slicing in target language terms += [ """ def f(x): a1 = 0 a.b.c = z[a1] a2 = 1 [d] = z[a2:]""", # Name RHS: """ def f(x): a1 = 0 a3 = z[a1] a.b.c = a3 a2 = 1 a4 = z[a2:] [d] = a4""", # Name LHS expression: """ def f(x): a1 = 0 a3 = z[a1] x5 = a.b x5.c = a3 a2 = 1 a4 = z[a2:] [d] = a4""", ] self.check_rewrites(terms) def test_left_value_list_starred(self) -> None: """Test rewrites [*c, d] = z → [*c] = z[:-1]; d = z[-1]. It also pattern matches both tuples and lists as if they are the same.""" terms = [ """ def f(x): [*a,b] = z""", """ def f(x): [*a] = z[:-1] b = z[-1]""", ] self.check_rewrites(terms, self.s._handle_left_value_list_starred()) self.check_rewrites(terms, self.s._handle_left_value_all()) # TODO: To fully process such terms, we need to support slicing in target language terms += [ """ def f(x): a1 = -1 [*a] = z[:a1] a2 = -1 b = z[a2]""", # Name RHS: """ def f(x): a3 = 1 a1 = -a3 a4 = z[:a1] [*a] = a4 a5 = 1 a2 = -a5 b = z[a2]""", ] self.check_rewrites(terms) def test_assign_subscript_slice_all(self) -> None: """General tests for the subsript rewrite set.""" # First, we give examples of regular normal forms, that is, forms # where no more reduction is possible. normal_forms = [ """ def f(x): a = b[c] a = b[c:] a = b[:d] a = b[c:d] a = b[c::e] a = b[:d:e] a = b[c:d:e] a = b[::e]""" ] self.check_rewrites(normal_forms, self.s._handle_assign_subscript_slice_all()) self.check_rewrites(normal_forms) ## Second, an example of the natural order of evaluation for these rewrite rules progression = [ """ def f(x): a,b = c[d.e:f.g:h.i]""", """ def f(x): a1 = d.e a, b = c[a1:f.g:h.i]""", """ def f(x): a1 = d.e a2 = f.g a, b = c[a1:a2:h.i]""", """ def f(x): a1 = d.e a2 = f.g a3 = h.i a, b = c[a1:a2:a3]""", ] self.check_rewrites(progression, self.s._handle_assign_subscript_slice_all()) progression += [ # Name RHS: """ def f(x): a1 = d.e a2 = f.g a3 = h.i a4 = c[a1:a2:a3] a, b = a4""", # Process first element: """ def f(x): a1 = d.e a2 = f.g a3 = h.i a4 = c[a1:a2:a3] a = a4[0] [b] = a4[1:]""", # Name constants: """ def f(x): a1 = d.e a2 = f.g a3 = h.i a4 = c[a1:a2:a3] a5 = 0 a = a4[a5] a6 = 1 [b] = a4[a6:]""", # Name RHS: """ def f(x): a1 = d.e a2 = f.g a3 = h.i a4 = c[a1:a2:a3] a5 = 0 a = a4[a5] a6 = 1 a7 = a4[a6:] [b] = a7""", ] self.check_rewrites(progression) # Third, some stuck terms. The following form cannot go anywhere with any of the rules: stuck = [ """ def f(x): a, b = 1 + c[d.e:f.g:h.i]""" ] self.check_rewrites(stuck, self.s._handle_assign_subscript_slice_all()) # More specific stuck terms are also useful to express: stuck = [ """ def f(x): a, b = 1 + c[d.e:f.g:h.i]""" ] self.check_rewrites(stuck, self.s._handle_assign_subscript_slice_index_1()) stuck = [ """ def f(x): a, b = c.c[d.e]""" ] self.check_rewrites(stuck, self.s._handle_assign_subscript_slice_index_2()) stuck = [ """ def f(x): a, b = c.c[d.e:]""" ] self.check_rewrites(stuck, self.s._handle_assign_subscript_slice_lower()) stuck = [ """ def f(x): a, b = c[d.e:f.g]""" ] self.check_rewrites(stuck, self.s._handle_assign_subscript_slice_upper()) stuck = [ """ def f(x): a, b = c[d:f.g:h.i]""" ] self.check_rewrites(stuck, self.s._handle_assign_subscript_slice_step()) def test_assign_subscript_slice_index_1(self) -> None: """Test rewrites like a,b = c.d[e] → x = c.d; a,b = x[e].""" terms = [ """ def f(x): a,b = c.d[e]""", """ def f(x): a1 = c.d a, b = a1[e]""", ] self.check_rewrites(terms, self.s._handle_assign_subscript_slice_index_1()) self.check_rewrites(terms, self.s._handle_assign_subscript_slice_all()) terms += [ # Name RHS: """ def f(x): a1 = c.d a2 = a1[e] a, b = a2""", # Process first element: """ def f(x): a1 = c.d a2 = a1[e] a = a2[0] [b] = a2[1:]""", # Name constants: """ def f(x): a1 = c.d a2 = a1[e] a3 = 0 a = a2[a3] a4 = 1 [b] = a2[a4:]""", # Name RHS: """ def f(x): a1 = c.d a2 = a1[e] a3 = 0 a = a2[a3] a4 = 1 a5 = a2[a4:] [b] = a5""", ] self.check_rewrites(terms) def test_assign_subscript_slice_index_2(self) -> None: """Test rewrites like a,b = c[d.e] → x = d.e; a,b = c[x].""" terms = [ """ def f(x): a,b = c[d.e]""", """ def f(x): a1 = d.e a, b = c[a1]""", ] self.check_rewrites(terms, self.s._handle_assign_subscript_slice_index_2()) self.check_rewrites(terms, self.s._handle_assign_subscript_slice_all()) terms += [ # Name RHS: """ def f(x): a1 = d.e a2 = c[a1] a, b = a2""", # Process first element: """ def f(x): a1 = d.e a2 = c[a1] a = a2[0] [b] = a2[1:]""", # Name constants: """ def f(x): a1 = d.e a2 = c[a1] a3 = 0 a = a2[a3] a4 = 1 [b] = a2[a4:]""", # Name RHS: """ def f(x): a1 = d.e a2 = c[a1] a3 = 0 a = a2[a3] a4 = 1 a5 = a2[a4:] [b] = a5""", ] self.check_rewrites(terms) def test_assign_subscript_slice_index_2_not_too_soon(self) -> None: """Gives an example that shows that we do not rewrite too soon.""" terms = [ """ def f(x): a, b = c.c[d.e]""", ] self.check_rewrites(terms, self.s._handle_assign_subscript_slice_index_2()) def test_assign_subscript_slice_lower(self) -> None: """Test rewrites like e = a[b.c:] → x = b.c; e = a[x:].""" terms = [ """ def f(x): a,b = c[d.e:] a = b[c.d:e:f]""", """ def f(x): a1 = d.e a, b = c[a1:] a2 = c.d a = b[a2:e:f]""", ] self.check_rewrites(terms, self.s._handle_assign_subscript_slice_lower()) self.check_rewrites(terms, self.s._handle_assign_subscript_slice_all()) terms += [ # Name RHS: """ def f(x): a1 = d.e a3 = c[a1:] a, b = a3 a2 = c.d a = b[a2:e:f]""", # Process first element: """ def f(x): a1 = d.e a3 = c[a1:] a = a3[0] [b] = a3[1:] a2 = c.d a = b[a2:e:f]""", # Name constants: """ def f(x): a1 = d.e a3 = c[a1:] a4 = 0 a = a3[a4] a5 = 1 [b] = a3[a5:] a2 = c.d a = b[a2:e:f]""", # Process RHS: """ def f(x): a1 = d.e a3 = c[a1:] a4 = 0 a = a3[a4] a5 = 1 a6 = a3[a5:] [b] = a6 a2 = c.d a = b[a2:e:f]""", ] self.check_rewrites(terms) def disabled_test_assign_subscript_slice_upper_1(self) -> None: """Test rewrites like e = a[:b.c] → x = b.c; e = a[:x].""" # TODO: Test does not pass; I suspect there was a merge conflict resolution # error and this test should be updated or deleted. Disable it for now # and sort it out later. terms = [ """ def f(x): a,b = c[d:e.f] a = b[c:d.e:f] a,b = c [:e.f] a,b = c [:e.f:g]""", """ def f(x): a1 = e.f a, b = c[d:a1] a2 = d.e a = b[c:a2:f] a3 = e.f a, b = c[:a3] a4 = e.f a, b = c[:a4:g]""", ] self.check_rewrites(terms, self.s._handle_assign_subscript_slice_upper()) self.check_rewrites(terms, self.s._handle_assign_subscript_slice_all()) self.check_rewrites(terms) def test_assign_subscript_slice_upper_2(self) -> None: """Test rewrites like e = a[::b.c] → x = b.c; e = a[::x].""" terms = [ """ def f(x): a,b = c[d::e.f] a = b[c:d:e.f] a,b = c [::e.f] a,b = c [:e:f.g]""", """ def f(x): a1 = e.f a, b = c[d::a1] a2 = e.f a = b[c:d:a2] a3 = e.f a, b = c[::a3] a4 = f.g a, b = c[:e:a4]""", ] self.check_rewrites(terms, self.s._handle_assign_subscript_slice_step()) self.check_rewrites(terms, self.s._handle_assign_subscript_slice_all()) terms += [ # Name RHS: """ def f(x): a1 = e.f a5 = c[d::a1] a, b = a5 a2 = e.f a = b[c:d:a2] a3 = e.f a6 = c[::a3] a, b = a6 a4 = f.g a7 = c[:e:a4] a, b = a7""", # Process first element: """ def f(x): a1 = e.f a5 = c[d::a1] a = a5[0] [b] = a5[1:] a2 = e.f a = b[c:d:a2] a3 = e.f a6 = c[::a3] a = a6[0] [b] = a6[1:] a4 = f.g a7 = c[:e:a4] a = a7[0] [b] = a7[1:]""", # Name constants: """ def f(x): a1 = e.f a5 = c[d::a1] a8 = 0 a = a5[a8] a9 = 1 [b] = a5[a9:] a2 = e.f a = b[c:d:a2] a3 = e.f a6 = c[::a3] a10 = 0 a = a6[a10] a11 = 1 [b] = a6[a11:] a4 = f.g a7 = c[:e:a4] a12 = 0 a = a7[a12] a13 = 1 [b] = a7[a13:]""", # Name RHS: """ def f(x): a1 = e.f a5 = c[d::a1] a8 = 0 a = a5[a8] a9 = 1 a14 = a5[a9:] [b] = a14 a2 = e.f a = b[c:d:a2] a3 = e.f a6 = c[::a3] a10 = 0 a = a6[a10] a11 = 1 a15 = a6[a11:] [b] = a15 a4 = f.g a7 = c[:e:a4] a12 = 0 a = a7[a12] a13 = 1 a16 = a7[a13:] [b] = a16""", ] self.check_rewrites(terms) def test_assign_possibly_blocking_right_value(self) -> None: """Test rewrites like e1 = e2 → x = e2; e1 = x, as long as e1 and e2 are not names.""" # Here is what this rule achieves in isolation terms = [ """ def f(x): a, b = a, b""", """ def f(x): a1 = a, b a, b = a1""", ] self.check_rewrites( terms, self.s._handle_assign_possibly_blocking_right_value() ) # And here is what it achieves in the context of the other rules. In particular, # it enables the left_value rules to go further than they can without it. terms += [ # Process first element: """ def f(x): a1 = a, b a = a1[0] [b] = a1[1:]""", # Name constants: """ def f(x): a1 = a, b a2 = 0 a = a1[a2] a3 = 1 [b] = a1[a3:]""", # Name RHS: """ def f(x): a1 = a, b a2 = 0 a = a1[a2] a3 = 1 a4 = a1[a3:] [b] = a4""", ] self.check_rewrites(terms) def test_augmented_assignment(self) -> None: """Test rewrites involving += and other augmented assignments.""" source = """ def f(x): x += 123 x *= 456 x.y.z /= 2 """ expected = """ def f(x): a1 = 123 x += a1 a2 = 456 x *= a2 a3 = x.y a4 = a3.z a5 = 2 a4 /= a5 a3.z = a4 """ self.check_rewrite(source, expected) source = """ def f(x): a.b[c.d] -= 1 e.f[:] -= 2 g.h[:i.j] -= 2 k.m[n.o:] -= 3 p.q[r.s:t.u] -= 4 """ expected = """ def f(x): a1 = a.b a6 = c.d a11 = a1[a6] a16 = 1 a11 -= a16 a1[a6] = a11 a2 = e.f a7 = a2[:] a12 = 2 a7 -= a12 a2[:] = a7 a3 = g.h a8 = i.j a13 = a3[:a8] a17 = 2 a13 -= a17 a3[:a8] = a13 a4 = k.m a9 = n.o a14 = a4[a9:] a18 = 3 a14 -= a18 a4[a9:] = a14 a5 = p.q a10 = r.s a15 = t.u a19 = a5[a10:a15] a20 = 4 a19 -= a20 a5[a10:a15] = a19 """ self.check_rewrite(source, expected) source = """ def f(x): a.b[::] -= 1 c.d[e.f::] -= 2 g.h[:i.j:] -= 3 k.m[::n.o] -= 4 """ expected = """ def f(x): a1 = a.b a5 = a1[:] a9 = 1 a5 -= a9 a1[:] = a5 a2 = c.d a6 = e.f a10 = a2[a6:] a13 = 2 a10 -= a13 a2[a6:] = a10 a3 = g.h a7 = i.j a11 = a3[:a7] a14 = 3 a11 -= a14 a3[:a7] = a11 a4 = k.m a8 = n.o a12 = a4[::a8] a15 = 4 a12 -= a15 a4[::a8] = a12 """ self.check_rewrite(source, expected) source = """ def f(x): a.b[c.d:e.f:] -= 1 g.h[i.j::k.m] -= 2 n.o[:p.q:r.s] -= 3 t.u[v.w:x.y:z.zz] -= 4 """ expected = """ def f(x): a1 = a.b a5 = c.d a9 = e.f a13 = a1[a5:a9] a17 = 1 a13 -= a17 a1[a5:a9] = a13 a2 = g.h a6 = i.j a10 = k.m a14 = a2[a6::a10] a18 = 2 a14 -= a18 a2[a6::a10] = a14 a3 = n.o a7 = p.q a11 = r.s a15 = a3[:a7:a11] a19 = 3 a15 -= a19 a3[:a7:a11] = a15 a4 = t.u a8 = v.w a12 = x.y a16 = z.zz a20 = a4[a8:a12:a16] a21 = 4 a20 -= a21 a4[a8:a12:a16] = a20 """ self.check_rewrite(source, expected) def test_rewrite_super(self) -> None: # A call to super() with no arguments is very special in Python; it is a syntactic # sugar for a call to super(__class__, self), where self is the leftmost parameter # and __class__ is a magical outer variable automatically initialized to the # declaring class. We will handle "super()" calls specially later; we must # not rewrite them. They must stay just an ordinary call to "super()" # rather than being reduced to the standard form # # a = [] # b = super(*a) # source = """ class D(B): def f(self): super(D, self).g() super().h() """ expected = """ class D(B): def f(self): a12 = [D] a13 = [self] r11 = a12 + a13 a5 = super(*r11) a3 = a5.g r7 = [] r9 = {} u1 = a3(*r7, **r9) a6 = super() a4 = a6.h r8 = [] r10 = {} u2 = a4(*r8, **r10) """ self.check_rewrite(source, expected) def test_matrix_multiply_single_assignment(self) -> None: source = """ def f(x): return x @ (y * z) """ expected = """ def f(x): a2 = y * z r1 = x @ a2 return r1 """ self.check_rewrite(source, expected) def test_lambda_elimination(self) -> None: source = """ def f(x): return lambda y: x * y + 2 """ expected = """ def f(x): def a2(y): a4 = x * y a5 = 2 r3 = a4 + a5 return r3 r1 = a2 return r1 """ self.check_rewrite(source, expected) def test_decorator_elimination(self) -> None: source = """ @x @y(z) def f(): pass """ expected = """ def f(): pass r3 = [z] r6 = {} a1 = y(*r3, **r6) r4 = [f] r7 = {} f = a1(*r4, **r7) r2 = [f] r5 = {} f = x(*r2, **r5) """ self.check_rewrite(source, expected) def test_ifexp_elimination(self) -> None: source = """ x = a + b * c if d + e * f else g + h * i """ expected = """ a3 = e * f r2 = d + a3 if r2: a4 = b * c a1 = a + a4 else: a5 = h * i a1 = g + a5 x = a1 """ self.check_rewrite(source, expected) def test_single_assignment_handle_assign(self) -> None: """Test the rule for removing annotations""" source = """ def f(x): z:float = 1 + a """ expected = """ def f(x): a1 = 1 z = a1 + a """ self.check_rewrite( source, expected, many(_some_top_down(self.s._handle_assign())), )
beanmachine-main
tests/ppl/compiler/single_assignment_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tests for bm_to_bmg.py""" import unittest import astor import beanmachine.ppl as bm from beanmachine.ppl.compiler.bm_to_bmg import _bm_function_to_bmg_ast from beanmachine.ppl.inference import BMGInference from torch import tensor from torch.distributions import Bernoulli, Dirichlet, Normal class BaseModel: @bm.random_variable def normal(self): return Normal(0.0, 1.0) @bm.functional def foo(self): return self.normal() + 2.0 def bar(self): return 3.0 class DerivedModel(BaseModel): @bm.functional def foo(self): f = super().foo() b = super(DerivedModel, self).bar() return f * b # This should be (n() + 2) * 3 def bar(self): return 4.0 @bm.random_variable def legal_subscript_mutations(): t = tensor([0.0, 0.0]) t[0] = 0.0 t[1] = 1.0 t[0:] = 2.0 t[:1] = 3.0 t[0:1] = 4.0 t[0::] = 5.0 t[:1:] = 6.0 t[::1] = 7.0 t[0:1:] = 8.0 t[0::1] = 9.0 t[:1:1] = 10.0 t[0:1:1] = 11.0 return Dirichlet(t) @bm.random_variable def normal(): return Normal(0.0, 1.0) @bm.random_variable def flip(): return Bernoulli(0.5) @bm.functional def illegal_subscript_mutation_1(): # Mutate a tensor with a stochastic value: t = tensor([0.0, 0.0]) t[0] = normal() return t @bm.functional def illegal_subscript_mutation_2(): # Mutate a stochastic tensor t = legal_subscript_mutations() t[0] = 0.0 return t @bm.functional def illegal_subscript_mutation_3(): # Mutate a tensor with a stochastic index t = tensor([0.0, 0.0]) t[flip()] = 1.0 return t @bm.functional def illegal_subscript_mutation_4(): # Mutate a tensor with a stochastic upper t = tensor([0.0, 0.0]) t[0 : flip()] = 1.0 return t @bm.functional def illegal_subscript_mutation_5(): # Mutate a tensor with a stochastic step t = tensor([0.0, 0.0]) t[0 : 1 : flip() + 1] = 1.0 return t class CompilerTest(unittest.TestCase): def test_super_call(self) -> None: self.maxDiff = None # A call to super() in Python is not a normal function. Consider: def outer(s): return s().x() class B: def x(self): return 1 class D(B): def x(self): return 2 def ordinary(self): return self.x() # 2 def sup1(self): return super().x() # 1 def sup2(self): s = super return s().x() # Doesn't have to be a keyword def callout(self): return outer(super) # but the call to super() needs to be inside D. self.assertEqual(D().ordinary(), 2) self.assertEqual(D().sup1(), 1) self.assertEqual(D().sup2(), 1) # What's happening here is: "super()" is a syntactic sugar for "super(__class__, self)" # where __class__ is an automatically-generated outer variable of the method that # contains the call to super(). That variable has the value of the containing class. # When we call D().callout() here, there is no automatically-generated outer variable # when super() is ultimately called, and therefore we get this confusing but expected # exception raised: with self.assertRaises(RuntimeError) as ex: D().callout() expected = "super(): __class__ cell not found" observed = str(ex.exception) self.assertEqual(expected.strip(), observed.strip()) # bm_to_bmg rewrites all random variables, all functionals, and their callees. # We must ensure that all calls to super() are (1) syntactically exactly that; # these calls must not be rewritten to bmg.handle_call, and (2) must have an # outer variable __class__ which is initialized to the class which originally # declared the random variable. d = DerivedModel() observed = BMGInference().to_dot([d.foo()], {}) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=2.0]; N5[label="+"]; N6[label=3.0]; N7[label="*"]; N8[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N5; N4 -> N5; N5 -> N7; N6 -> N7; N7 -> N8; } """ self.assertEqual(expected.strip(), observed.strip()) # We do this by: # * the single assignment rewriter does not fully rewrite # calls to super to their most general form; in particular # it will not rewrite super() to x = [] / super(*x). # * the bm_to_bmg rewriter does not rewrite calls to super # into bmg.handle_function. # * if the original function has an outer variable __class__ then # we generate a new outer variable with the same name and value. # Obtain the random variable for d.foo() rv = d.foo() # The random variable has a reference to the original *undecorated* # D.foo, which has an outer variable __class__. Verify that we # correctly recreate that outer variable in the rewritten function: bmgast = _bm_function_to_bmg_ast(rv.function, "foo_helper") observed = astor.to_source(bmgast) expected = """ def foo_helper(bmg, __class__): import operator def foo(self): a5 = super() a1 = bmg.handle_dot_get(a5, 'foo') r7 = [] r10 = {} f = bmg.handle_function(a1, r7, r10) a14 = [DerivedModel] a15 = [self] r13 = bmg.handle_function(operator.add, [a14, a15]) a6 = super(*r13) a2 = bmg.handle_dot_get(a6, 'bar') r8 = [] r11 = {} b = bmg.handle_function(a2, r8, r11) r3 = bmg.handle_function(operator.mul, [f, b]) return r3 a4 = bmg.handle_dot_get(bm, 'functional') r9 = [foo] r12 = {} foo = bmg.handle_function(a4, r9, r12) return foo """ self.assertEqual(observed.strip(), expected.strip()) def test_subscript_mutations(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([legal_subscript_mutations()], {}) expected = """ digraph "graph" { N0[label="[11.0,10.0]"]; N1[label=Dirichlet]; N2[label=Sample]; N3[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; }""" self.assertEqual(observed.strip(), expected.strip()) with self.assertRaises(ValueError) as ex: BMGInference().to_dot([illegal_subscript_mutation_1()], {}) # TODO: Better error message expected = ( "Mutating a tensor with a stochastic value " + "is not supported in Bean Machine Graph." ) self.assertEqual(expected, str(ex.exception)) with self.assertRaises(ValueError) as ex: BMGInference().to_dot([illegal_subscript_mutation_2()], {}) # TODO: Better error message expected = "Mutating a stochastic value is not supported in Bean Machine Graph." self.assertEqual(expected, str(ex.exception)) with self.assertRaises(ValueError) as ex: BMGInference().to_dot([illegal_subscript_mutation_3()], {}) # TODO: Better error message expected = ( "Mutating a collection or tensor with a stochastic index " + "is not supported in Bean Machine Graph." ) self.assertEqual(expected, str(ex.exception)) with self.assertRaises(ValueError) as ex: BMGInference().to_dot([illegal_subscript_mutation_4()], {}) # TODO: Better error message expected = ( "Mutating a collection or tensor with a stochastic upper index " + "is not supported in Bean Machine Graph." ) self.assertEqual(expected, str(ex.exception)) with self.assertRaises(ValueError) as ex: BMGInference().to_dot([illegal_subscript_mutation_5()], {}) # TODO: Better error message expected = ( "Mutating a collection or tensor with a stochastic step " + "is not supported in Bean Machine Graph." ) self.assertEqual(expected, str(ex.exception))
beanmachine-main
tests/ppl/compiler/bm_to_bmg_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """End-to-end test of realistic logistic regression model""" import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor from torch.distributions import Bernoulli, Normal # We have N points with K coordinates each classified into one # of two categories: red or blue. There is a line separating # the sets of points; the idea is to deduce the most likely # parameters of that line. Parameters are beta(0), beta(1) # and beta(2); line is y = (-b1/b2) x - (b0/b2). # # We have three parameters to define the line instead of two because # these parameters also define how "mixed" the points are when close # to the line. # Points are generated so that posteriors should be # centered on beta(0) around -1.0, beta(1) around 2.0, # beta(2) around -3.0 N = 8 K = 2 X = [ [1.0000, 7.6483, 5.6988], [1.0000, -6.2928, 1.1692], [1.0000, 1.6583, -4.7142], [1.0000, -7.7588, 7.9859], [1.0000, -1.2421, 5.4628], [1.0000, 6.4529, 2.3994], [1.0000, -4.9269, 7.8679], [1.0000, 4.2130, 2.6175], ] # Classifications of those N points into two buckets: red = tensor(0.0) blue = tensor(1.0) Y = [red, red, blue, red, red, blue, red, blue] @bm.random_variable def beta(k): # k is 0 to K return Normal(0.0, 1.0) @bm.random_variable def y(n): # n is 0 to N-1 mu = X[n][0] * beta(0) + X[n][1] * beta(1) + X[n][2] * beta(2) return Bernoulli(logits=mu) queries = [beta(0), beta(1), beta(2)] observations = { y(0): Y[0], y(1): Y[1], y(2): Y[2], y(3): Y[3], y(4): Y[4], y(5): Y[5], y(6): Y[6], y(7): Y[7], } expected_dot = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=Sample]; N06[label=7.6483]; N07[label="*"]; N08[label=5.6988]; N09[label="*"]; N10[label="+"]; N11[label="Bernoulli(logits)"]; N12[label=Sample]; N13[label="Observation False"]; N14[label=-6.2928]; N15[label="*"]; N16[label=1.1692]; N17[label="*"]; N18[label="+"]; N19[label="Bernoulli(logits)"]; N20[label=Sample]; N21[label="Observation False"]; N22[label=1.6583]; N23[label="*"]; N24[label=-4.7142]; N25[label="*"]; N26[label="+"]; N27[label="Bernoulli(logits)"]; N28[label=Sample]; N29[label="Observation True"]; N30[label=-7.7588]; N31[label="*"]; N32[label=7.9859]; N33[label="*"]; N34[label="+"]; N35[label="Bernoulli(logits)"]; N36[label=Sample]; N37[label="Observation False"]; N38[label=-1.2421]; N39[label="*"]; N40[label=5.4628]; N41[label="*"]; N42[label="+"]; N43[label="Bernoulli(logits)"]; N44[label=Sample]; N45[label="Observation False"]; N46[label=6.4529]; N47[label="*"]; N48[label=2.3994]; N49[label="*"]; N50[label="+"]; N51[label="Bernoulli(logits)"]; N52[label=Sample]; N53[label="Observation True"]; N54[label=-4.9269]; N55[label="*"]; N56[label=7.8679]; N57[label="*"]; N58[label="+"]; N59[label="Bernoulli(logits)"]; N60[label=Sample]; N61[label="Observation False"]; N62[label=4.213]; N63[label="*"]; N64[label=2.6175]; N65[label="*"]; N66[label="+"]; N67[label="Bernoulli(logits)"]; N68[label=Sample]; N69[label="Observation True"]; N70[label=Query]; N71[label=Query]; N72[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N02 -> N04; N02 -> N05; N03 -> N10; N03 -> N18; N03 -> N26; N03 -> N34; N03 -> N42; N03 -> N50; N03 -> N58; N03 -> N66; N03 -> N70; N04 -> N07; N04 -> N15; N04 -> N23; N04 -> N31; N04 -> N39; N04 -> N47; N04 -> N55; N04 -> N63; N04 -> N71; N05 -> N09; N05 -> N17; N05 -> N25; N05 -> N33; N05 -> N41; N05 -> N49; N05 -> N57; N05 -> N65; N05 -> N72; N06 -> N07; N07 -> N10; N08 -> N09; N09 -> N10; N10 -> N11; N11 -> N12; N12 -> N13; N14 -> N15; N15 -> N18; N16 -> N17; N17 -> N18; N18 -> N19; N19 -> N20; N20 -> N21; N22 -> N23; N23 -> N26; N24 -> N25; N25 -> N26; N26 -> N27; N27 -> N28; N28 -> N29; N30 -> N31; N31 -> N34; N32 -> N33; N33 -> N34; N34 -> N35; N35 -> N36; N36 -> N37; N38 -> N39; N39 -> N42; N40 -> N41; N41 -> N42; N42 -> N43; N43 -> N44; N44 -> N45; N46 -> N47; N47 -> N50; N48 -> N49; N49 -> N50; N50 -> N51; N51 -> N52; N52 -> N53; N54 -> N55; N55 -> N58; N56 -> N57; N57 -> N58; N58 -> N59; N59 -> N60; N60 -> N61; N62 -> N63; N63 -> N66; N64 -> N65; N65 -> N66; N66 -> N67; N67 -> N68; N68 -> N69; } """ class LogisticRegressionTest(unittest.TestCase): def test_logistic_regression_inference(self) -> None: self.maxDiff = None bmg = BMGInference() samples = bmg.infer(queries, observations, 1000) b0 = samples[beta(0)].mean() b1 = samples[beta(1)].mean() b2 = samples[beta(2)].mean() slope_ob = -b1 / b2 int_ob = -b0 / b2 slope_ex = 0.64 # Should be 0.67 int_ex = 0.16 # Should be -0.33; reasonable guess given thin data self.assertAlmostEqual(first=slope_ob, second=slope_ex, delta=0.05) self.assertAlmostEqual(first=int_ob, second=int_ex, delta=0.05) def test_logistic_regression_to_dot(self) -> None: self.maxDiff = None bmg = BMGInference() observed = bmg.to_dot(queries, observations) self.assertEqual(expected_dot.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/logistic_regression_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tests for error_report.py""" import unittest from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.bmg_types import NegativeReal, Probability from beanmachine.ppl.compiler.error_report import ErrorReport, Violation class ErrorReportTest(unittest.TestCase): def test_error_report(self) -> None: """test_error_report""" bmg = BMGraphBuilder() r = bmg.add_real(-2.5) b = bmg.add_bernoulli(r) v = Violation(r, NegativeReal, Probability, b, "probability", {}) e = ErrorReport() e.add_error(v) expected = """ The probability of a Bernoulli is required to be a probability but is a negative real.""" self.assertEqual(expected.strip(), str(e).strip())
beanmachine-main
tests/ppl/compiler/error_report_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference import BMGInference from torch import tensor from torch.distributions import ( Bernoulli, Beta, Gamma, HalfCauchy, Normal, StudentT, Uniform, ) @bm.random_variable def beta(n): return Beta(2.0, 2.0) @bm.random_variable def flip_beta(): return Bernoulli(tensor([beta(0), beta(1)])) @bm.random_variable def beta_2_2(): return Beta(2.0, tensor([3.0, 4.0])) @bm.random_variable def flip_beta_2_2(): return Bernoulli(beta_2_2()) @bm.random_variable def uniform_2_2(): return Uniform(0.0, tensor([1.0, 1.0])) @bm.random_variable def flip_uniform_2_2(): return Bernoulli(uniform_2_2()) @bm.random_variable def flip_logits(): return Bernoulli(logits=tensor([beta(0), beta(1)])) @bm.random_variable def flip_const(): return Bernoulli(tensor([0.25, 0.75])) @bm.random_variable def flip_const_4(): return Bernoulli(tensor([0.25, 0.75, 0.5, 0.5])) @bm.random_variable def flip_const_2_3(): return Bernoulli(tensor([[0.25, 0.75, 0.5], [0.125, 0.875, 0.625]])) @bm.random_variable def normal_2_3(): mus = flip_const_2_3() # 2 x 3 tensor of 0 or 1 sigmas = tensor([2.0, 3.0, 4.0]) return Normal(mus, sigmas) @bm.random_variable def hc_3(): return HalfCauchy(tensor([1.0, 2.0, 3.0])) @bm.random_variable def studentt_2_3(): return StudentT(hc_3(), normal_2_3(), hc_3()) @bm.functional def operators(): # Note that we do NOT devectorize the multiplication; it gets # turned into a MatrixScale. phi = Normal(0, 1).cdf return phi(((beta_2_2() + tensor([[5.0, 6.0], [7.0, 8.0]])) * 10.0).exp()) @bm.functional def multiplication(): return beta_2_2() * tensor([5.0, 6.0]) @bm.functional def complement_with_log1p(): return (-beta_2_2()).log1p() @bm.random_variable def beta1234(): return Beta(tensor([1.0, 2.0]), tensor([3.0, 4.0])) @bm.functional def sum_inverted_log_probs(): p = tensor([5.0, 6.0]) * (-beta1234()).log1p() return p.sum() @bm.random_variable def gamma(): return Gamma(1, 1) @bm.functional def normal_log_probs(): mu = tensor([5.0, 6.0]) x = tensor([7.0, 8.0]) return Normal(mu, gamma()).log_prob(x) class FixVectorizedModelsTest(unittest.TestCase): def test_fix_vectorized_models_1(self) -> None: self.maxDiff = None observations = {flip_beta(): tensor([0.0, 1.0])} queries = [flip_beta(), flip_const()] observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before the rewrite: expected = """ digraph "graph" { N00[label=2.0]; N01[label=Beta]; N02[label=Sample]; N03[label=Sample]; N04[label=Tensor]; N05[label=Bernoulli]; N06[label=Sample]; N07[label="Observation tensor([0., 1.])"]; N08[label=Query]; N09[label="[0.25,0.75]"]; N10[label=Bernoulli]; N11[label=Sample]; N12[label=Query]; N00 -> N01; N00 -> N01; N01 -> N02; N01 -> N03; N02 -> N04; N03 -> N04; N04 -> N05; N05 -> N06; N06 -> N07; N06 -> N08; N09 -> N10; N10 -> N11; N11 -> N12; } """ self.assertEqual(expected.strip(), observed.strip()) # After: observed = BMGInference().to_dot(queries, observations, after_transform=True) expected = """ digraph "graph" { N00[label=2.0]; N01[label=Beta]; N02[label=Sample]; N03[label=Sample]; N04[label=Bernoulli]; N05[label=Sample]; N06[label=Bernoulli]; N07[label=Sample]; N08[label="Observation False"]; N09[label="Observation True"]; N10[label=2]; N11[label=1]; N12[label=ToMatrix]; N13[label=Query]; N14[label=0.25]; N15[label=Bernoulli]; N16[label=Sample]; N17[label=0.75]; N18[label=Bernoulli]; N19[label=Sample]; N20[label=ToMatrix]; N21[label=Query]; N00 -> N01; N00 -> N01; N01 -> N02; N01 -> N03; N02 -> N04; N03 -> N06; N04 -> N05; N05 -> N08; N05 -> N12; N06 -> N07; N07 -> N09; N07 -> N12; N10 -> N12; N10 -> N20; N11 -> N12; N11 -> N20; N12 -> N13; N14 -> N15; N15 -> N16; N16 -> N20; N17 -> N18; N18 -> N19; N19 -> N20; N20 -> N21; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_vectorized_models_2(self) -> None: self.maxDiff = None observations = {flip_const_4(): tensor([0.0, 1.0, 0.0, 1.0])} queries = [flip_const_4()] observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before the rewrite: expected = """ digraph "graph" { N0[label="[0.25,0.75,0.5,0.5]"]; N1[label=Bernoulli]; N2[label=Sample]; N3[label="Observation tensor([0., 1., 0., 1.])"]; N4[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N2 -> N4; } """ self.assertEqual(expected.strip(), observed.strip()) # After: # Note that due to the order in which we do the rewriting we # end up with a not-deduplicated Bernoulli(0.5) node here, which # is slightly unfortunate but probably not worth fixing right now. observed = BMGInference().to_dot(queries, observations, after_transform=True) expected = """ digraph "graph" { N00[label=0.25]; N01[label=Bernoulli]; N02[label=Sample]; N03[label=0.75]; N04[label=Bernoulli]; N05[label=Sample]; N06[label=0.5]; N07[label=Bernoulli]; N08[label=Sample]; N09[label=Bernoulli]; N10[label=Sample]; N11[label="Observation False"]; N12[label="Observation True"]; N13[label="Observation False"]; N14[label="Observation True"]; N15[label=4]; N16[label=1]; N17[label=ToMatrix]; N18[label=Query]; N00 -> N01; N01 -> N02; N02 -> N11; N02 -> N17; N03 -> N04; N04 -> N05; N05 -> N12; N05 -> N17; N06 -> N07; N06 -> N09; N07 -> N08; N08 -> N13; N08 -> N17; N09 -> N10; N10 -> N14; N10 -> N17; N15 -> N17; N16 -> N17; N17 -> N18; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_vectorized_models_3(self) -> None: self.maxDiff = None observations = {flip_const_2_3(): tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])} queries = [flip_const_2_3()] observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before the rewrite: expected = """ digraph "graph" { N0[label="[[0.25,0.75,0.5],\\\\n[0.125,0.875,0.625]]"]; N1[label=Bernoulli]; N2[label=Sample]; N3[label="Observation tensor([[0., 0., 0.],\\n [1., 1., 1.]])"]; N4[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N2 -> N4; } """ self.assertEqual(expected.strip(), observed.strip()) # After: observed = BMGInference().to_dot(queries, observations, after_transform=True) expected = """ digraph "graph" { N00[label=0.25]; N01[label=Bernoulli]; N02[label=Sample]; N03[label=0.75]; N04[label=Bernoulli]; N05[label=Sample]; N06[label=0.5]; N07[label=Bernoulli]; N08[label=Sample]; N09[label=0.125]; N10[label=Bernoulli]; N11[label=Sample]; N12[label=0.875]; N13[label=Bernoulli]; N14[label=Sample]; N15[label=0.625]; N16[label=Bernoulli]; N17[label=Sample]; N18[label="Observation False"]; N19[label="Observation False"]; N20[label="Observation False"]; N21[label="Observation True"]; N22[label="Observation True"]; N23[label="Observation True"]; N24[label=3]; N25[label=2]; N26[label=ToMatrix]; N27[label=Query]; N00 -> N01; N01 -> N02; N02 -> N18; N02 -> N26; N03 -> N04; N04 -> N05; N05 -> N19; N05 -> N26; N06 -> N07; N07 -> N08; N08 -> N20; N08 -> N26; N09 -> N10; N10 -> N11; N11 -> N21; N11 -> N26; N12 -> N13; N13 -> N14; N14 -> N22; N14 -> N26; N15 -> N16; N16 -> N17; N17 -> N23; N17 -> N26; N24 -> N26; N25 -> N26; N26 -> N27; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_vectorized_models_4(self) -> None: # Demonstrate we can also do devectorizations on logits-style Bernoullis. # (A logits Bernoulli with a beta prior is a likely mistake in a real model, # but it is a convenient test case.) self.maxDiff = None observations = {} queries = [flip_logits()] observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before the rewrite: expected = """ digraph "graph" { N0[label=2.0]; N1[label=Beta]; N2[label=Sample]; N3[label=Sample]; N4[label=Tensor]; N5[label="Bernoulli(logits)"]; N6[label=Sample]; N7[label=Query]; N0 -> N1; N0 -> N1; N1 -> N2; N1 -> N3; N2 -> N4; N3 -> N4; N4 -> N5; N5 -> N6; N6 -> N7; } """ self.assertEqual(expected.strip(), observed.strip()) # After: observed = BMGInference().to_dot(queries, observations, after_transform=True) expected = """ digraph "graph" { N00[label=2.0]; N01[label=Beta]; N02[label=Sample]; N03[label=Sample]; N04[label=ToReal]; N05[label="Bernoulli(logits)"]; N06[label=Sample]; N07[label=ToReal]; N08[label="Bernoulli(logits)"]; N09[label=Sample]; N10[label=2]; N11[label=1]; N12[label=ToMatrix]; N13[label=Query]; N00 -> N01; N00 -> N01; N01 -> N02; N01 -> N03; N02 -> N04; N03 -> N07; N04 -> N05; N05 -> N06; N06 -> N12; N07 -> N08; N08 -> N09; N09 -> N12; N10 -> N12; N11 -> N12; N12 -> N13; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_vectorized_models_5(self) -> None: self.maxDiff = None observations = {} queries = [studentt_2_3()] observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before the rewrite. Note that we have a size[3] stochastic input and # a size[2, 3] stochastic input to the StudentT, and we broadcast the three # HalfCauchy samples correctly expected = """ digraph "graph" { N00[label="[1.0,2.0,3.0]"]; N01[label=HalfCauchy]; N02[label=Sample]; N03[label="[[0.25,0.75,0.5],\\\\n[0.125,0.875,0.625]]"]; N04[label=Bernoulli]; N05[label=Sample]; N06[label="[2.0,3.0,4.0]"]; N07[label=Normal]; N08[label=Sample]; N09[label=StudentT]; N10[label=Sample]; N11[label=Query]; N00 -> N01; N01 -> N02; N02 -> N09; N02 -> N09; N03 -> N04; N04 -> N05; N05 -> N07; N06 -> N07; N07 -> N08; N08 -> N09; N09 -> N10; N10 -> N11; } """ self.assertEqual(expected.strip(), observed.strip()) # After: observed = BMGInference().to_dot(queries, observations, after_transform=True) expected = """ digraph "graph" { N00[label=1.0]; N01[label=HalfCauchy]; N02[label=Sample]; N03[label=2.0]; N04[label=HalfCauchy]; N05[label=Sample]; N06[label=3.0]; N07[label=HalfCauchy]; N08[label=Sample]; N09[label=0.25]; N10[label=Bernoulli]; N11[label=Sample]; N12[label=0.75]; N13[label=Bernoulli]; N14[label=Sample]; N15[label=0.5]; N16[label=Bernoulli]; N17[label=Sample]; N18[label=0.125]; N19[label=Bernoulli]; N20[label=Sample]; N21[label=0.875]; N22[label=Bernoulli]; N23[label=Sample]; N24[label=0.625]; N25[label=Bernoulli]; N26[label=Sample]; N27[label=ToReal]; N28[label=Normal]; N29[label=Sample]; N30[label=ToReal]; N31[label=Normal]; N32[label=Sample]; N33[label=ToReal]; N34[label=4.0]; N35[label=Normal]; N36[label=Sample]; N37[label=ToReal]; N38[label=Normal]; N39[label=Sample]; N40[label=ToReal]; N41[label=Normal]; N42[label=Sample]; N43[label=ToReal]; N44[label=Normal]; N45[label=Sample]; N46[label=StudentT]; N47[label=Sample]; N48[label=StudentT]; N49[label=Sample]; N50[label=StudentT]; N51[label=Sample]; N52[label=StudentT]; N53[label=Sample]; N54[label=StudentT]; N55[label=Sample]; N56[label=StudentT]; N57[label=Sample]; N58[label=3]; N59[label=2]; N60[label=ToMatrix]; N61[label=Query]; N00 -> N01; N01 -> N02; N02 -> N46; N02 -> N46; N02 -> N52; N02 -> N52; N03 -> N04; N03 -> N28; N03 -> N38; N04 -> N05; N05 -> N48; N05 -> N48; N05 -> N54; N05 -> N54; N06 -> N07; N06 -> N31; N06 -> N41; N07 -> N08; N08 -> N50; N08 -> N50; N08 -> N56; N08 -> N56; N09 -> N10; N10 -> N11; N11 -> N27; N12 -> N13; N13 -> N14; N14 -> N30; N15 -> N16; N16 -> N17; N17 -> N33; N18 -> N19; N19 -> N20; N20 -> N37; N21 -> N22; N22 -> N23; N23 -> N40; N24 -> N25; N25 -> N26; N26 -> N43; N27 -> N28; N28 -> N29; N29 -> N46; N30 -> N31; N31 -> N32; N32 -> N48; N33 -> N35; N34 -> N35; N34 -> N44; N35 -> N36; N36 -> N50; N37 -> N38; N38 -> N39; N39 -> N52; N40 -> N41; N41 -> N42; N42 -> N54; N43 -> N44; N44 -> N45; N45 -> N56; N46 -> N47; N47 -> N60; N48 -> N49; N49 -> N60; N50 -> N51; N51 -> N60; N52 -> N53; N53 -> N60; N54 -> N55; N55 -> N60; N56 -> N57; N57 -> N60; N58 -> N60; N59 -> N60; N60 -> N61; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_vectorized_models_6(self) -> None: self.maxDiff = None observations = {} queries = [flip_beta_2_2(), flip_uniform_2_2()] observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before the rewrite: notice that here torch automatically # broadcast the 2.0 to [2.0, 2.0] for us when the node was accumulated, # and similarly for 0.0. expected = """ digraph "graph" { N00[label="[2.0,2.0]"]; N01[label="[3.0,4.0]"]; N02[label=Beta]; N03[label=Sample]; N04[label=Bernoulli]; N05[label=Sample]; N06[label=Query]; N07[label="[0.0,0.0]"]; N08[label="[1.0,1.0]"]; N09[label=Uniform]; N10[label=Sample]; N11[label=Bernoulli]; N12[label=Sample]; N13[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N03 -> N04; N04 -> N05; N05 -> N06; N07 -> N09; N08 -> N09; N09 -> N10; N10 -> N11; N11 -> N12; N12 -> N13; } """ self.assertEqual(expected.strip(), observed.strip()) # After: notice that we correctly generate two samples from a Flat distribution # here. observed = BMGInference().to_dot(queries, observations, after_transform=True) expected = """ digraph "graph" { N00[label=2.0]; N01[label=3.0]; N02[label=Beta]; N03[label=Sample]; N04[label=4.0]; N05[label=Beta]; N06[label=Sample]; N07[label=Bernoulli]; N08[label=Sample]; N09[label=Bernoulli]; N10[label=Sample]; N11[label=2]; N12[label=1]; N13[label=ToMatrix]; N14[label=Query]; N15[label=Flat]; N16[label=Sample]; N17[label=Sample]; N18[label=Bernoulli]; N19[label=Sample]; N20[label=Bernoulli]; N21[label=Sample]; N22[label=ToMatrix]; N23[label=Query]; N00 -> N02; N00 -> N05; N01 -> N02; N02 -> N03; N03 -> N07; N04 -> N05; N05 -> N06; N06 -> N09; N07 -> N08; N08 -> N13; N09 -> N10; N10 -> N13; N11 -> N13; N11 -> N22; N12 -> N13; N12 -> N22; N13 -> N14; N15 -> N16; N15 -> N17; N16 -> N18; N17 -> N20; N18 -> N19; N19 -> N22; N20 -> N21; N21 -> N22; N22 -> N23; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_vectorized_models_7(self) -> None: self.maxDiff = None observations = {} queries = [operators()] observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before the rewrite: expected = """ digraph "graph" { N00[label="[2.0,2.0]"]; N01[label="[3.0,4.0]"]; N02[label=Beta]; N03[label=Sample]; N04[label="[[5.0,6.0],\\\\n[7.0,8.0]]"]; N05[label="+"]; N06[label=10.0]; N07[label="*"]; N08[label=Exp]; N09[label=Phi]; N10[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N03 -> N05; N04 -> N05; N05 -> N07; N06 -> N07; N07 -> N08; N08 -> N09; N09 -> N10; } """ self.assertEqual(expected.strip(), observed.strip()) # Verify that it works in BMG. g, _ = BMGInference().to_graph(queries, observations) observed = g.to_dot() expected = """ digraph "graph" { N0[label="2"]; N1[label="3"]; N2[label="Beta"]; N3[label="~"]; N4[label="4"]; N5[label="Beta"]; N6[label="~"]; N7[label="10"]; N8[label="2"]; N9[label="1"]; N10[label="ToMatrix"]; N11[label="ToPosReal"]; N12[label="Broadcast"]; N13[label="matrix"]; N14[label="MatrixAdd"]; N15[label="MatrixScale"]; N16[label="MatrixExp"]; N17[label="ToReal"]; N18[label="MatrixPhi"]; N0 -> N2; N0 -> N5; N1 -> N2; N2 -> N3; N3 -> N10; N4 -> N5; N5 -> N6; N6 -> N10; N7 -> N15; N8 -> N10; N8 -> N12; N8 -> N12; N9 -> N10; N10 -> N11; N11 -> N12; N12 -> N14; N13 -> N14; N14 -> N15; N15 -> N16; N16 -> N17; N17 -> N18; Q0[label="Query"]; N18 -> Q0; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_vectorized_models_8(self) -> None: self.maxDiff = None observations = {} queries = [multiplication()] observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before the rewrite: expected = """ digraph "graph" { N0[label="[2.0,2.0]"]; N1[label="[3.0,4.0]"]; N2[label=Beta]; N3[label=Sample]; N4[label="[5.0,6.0]"]; N5[label="*"]; N6[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N5; N4 -> N5; N5 -> N6; } """ self.assertEqual(expected.strip(), observed.strip()) # After: observed = BMGInference().to_dot(queries, observations, after_transform=True) expected = """ digraph "graph" { N00[label=2.0]; N01[label=3.0]; N02[label=Beta]; N03[label=Sample]; N04[label=4.0]; N05[label=Beta]; N06[label=Sample]; N07[label=2]; N08[label=1]; N09[label=ToMatrix]; N10[label=ToPosRealMatrix]; N11[label="[5.0,6.0]"]; N12[label=ElementwiseMult]; N13[label=Query]; N00 -> N02; N00 -> N05; N01 -> N02; N02 -> N03; N03 -> N09; N04 -> N05; N05 -> N06; N06 -> N09; N07 -> N09; N08 -> N09; N09 -> N10; N10 -> N12; N11 -> N12; N12 -> N13; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_vectorized_models_9(self) -> None: self.maxDiff = None observations = {} queries = [complement_with_log1p()] observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before the rewrite: expected = """ digraph "graph" { N0[label="[2.0,2.0]"]; N1[label="[3.0,4.0]"]; N2[label=Beta]; N3[label=Sample]; N4[label="-"]; N5[label=Log1p]; N6[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; N4 -> N5; N5 -> N6; } """ self.assertEqual(expected.strip(), observed.strip()) # After: observed = BMGInference().to_dot(queries, observations, after_transform=True) expected = """ digraph "graph" { N00[label=2.0]; N01[label=3.0]; N02[label=Beta]; N03[label=Sample]; N04[label=4.0]; N05[label=Beta]; N06[label=Sample]; N07[label=2]; N08[label=1]; N09[label=complement]; N10[label=Log]; N11[label=complement]; N12[label=Log]; N13[label=ToMatrix]; N14[label=Query]; N00 -> N02; N00 -> N05; N01 -> N02; N02 -> N03; N03 -> N09; N04 -> N05; N05 -> N06; N06 -> N11; N07 -> N13; N08 -> N13; N09 -> N10; N10 -> N13; N11 -> N12; N12 -> N13; N13 -> N14; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_vectorized_models_10(self) -> None: self.maxDiff = None queries = [sum_inverted_log_probs()] observations = {} observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label=1.0]; N01[label=3.0]; N02[label=Beta]; N03[label=Sample]; N04[label=2.0]; N05[label=4.0]; N06[label=Beta]; N07[label=Sample]; N08[label="[5.0,6.0]"]; N09[label=2]; N10[label=1]; N11[label=complement]; N12[label=Log]; N13[label=complement]; N14[label=Log]; N15[label=ToMatrix]; N16[label=ToRealMatrix]; N17[label=ElementwiseMult]; N18[label=MatrixSum]; N19[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N03 -> N11; N04 -> N06; N05 -> N06; N06 -> N07; N07 -> N13; N08 -> N17; N09 -> N15; N10 -> N15; N11 -> N12; N12 -> N15; N13 -> N14; N14 -> N15; N15 -> N16; N16 -> N17; N17 -> N18; N18 -> N19; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_vectorized_models_11(self) -> None: self.maxDiff = None queries = [normal_log_probs()] observations = {} observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label=1.0]; N01[label=Gamma]; N02[label=Sample]; N03[label=2]; N04[label=1]; N05[label=5.0]; N06[label=Normal]; N07[label=7.0]; N08[label=LogProb]; N09[label=6.0]; N10[label=Normal]; N11[label=8.0]; N12[label=LogProb]; N13[label=ToMatrix]; N14[label=Query]; N00 -> N01; N00 -> N01; N01 -> N02; N02 -> N06; N02 -> N10; N03 -> N13; N04 -> N13; N05 -> N06; N06 -> N08; N07 -> N08; N08 -> N13; N09 -> N10; N10 -> N12; N11 -> N12; N12 -> N13; N13 -> N14; } """ self.assertEqual(observed.strip(), expected.strip())
beanmachine-main
tests/ppl/compiler/fix_vectorized_models_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tests for bm_graph_builder.py""" import unittest from typing import Any import torch from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.gen_bmg_cpp import to_bmg_cpp from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph from beanmachine.ppl.compiler.gen_bmg_python import to_bmg_python from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.compiler.runtime import BMGRuntime from beanmachine.ppl.model.rv_identifier import RVIdentifier from torch import Tensor def tidy(s: str) -> str: return "\n".join(c.strip() for c in s.strip().split("\n")).strip() def tensor_equality(x: Tensor, y: Tensor) -> bool: # Tensor equality is weird. Suppose x and y are both # tensor([1.0, 2.0]). Then x.eq(y) is tensor([True, True]), # and x.eq(y).all() is tensor(True). return bool(x.eq(y).all()) class BMGraphBuilderTest(unittest.TestCase): def assertEqual(self, x: Any, y: Any) -> bool: if isinstance(x, Tensor) and isinstance(y, Tensor): return tensor_equality(x, y) return super().assertEqual(x, y) def test_graph_builder_1(self) -> None: # Just a trivial model to test whether we can take a properly-typed # accumulated graph and turn it into BMG, DOT, or a program that # produces a BMG. # # @random_variable def flip(): return Bernoulli(0.5) # @functional def mult(): return (-flip() + 2) * 2 bmg = BMGraphBuilder() half = bmg.add_probability(0.5) two = bmg.add_real(2) flip = bmg.add_bernoulli(half) samp = bmg.add_sample(flip) real = bmg.add_to_real(samp) neg = bmg.add_negate(real) add = bmg.add_addition(two, neg) mult = bmg.add_multiplication(two, add) bmg.add_observation(samp, True) bmg.add_query(mult, RVIdentifier(wrapper=lambda a, b: a, arguments=(1, 1))) observed = to_dot(bmg, label_edges=False) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label="Observation True"]; N4[label=2]; N5[label=ToReal]; N6[label="-"]; N7[label="+"]; N8[label="*"]; N9[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N2 -> N5; N4 -> N7; N4 -> N8; N5 -> N6; N6 -> N7; N7 -> N8; N8 -> N9; }""" self.maxDiff = None self.assertEqual(expected.strip(), observed.strip()) g = to_bmg_graph(bmg).graph observed = g.to_string() expected = """ 0: CONSTANT(probability 0.5) (out nodes: 1) 1: BERNOULLI(0) (out nodes: 2) 2: SAMPLE(1) (out nodes: 4) observed to be boolean 1 3: CONSTANT(real 2) (out nodes: 6, 7) 4: TO_REAL(2) (out nodes: 5) 5: NEGATE(4) (out nodes: 6) 6: ADD(3, 5) (out nodes: 7) 7: MULTIPLY(3, 6) (out nodes: ) queried """ self.assertEqual(tidy(expected), tidy(observed)) observed = to_bmg_python(bmg).code expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_probability(0.5) n1 = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [n0], ) n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) g.observe(n2, True) n3 = g.add_constant_real(2.0) n4 = g.add_operator(graph.OperatorType.TO_REAL, [n2]) n5 = g.add_operator(graph.OperatorType.NEGATE, [n4]) n6 = g.add_operator(graph.OperatorType.ADD, [n3, n5]) n7 = g.add_operator(graph.OperatorType.MULTIPLY, [n3, n6]) q0 = g.query(n7) """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_cpp(bmg).code expected = """ graph::Graph g; uint n0 = g.add_constant_probability(0.5); uint n1 = g.add_distribution( graph::DistributionType::BERNOULLI, graph::AtomicType::BOOLEAN, std::vector<uint>({n0})); uint n2 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); g.observe(n2, true); uint n3 = g.add_constant_real(2.0); uint n4 = g.add_operator( graph::OperatorType::TO_REAL, std::vector<uint>({n2})); uint n5 = g.add_operator( graph::OperatorType::NEGATE, std::vector<uint>({n4})); uint n6 = g.add_operator( graph::OperatorType::ADD, std::vector<uint>({n3, n5})); uint n7 = g.add_operator( graph::OperatorType::MULTIPLY, std::vector<uint>({n3, n6})); uint q0 = g.query(n7); """ self.assertEqual(expected.strip(), observed.strip()) def test_graph_builder_2(self) -> None: bmg = BMGraphBuilder() one = bmg.add_pos_real(1) two = bmg.add_pos_real(2) # These should all be folded: four = bmg.add_power(two, two) fourth = bmg.add_division(one, four) flip = bmg.add_bernoulli(fourth) samp = bmg.add_sample(flip) inv = bmg.add_complement(samp) # NOT operation real = bmg.add_to_positive_real(inv) div = bmg.add_division(real, two) p = bmg.add_power(div, two) lg = bmg.add_log(p) bmg.add_query(lg, RVIdentifier(wrapper=lambda a, b: a, arguments=(1, 1))) # Note that the orphan nodes "1" and "4" are not stripped out # by default. If you want them gone, the "after_transform" flag does # a type check and also removes everything that is not an ancestor # of a query or observation. observed = to_dot(bmg, label_edges=False) expected = """ digraph "graph" { N00[label=1]; N01[label=4]; N02[label=0.25]; N03[label=Bernoulli]; N04[label=Sample]; N05[label=complement]; N06[label=ToPosReal]; N07[label=2]; N08[label="/"]; N09[label="**"]; N10[label=Log]; N11[label=Query]; N02 -> N03; N03 -> N04; N04 -> N05; N05 -> N06; N06 -> N08; N07 -> N08; N07 -> N09; N08 -> N09; N09 -> N10; N10 -> N11; } """ self.maxDiff = None self.assertEqual(expected.strip(), observed.strip()) g = to_bmg_graph(bmg).graph observed = g.to_string() # Here however the orphaned nodes are never added to the graph. expected = """ 0: CONSTANT(probability 0.25) (out nodes: 1) 1: BERNOULLI(0) (out nodes: 2) 2: SAMPLE(1) (out nodes: 3) 3: COMPLEMENT(2) (out nodes: 4) 4: TO_POS_REAL(3) (out nodes: 6) 5: CONSTANT(positive real 0.5) (out nodes: 6) 6: MULTIPLY(4, 5) (out nodes: 8) 7: CONSTANT(positive real 2) (out nodes: 8) 8: POW(6, 7) (out nodes: 9) 9: LOG(8) (out nodes: ) queried """ self.assertEqual(tidy(expected), tidy(observed)) def test_to_positive_real(self) -> None: """Test to_positive_real""" bmg = BMGraphBuilder() two = bmg.add_pos_real(2.0) # to_positive_real on a positive real constant is an identity self.assertEqual(bmg.add_to_positive_real(two), two) beta22 = bmg.add_beta(two, two) to_pr = bmg.add_to_positive_real(beta22) # to_positive_real nodes are deduplicated self.assertEqual(bmg.add_to_positive_real(beta22), to_pr) def test_to_probability(self) -> None: """Test to_probability""" bmg = BMGraphBuilder() h = bmg.add_probability(0.5) # to_probability on a prob constant is an identity self.assertEqual(bmg.add_to_probability(h), h) # We have (hc / (0.5 + hc)) which is always between # 0 and 1, but the quotient of two positive reals # is a positive real. Force it to be a probability. hc = bmg.add_halfcauchy(h) s = bmg.add_addition(hc, h) q = bmg.add_division(hc, s) to_p = bmg.add_to_probability(q) # to_probability nodes are deduplicated self.assertEqual(bmg.add_to_probability(q), to_p) def test_if_then_else(self) -> None: self.maxDiff = None bmg = BMGraphBuilder() p = bmg.add_constant(0.5) z = bmg.add_constant(0.0) o = bmg.add_constant(1.0) b = bmg.add_bernoulli(p) s = bmg.add_sample(b) bmg.add_if_then_else(s, o, z) observed = to_dot(bmg) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=1.0]; N4[label=0.0]; N5[label=if]; N0 -> N1[label=probability]; N1 -> N2[label=operand]; N2 -> N5[label=condition]; N3 -> N5[label=consequence]; N4 -> N5[label=alternative]; }""" self.assertEqual(expected.strip(), observed.strip()) def test_allowed_functions(self) -> None: bmg = BMGRuntime() p = bmg._bmg.add_constant(0.5) b = bmg._bmg.add_bernoulli(p) s = bmg._bmg.add_sample(b) d = bmg.handle_function(dict, [[(1, s)]]) self.assertEqual(d, {1: s}) def test_add_tensor(self) -> None: bmg = BMGraphBuilder() p = bmg.add_constant(0.5) b = bmg.add_bernoulli(p) s = bmg.add_sample(b) # Tensors are deduplicated t1 = bmg.add_tensor(torch.Size([3]), s, s, p) t2 = bmg.add_tensor(torch.Size([3]), *[s, s, p]) self.assertTrue(t1 is t2) observed = to_dot(bmg) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Tensor]; N0 -> N1[label=probability]; N0 -> N3[label=2]; N1 -> N2[label=operand]; N2 -> N3[label=0]; N2 -> N3[label=1]; } """ self.assertEqual(observed.strip(), expected.strip()) def test_remove_leaf_from_builder(self) -> None: bmg = BMGraphBuilder() p = bmg.add_constant(0.5) b = bmg.add_bernoulli(p) s = bmg.add_sample(b) o = bmg.add_observation(s, True) observed = to_dot(bmg) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label="Observation True"]; N0 -> N1[label=probability]; N1 -> N2[label=operand]; N2 -> N3[label=operand]; } """ self.assertEqual(observed.strip(), expected.strip()) with self.assertRaises(ValueError): # Not a leaf bmg.remove_leaf(s) bmg.remove_leaf(o) observed = to_dot(bmg) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N0 -> N1[label=probability]; N1 -> N2[label=operand]; } """ self.assertEqual(observed.strip(), expected.strip()) # Is a leaf now. bmg.remove_leaf(s) observed = to_dot(bmg) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N0 -> N1[label=probability]; } """ self.assertEqual(observed.strip(), expected.strip())
beanmachine-main
tests/ppl/compiler/bm_graph_builder_test.py
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # LKJ Cholesky compiler tests import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference import BMGInference from torch.distributions import HalfNormal, LKJCholesky @bm.random_variable def lkj_chol_1(): return LKJCholesky(3, 2.0) @bm.random_variable def lkj_chol_2(): # Distribution created in random variable, named argument return LKJCholesky(concentration=2.0, dim=3) @bm.random_variable def half_normal(): return HalfNormal(1.0) @bm.random_variable def lkj_chol_3(): # Distribution parameterized by another rv return LKJCholesky(3, half_normal()) @bm.random_variable def bad_lkj_chol_1(): # LKJ Cholesky must have dimension at least 2 return LKJCholesky(1, half_normal()) @bm.random_variable def bad_lkj_chol_2(): # LKJ Cholesky must have natural dimension return LKJCholesky(3.5, half_normal()) @bm.random_variable def bad_lkj_chol_3(): # LKJ Cholesky must have a positive concentration value return LKJCholesky(3, -2.0) class LKJCholeskyTest(unittest.TestCase): expected_simple_case = """ digraph "graph" { N0[label=3]; N1[label=2.0]; N2[label=LKJCholesky]; N3[label=Sample]; N4[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; } """.strip() expected_random_parameter_case = """ digraph "graph" { N0[label=1.0]; N1[label=HalfNormal]; N2[label=Sample]; N3[label=3]; N4[label=LKJCholesky]; N5[label=Sample]; N6[label=Query]; N0 -> N1; N1 -> N2; N2 -> N4; N3 -> N4; N4 -> N5; N5 -> N6; } """.strip() def test_lkj_chol_1(self) -> None: observed = BMGInference().to_dot([lkj_chol_1()], {}) self.assertEqual(self.expected_simple_case, observed.strip()) def test_lkj_chol_2(self) -> None: queries = [lkj_chol_2()] observed = BMGInference().to_dot(queries, {}) self.assertEqual(self.expected_simple_case, observed.strip()) def test_lkj_chol_3(self) -> None: queries = [lkj_chol_3()] observed = BMGInference().to_dot(queries, {}) self.assertEqual(self.expected_random_parameter_case, observed.strip()) def test_bad_lkj_chol_1(self) -> None: queries = [bad_lkj_chol_1()] self.assertRaises(ValueError, lambda: BMGInference().infer(queries, {}, 1)) def test_bad_lkj_chol_2(self) -> None: queries = [bad_lkj_chol_2()] self.assertRaises(ValueError, lambda: BMGInference().infer(queries, {}, 1)) def test_bad_lkj_chol_3s(self) -> None: queries = [bad_lkj_chol_3()] self.assertRaises(ValueError, lambda: BMGInference().infer(queries, {}, 1))
beanmachine-main
tests/ppl/compiler/lkj_cholesky_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tests for bm_to_bmg.py""" import math import unittest import astor import beanmachine.ppl as bm from beanmachine.ppl.compiler.bm_to_bmg import ( _bm_function_to_bmg_ast, _bm_function_to_bmg_function, ) from beanmachine.ppl.compiler.bmg_nodes import ExpNode from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.compiler.runtime import BMGRuntime from torch import tensor from torch.distributions import Bernoulli, Beta, Normal def f(x): return math.exp(x) class C: def m(self): return counter = 0 # Random variable that takes an argument @bm.random_variable def norm(n): global counter counter = counter + 1 return Normal(loc=0.0, scale=1.0) # Random variable that takes no argument @bm.random_variable def coin(): return Beta(2.0, 2.0) # Call to random variable inside random variable @bm.random_variable def flip(): return Bernoulli(coin()) # Functional that takes no argument @bm.functional def exp_coin(): return coin().exp() # Functional that takes an ordinary value argument @bm.functional def exp_norm(n): return norm(n).exp() # Functional that takes an graph node argument @bm.functional def exp_coin_2(c): return c.exp() # Ordinary function def add_one(x): return 1 + x # Functional that calls normal, functional, random variable functions @bm.functional def exp_coin_3(): return add_one(exp_coin_2(coin())) @bm.random_variable def coin_with_class(): C().m() f = True while f: f = not f return Beta(2.0, 2.0) @bm.functional def bad_functional_1(): # It's not legal to call a random variable function with # a stochastic value that has infinite support. return norm(coin()) @bm.random_variable def flips(n): return Bernoulli(0.5) @bm.random_variable def norm_ten(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9): return Normal(loc=0.0, scale=1.0) @bm.functional def bad_functional_2(): # There are 1024 possibilities for this call; we give an # error when the control flow is this complex. return norm_ten( flips(0), flips(1), flips(2), flips(3), flips(4), flips(5), flips(6), flips(7), flips(8), flips(9), ) @bm.functional def bad_functional_3(): # Calling rv functions with named arguments is not allowed. return flips(n=1) @bm.functional def bad_functional_4(): # Calling functionals with named arguments is not allowed. return exp_coin_2(c=1) @bm.random_variable def beta(n): return Beta(2.0, 2.0) @bm.functional def beta_tensor_1a(): # What happens if we have two uses of the same RV indexed # with a tensor? return beta(tensor(1)).log() @bm.functional def beta_tensor_1b(): return beta(tensor(1)).exp() observable_side_effect = 0 def cause_side_effect(): global observable_side_effect observable_side_effect = 1 return True @bm.random_variable def assertions_are_removed(): assert cause_side_effect() return Bernoulli(0.5) @bm.random_variable def flip_with_comprehension(): _ = [0 for x in []] return Bernoulli(0.5) @bm.random_variable def flip_with_nested_function(): def x(): return 0.5 x() return Bernoulli(0.5) # Verify that aliased decorator is allowed: myrv = bm.random_variable @myrv def aliased_rv(): return Bernoulli(0.5) # Verify that random variable constructed without explicit decorator is allowed: def some_function(): return Bernoulli(0.5) undecorated_rv = myrv(some_function) # TODO: What if some_function is a lambda instead of a function definition? # TODO: What if the function has outer variables? class JITTest(unittest.TestCase): def test_function_transformation_1(self) -> None: """Unit tests for JIT functions""" self.maxDiff = None # Verify code generation of lifted, nested form of # functions f(x), norm(), above. self.assertTrue(norm.is_random_variable) # TODO: Stop using _bm_function_to_bmg_ast for testing. bmgast = _bm_function_to_bmg_ast(f, "f_helper") observed = astor.to_source(bmgast) expected = """ def f_helper(bmg): import operator def f(x): a2 = bmg.handle_dot_get(math, 'exp') r3 = [x] r4 = {} r1 = bmg.handle_function(a2, r3, r4) return r1 return f""" self.assertEqual(observed.strip(), expected.strip()) bmgast = _bm_function_to_bmg_ast(norm().function, "norm_helper") observed = astor.to_source(bmgast) expected = """ def norm_helper(bmg): import operator def norm(n): global counter a1 = 1 counter = bmg.handle_function(operator.add, [counter, a1]) r4 = [] a9 = 0.0 a8 = dict(loc=a9) a11 = 1.0 a10 = dict(scale=a11) r7 = dict(**a8, **a10) r2 = bmg.handle_function(Normal, r4, r7) return r2 a3 = bmg.handle_dot_get(bm, 'random_variable') r5 = [norm] r6 = {} norm = bmg.handle_function(a3, r5, r6) return norm """ self.assertEqual(observed.strip(), expected.strip()) # * Obtain the lifted version of f. # * Ask the graph builder to transform the rv associated # with norm(0) to a sample node. # * Invoke the lifted f and verify that we accumulate an # exp(sample(normal(0, 1))) node into the graph. bmg = BMGRuntime() lifted_f = _bm_function_to_bmg_function(f, bmg) norm_sample = bmg._rv_to_node(norm(0)) result = lifted_f(norm_sample) self.assertTrue(isinstance(result, ExpNode)) dot = to_dot(bmg._bmg) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=Exp]; N0 -> N2[label=mu]; N1 -> N2[label=sigma]; N2 -> N3[label=operand]; N3 -> N4[label=operand]; } """ self.assertEqual(dot.strip(), expected.strip()) # Verify that we've executed the body of the lifted # norm(n) exactly once. global counter self.assertEqual(counter, 1) # Turning an rv into a node should be idempotent; # the second time, we do not increment the counter. bmg._rv_to_node(norm(0)) self.assertEqual(counter, 1) bmg._rv_to_node(norm(1)) self.assertEqual(counter, 2) bmg._rv_to_node(norm(1)) self.assertEqual(counter, 2) def test_function_transformation_2(self) -> None: """Unit tests for JIT functions""" self.maxDiff = None # We have flip() which calls Bernoulli(coin()). What should happen # here is: # * _rv_to_node jit-compiles flip() and executes the lifted version. # * while executing the lifted flip() we encounter a call to # coin(). We detect that coin is a random variable function, # and call it. # * We now have the RVIdentifier for coin() in hand, which we # then jit-compile in turn, and execute the lifted version. # * That completes the construction of the graph. bmg = BMGRuntime() bmg._rv_to_node(flip()) dot = to_dot(bmg._bmg) expected = """ digraph "graph" { N0[label=2.0]; N1[label=Beta]; N2[label=Sample]; N3[label=Bernoulli]; N4[label=Sample]; N0 -> N1[label=alpha]; N0 -> N1[label=beta]; N1 -> N2[label=operand]; N2 -> N3[label=probability]; N3 -> N4[label=operand]; } """ self.assertEqual(dot.strip(), expected.strip()) def test_function_transformation_3(self) -> None: """Unit tests for JIT functions""" self.maxDiff = None rt = BMGRuntime() queries = [coin(), exp_coin()] observations = {flip(): tensor(1.0)} bmg = rt.accumulate_graph(queries, observations) dot = to_dot(bmg) expected = """ digraph "graph" { N0[label=2.0]; N1[label=Beta]; N2[label=Sample]; N3[label=Bernoulli]; N4[label=Sample]; N5[label="Observation tensor(1.)"]; N6[label=Query]; N7[label=Exp]; N8[label=Query]; N0 -> N1[label=alpha]; N0 -> N1[label=beta]; N1 -> N2[label=operand]; N2 -> N3[label=probability]; N2 -> N6[label=operator]; N2 -> N7[label=operand]; N3 -> N4[label=operand]; N4 -> N5[label=operand]; N7 -> N8[label=operator]; } """ self.assertEqual(dot.strip(), expected.strip()) def test_function_transformation_4(self) -> None: """Unit tests for JIT functions""" self.maxDiff = None rt = BMGRuntime() queries = [exp_norm(0)] observations = {} bmg = rt.accumulate_graph(queries, observations) dot = to_dot(bmg) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=Exp]; N5[label=Query]; N0 -> N2[label=mu]; N1 -> N2[label=sigma]; N2 -> N3[label=operand]; N3 -> N4[label=operand]; N4 -> N5[label=operator]; } """ self.assertEqual(dot.strip(), expected.strip()) def test_function_transformation_5(self) -> None: """Unit tests for JIT functions""" self.maxDiff = None rt = BMGRuntime() queries = [exp_coin_3()] observations = {} bmg = rt.accumulate_graph(queries, observations) dot = to_dot(bmg) # Note that though functional exp_coin_3 calls functional exp_coin_2, # we only get one query node emitted into the graph because the # caller only asked for one query node. expected = """ digraph "graph" { N0[label=1]; N1[label=2.0]; N2[label=Beta]; N3[label=Sample]; N4[label=Exp]; N5[label="+"]; N6[label=Query]; N0 -> N5[label=left]; N1 -> N2[label=alpha]; N1 -> N2[label=beta]; N2 -> N3[label=operand]; N3 -> N4[label=operand]; N4 -> N5[label=right]; N5 -> N6[label=operator]; } """ self.assertEqual(expected.strip(), dot.strip()) def test_function_transformation_6(self) -> None: """Unit tests for JIT functions""" # This test regresses some crashing bugs. The compiler crashed if an # RV function contained: # # * a class constructor # * a call to a class method # * a while loop self.maxDiff = None rt = BMGRuntime() queries = [coin_with_class()] observations = {} bmg = rt.accumulate_graph(queries, observations) dot = to_dot(bmg) expected = """ digraph "graph" { N0[label=2.0]; N1[label=Beta]; N2[label=Sample]; N3[label=Query]; N0 -> N1[label=alpha]; N0 -> N1[label=beta]; N1 -> N2[label=operand]; N2 -> N3[label=operator]; } """ self.assertEqual(dot.strip(), expected.strip()) # TODO: Also test lambdas and nested functions. # TODO: What should we do about closures? def test_bad_control_flow_1(self) -> None: """Unit tests for JIT functions""" self.maxDiff = None bmg = BMGRuntime() queries = [bad_functional_1()] observations = {} # TODO: Better exception class with self.assertRaises(ValueError) as ex: bmg.accumulate_graph(queries, observations) self.assertEqual( str(ex.exception), "Stochastic control flow must have finite support." ) def test_bad_control_flow_2(self) -> None: """Unit tests for JIT functions""" self.maxDiff = None bmg = BMGRuntime() queries = [bad_functional_2()] observations = {} # TODO: Better exception class with self.assertRaises(ValueError) as ex: bmg.accumulate_graph(queries, observations) self.assertEqual(str(ex.exception), "Stochastic control flow is too complex.") def test_bad_control_flow_3(self) -> None: """Unit tests for JIT functions""" self.maxDiff = None bmg = BMGRuntime() queries = [bad_functional_3()] observations = {} # TODO: Better exception class with self.assertRaises(ValueError) as ex: bmg.accumulate_graph(queries, observations) self.assertEqual( str(ex.exception), "Random variable function calls must not have named arguments.", ) def test_bad_control_flow_4(self) -> None: """Unit tests for JIT functions""" self.maxDiff = None bmg = BMGRuntime() queries = [bad_functional_4()] observations = {} # TODO: Better exception class with self.assertRaises(ValueError) as ex: bmg.accumulate_graph(queries, observations) self.assertEqual( str(ex.exception), "Functional calls must not have named arguments.", ) def test_rv_identity(self) -> None: self.maxDiff = None # This test demonstrates an invariant which we must maintain as we modify # the implementation details of the jitter: two calls to the same RV with # the same arguments must produce the same sample node. Here the two calls # to beta(tensor(1)) must both produce the same sample node, not two samples. # # TODO: # # Right now this invariant is maintained by the @memoize modifier that is # automatically generated on a lifted rv function, but that mechanism # is redundant to the rv_map inside the graph builder, so we will eventually # remove it. When we do so, we'll need to ensure that one of the following # happens: # # * We add a hash function to RVIdentifier that treats identical-content tensors # as the same argument # * We build a special-purpose map for tracking RVID -> Sample node mappings. # * We restrict arguments to rv functions to be hashable (and canonicalize tensor # arguments to single values.) # * Or some other similar mechanism for maintaining this invariant. rt = BMGRuntime() queries = [beta_tensor_1a(), beta_tensor_1b()] observations = {} bmg = rt.accumulate_graph(queries, observations) observed = to_dot(bmg) expected = """ digraph "graph" { N0[label=2.0]; N1[label=Beta]; N2[label=Sample]; N3[label=Log]; N4[label=Query]; N5[label=Exp]; N6[label=Query]; N0 -> N1[label=alpha]; N0 -> N1[label=beta]; N1 -> N2[label=operand]; N2 -> N3[label=operand]; N2 -> N5[label=operand]; N3 -> N4[label=operator]; N5 -> N6[label=operator]; }""" self.assertEqual(expected.strip(), observed.strip()) def test_assertions_are_removed(self) -> None: # The lifted form of a function removes all assertion statements. # We can demonstrate this by making an assertion that causes an # observable effect, and then showing that the effect does not # happen when the lifted form is executed. global observable_side_effect self.maxDiff = None self.assertEqual(observable_side_effect, 0) # In non-lifted code, the assertion causes a side effect. assert cause_side_effect() self.assertEqual(observable_side_effect, 1) observable_side_effect = 0 bmg = BMGRuntime() bmg.accumulate_graph([assertions_are_removed()], {}) # The side effect is not caused. self.assertEqual(observable_side_effect, 0) def test_nested_functions_and_comprehensions(self) -> None: self.maxDiff = None # We had a bug where a nested function or comprehension inside a # random_variable would crash while accumulating the graph; # this test regresses that bug by simply verifying that we do # not crash in those scenarios now. bmg = BMGRuntime() bmg.accumulate_graph([flip_with_nested_function()], {}) bmg = BMGRuntime() bmg.accumulate_graph([flip_with_comprehension()], {}) def test_aliased_rv(self) -> None: self.maxDiff = None rt = BMGRuntime() queries = [aliased_rv()] observations = {} bmg = rt.accumulate_graph(queries, observations) observed = to_dot(bmg) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Query]; N0 -> N1[label=probability]; N1 -> N2[label=operand]; N2 -> N3[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_undecorated_rv(self) -> None: self.maxDiff = None rt = BMGRuntime() queries = [undecorated_rv()] observations = {} bmg = rt.accumulate_graph(queries, observations) observed = to_dot(bmg) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Query]; N0 -> N1[label=probability]; N1 -> N2[label=operand]; N2 -> N3[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_nested_rv(self) -> None: self.maxDiff = None # A random variable that is nested inside another function and closed over # an outer local variable works: prob = 0.5 @bm.random_variable def nested_flip(): return Bernoulli(prob) observed = to_dot(BMGRuntime().accumulate_graph([nested_flip()], {})) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Query]; N0 -> N1[label=probability]; N1 -> N2[label=operand]; N2 -> N3[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip()) # Check that we can close over an outer variable with a lambda also. mean = 0.0 sigma = 1.0 shift = 2.0 # Lambda random variable: lambda_norm = bm.random_variable(lambda: Normal(mean, sigma)) # Lambda that is not a functional, not declared inside a functional, but called # from inside a functional: lambda_mult = lambda x, y: x * y # noqa # Lambda functional: lambda_sum = bm.functional(lambda: lambda_mult(lambda_norm() + shift, 4.0)) observed = to_dot(BMGRuntime().accumulate_graph([lambda_sum()], {})) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=2.0]; N5[label="+"]; N6[label=4.0]; N7[label="*"]; N8[label=Query]; N0 -> N2[label=mu]; N1 -> N2[label=sigma]; N2 -> N3[label=operand]; N3 -> N5[label=left]; N4 -> N5[label=right]; N5 -> N7[label=left]; N6 -> N7[label=right]; N7 -> N8[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip()) # What if we have a nested function inside a random variable? @bm.random_variable def norm1(): return Normal(0.0, 1.0) @bm.random_variable def norm2(): def mult(x, y): return x * y return Normal(mult(norm1(), 2.0), 3.0) observed = to_dot(BMGRuntime().accumulate_graph([norm2()], {})) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=2.0]; N5[label="*"]; N6[label=3.0]; N7[label=Normal]; N8[label=Sample]; N9[label=Query]; N0 -> N2[label=mu]; N1 -> N2[label=sigma]; N2 -> N3[label=operand]; N3 -> N5[label=left]; N4 -> N5[label=right]; N5 -> N7[label=mu]; N6 -> N7[label=sigma]; N7 -> N8[label=operand]; N8 -> N9[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip()) # What if we have a random variable nested inside another? @bm.random_variable def norm3(): @bm.random_variable def norm4(): return Normal(0.0, 1.0) return Normal(norm4() * 5.0, 6.0) observed = to_dot(BMGRuntime().accumulate_graph([norm3()], {})) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=5.0]; N5[label="*"]; N6[label=6.0]; N7[label=Normal]; N8[label=Sample]; N9[label=Query]; N0 -> N2[label=mu]; N1 -> N2[label=sigma]; N2 -> N3[label=operand]; N3 -> N5[label=left]; N4 -> N5[label=right]; N5 -> N7[label=mu]; N6 -> N7[label=sigma]; N7 -> N8[label=operand]; N8 -> N9[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/jit_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Compilation test of Todd's Bayesian Multiple Testing model""" import unittest import beanmachine.ppl as bm import torch.distributions as dist from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor @bm.random_variable def theta(): return dist.Beta(2, 5) @bm.random_variable def sigma(): return dist.HalfCauchy(5) @bm.random_variable def tau(): return dist.HalfCauchy(5) @bm.random_variable def z(i): return dist.Bernoulli(theta()) @bm.random_variable def mu(i): return dist.Normal(0, tau()) @bm.random_variable def x(i): return dist.Normal(mu(i) * z(i), sigma()) class BMTModelTest(unittest.TestCase): def test_bmt_to_dot(self) -> None: self.maxDiff = None x_obs = [3.0, -0.75, 2.0, -0.3] n_obs = len(x_obs) queries = ( [theta(), sigma(), tau()] + [z(i) for i in range(n_obs)] + [mu(i) for i in range(n_obs)] ) observations = {x(i): tensor(v) for i, v in enumerate(x_obs)} observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label=5.0]; N01[label=HalfCauchy]; N02[label=Sample]; N03[label=0.0]; N04[label=Normal]; N05[label=Sample]; N06[label=2.0]; N07[label=Beta]; N08[label=Sample]; N09[label=Bernoulli]; N10[label=Sample]; N11[label=Sample]; N12[label=if]; N13[label=Normal]; N14[label=Sample]; N15[label="Observation 3.0"]; N16[label=Sample]; N17[label=Sample]; N18[label=if]; N19[label=Normal]; N20[label=Sample]; N21[label="Observation -0.75"]; N22[label=Sample]; N23[label=Sample]; N24[label=if]; N25[label=Normal]; N26[label=Sample]; N27[label="Observation 2.0"]; N28[label=Sample]; N29[label=Sample]; N30[label=if]; N31[label=Normal]; N32[label=Sample]; N33[label="Observation -0.30000001192092896"]; N34[label=Query]; N35[label=Query]; N36[label=Query]; N37[label=Query]; N38[label=Query]; N39[label=Query]; N40[label=Query]; N41[label=Query]; N42[label=Query]; N43[label=Query]; N44[label=Query]; N00 -> N01; N00 -> N07; N01 -> N02; N01 -> N11; N02 -> N04; N02 -> N36; N03 -> N04; N03 -> N12; N03 -> N18; N03 -> N24; N03 -> N30; N04 -> N05; N04 -> N16; N04 -> N22; N04 -> N28; N05 -> N12; N05 -> N41; N06 -> N07; N07 -> N08; N08 -> N09; N08 -> N34; N09 -> N10; N09 -> N17; N09 -> N23; N09 -> N29; N10 -> N12; N10 -> N37; N11 -> N13; N11 -> N19; N11 -> N25; N11 -> N31; N11 -> N35; N12 -> N13; N13 -> N14; N14 -> N15; N16 -> N18; N16 -> N42; N17 -> N18; N17 -> N38; N18 -> N19; N19 -> N20; N20 -> N21; N22 -> N24; N22 -> N43; N23 -> N24; N23 -> N39; N24 -> N25; N25 -> N26; N26 -> N27; N28 -> N30; N28 -> N44; N29 -> N30; N29 -> N40; N30 -> N31; N31 -> N32; N32 -> N33; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/bmt_model_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from beanmachine.ppl.compiler.broadcaster import broadcast_fnc from torch import Size class BroadcastTest(unittest.TestCase): def test_broadcast_success(self) -> None: input_sizes = [Size([3]), Size([3, 2, 1]), Size([1, 2, 1]), Size([2, 3])] expectations = [ [0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5], [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1], [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5], ] target_size = Size([3, 2, 3]) i = 0 for input_size in input_sizes: broadcaster = broadcast_fnc(input_size, target_size) expectation = expectations[i] i = i + 1 for j in range(0, 18): input_index = broadcaster(j) expected = expectation[j] self.assertEqual(input_index, expected)
beanmachine-main
tests/ppl/compiler/broadcaster_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tests for patterns.py""" import ast import re import unittest from beanmachine.ppl.compiler.ast_patterns import ( ast_str, binop, compare, expr, module, name_constant, num, ) from beanmachine.ppl.compiler.patterns import ( HeadTail, ListAll, ListAny, match, negate, twoPlusList, ) def tidy(s: str) -> str: return re.sub(" +", " ", s.replace("\n", " ")).strip() class PatternsTest(unittest.TestCase): def test_twoPlus(self) -> None: """Test the twoPlusList pattern""" self.assertTrue(match(twoPlusList, [1, 2, 3]).is_success) self.assertTrue(match(twoPlusList, [1, 2]).is_fail) self.assertTrue(match(twoPlusList, [1]).is_fail) self.assertTrue(match(twoPlusList, []).is_fail) def test_atomic(self) -> None: """Test atomic patterns""" p = binop( left=binop(left=num(n=0), right=name_constant(value=True)), right=num(n=1.5) ) observed = str(p) expected = """ (isinstance(test, BinOp) and (isinstance(test.left, BinOp) and (isinstance(test.left.left, Num) and test.left.left.n==0) and (isinstance(test.left.right, NameConstant) and test.left.right.value==True)) and (isinstance(test.right, Num) and test.right.n==1.5)) """ self.maxDiff = None self.assertEqual(tidy(observed), tidy(expected)) result = p(ast.parse("0 * True + 1.5").body[0].value) self.assertTrue(result) # This one fails because it is binop(0, binop(true, 1.5)), and the # pattern is looking for binop(binop(0, true), 1.5) result = p(ast.parse("0 + True * 1.5").body[0].value) self.assertFalse(result) def test_negate(self) -> None: """Test negate""" p = negate(ast_str(s="abc")) result = p(ast.parse("'abc'").body[0].value) self.assertTrue(result.is_fail()) result = p(ast.parse("1+2").body[0].value) self.assertTrue(result.is_success()) def test_list_patterns(self) -> None: """Tests for list patterns""" p = module(body=[]) observed = str(p) expected = """(isinstance(test, Module) and test.body==[])""" self.maxDiff = None self.assertEqual(tidy(observed), tidy(expected)) result = p(ast.parse("")) self.assertTrue(result.is_success()) result = p(ast.parse("1 + 2")) self.assertTrue(result.is_fail()) p = module(body=[expr(value=binop()), expr(value=binop())]) observed = str(p) expected = """ (isinstance(test, Module) and [(isinstance(test.body[0], Expr) and isinstance(test.body[0].value, BinOp)), (isinstance(test.body[1], Expr) and isinstance(test.body[1].value, BinOp))]) """ self.assertEqual(tidy(observed), tidy(expected)) result = p(ast.parse("1 + 2")) self.assertTrue(result.is_fail()) result = p(ast.parse("1 + 2; 3 * 4")) self.assertTrue(result.is_success()) p = module(ListAny(expr(compare()))) observed = str(p) expected = """ (isinstance(test, Module) and test.body.any(x:(isinstance(x, Expr) and isinstance(x.value, Compare)))) """ self.assertEqual(tidy(observed), tidy(expected)) result = p(ast.parse("1 + 2; x is None")) self.assertTrue(result.is_success()) result = p(ast.parse("1 + 2; 3 * 4")) self.assertTrue(result.is_fail()) p = module(ListAll(expr(compare()))) observed = str(p) expected = """ (isinstance(test, Module) and test.body.all(x:(isinstance(x, Expr) and isinstance(x.value, Compare)))) """ self.assertEqual(tidy(observed), tidy(expected)) result = p(ast.parse("1 + 2; x is None")) self.assertTrue(result.is_fail()) result = p(ast.parse("x is None; y is None")) self.assertTrue(result.is_success()) # This pattern says that the body is a list where the head # is a binop statement and the tail is empty; that is, there # is only one item in the list. We could match any list pattern # against the tail. p = module(HeadTail(expr(binop()), [])) result = p(ast.parse("1 + 2; x is None")) self.assertTrue(result.is_fail()) result = p(ast.parse("x is None")) self.assertTrue(result.is_fail()) result = p(ast.parse("1 + 2")) self.assertTrue(result.is_success())
beanmachine-main
tests/ppl/compiler/patterns_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm import torch from beanmachine.ppl.inference import BMGInference from torch import exp, log, logsumexp from torch.distributions import Normal @bm.random_variable def norm(x): return Normal(0.0, 1.0) @bm.functional def log_1(): return log(exp(norm(0)) + exp(norm(1)) + exp(norm(2))) @bm.functional def logsumexp_1(): return logsumexp(torch.tensor([norm(0), norm(1), norm(2)]), 0) @bm.functional def log_2(): return log_1() @bm.functional def log_3(): return logsumexp_1() @bm.functional def exp_1(): return exp(norm(3)) + exp(norm(4)) @bm.functional def log_4(): return log(exp(norm(0)) + exp(norm(1)) + exp(norm(2)) + exp_1()) @bm.functional def log_5(): return log_4() @bm.functional def log_6(): return log_4() + exp_1() class FixLogSumExpTest(unittest.TestCase): def test_fix_log_sum_exp_1(self) -> None: self.maxDiff = None observations = {} queries_observed = [log_2()] graph_observed = BMGInference().to_dot(queries_observed, observations) queries_expected = [log_3()] graph_expected = BMGInference().to_dot(queries_expected, observations) self.assertEqual(graph_observed.strip(), graph_expected.strip()) def test_fix_log_sum_exp_2(self) -> None: self.maxDiff = None observations = {} queries_observed = [log_5()] graph_observed = BMGInference().to_dot(queries_observed, observations) graph_expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=Sample]; N5[label=Sample]; N6[label=Sample]; N7[label=Sample]; N8[label=LogSumExp]; N9[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N2 -> N4; N2 -> N5; N2 -> N6; N2 -> N7; N3 -> N8; N4 -> N8; N5 -> N8; N6 -> N8; N7 -> N8; N8 -> N9; } """ self.assertEqual(graph_observed.strip(), graph_expected.strip()) def test_fix_log_sum_exp_3(self) -> None: self.maxDiff = None observations = {} queries_observed = [log_6()] graph_observed = BMGInference().to_dot(queries_observed, observations) graph_expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=Sample]; N06[label=Sample]; N07[label=Sample]; N08[label=Exp]; N09[label=Exp]; N10[label=Exp]; N11[label=Exp]; N12[label=Exp]; N13[label="+"]; N14[label="+"]; N15[label=Log]; N16[label=ToReal]; N17[label="+"]; N18[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N02 -> N04; N02 -> N05; N02 -> N06; N02 -> N07; N03 -> N08; N04 -> N09; N05 -> N10; N06 -> N11; N07 -> N12; N08 -> N14; N09 -> N14; N10 -> N14; N11 -> N13; N12 -> N13; N13 -> N14; N13 -> N16; N14 -> N15; N15 -> N17; N16 -> N17; N17 -> N18; } """ self.assertEqual(graph_observed.strip(), graph_expected.strip())
beanmachine-main
tests/ppl/compiler/fix_logsumexp_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm import torch from beanmachine.ppl.inference import BMGInference from torch.distributions import Beta @bm.random_variable def beta(n): return Beta(2, 2) @bm.functional def logaddexp_1(): return torch.logaddexp(beta(0), beta(1)) class FixLogAddExpTest(unittest.TestCase): def test_logaddexp_1(self) -> None: queries = [logaddexp_1()] graph_observed = BMGInference().to_dot(queries, {}, after_transform=False) graph_expected = """ digraph "graph" { N0[label=2.0]; N1[label=Beta]; N2[label=Sample]; N3[label=Sample]; N4[label=LogAddExp]; N5[label=Query]; N0 -> N1; N0 -> N1; N1 -> N2; N1 -> N3; N2 -> N4; N3 -> N4; N4 -> N5; } """ self.assertEqual(graph_observed.strip(), graph_expected.strip()) def test_logaddexp_2(self) -> None: queries = [logaddexp_1()] graph_observed = BMGInference().to_dot(queries, {}) graph_expected = """ digraph "graph" { N0[label=2.0]; N1[label=Beta]; N2[label=Sample]; N3[label=Sample]; N4[label=ToReal]; N5[label=ToReal]; N6[label=LogSumExp]; N7[label=Query]; N0 -> N1; N0 -> N1; N1 -> N2; N1 -> N3; N2 -> N4; N3 -> N5; N4 -> N6; N5 -> N6; N6 -> N7; } """ self.assertEqual(graph_observed.strip(), graph_expected.strip())
beanmachine-main
tests/ppl/compiler/fix_logaddexp_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """End-to-end test for tutorial on Robust Linear Regression""" # This file is a manual replica of the Bento tutorial with the same name ### TODO: This tutorial has a couple of different calls to inference, and currently only the ### first call is being considered. It would be good to go through the other parts as well import logging import unittest # TODO: Check imports for conistency import beanmachine.ppl as bm import torch # from torch import manual_seed, tensor import torch.distributions as dist # from torch.distributions import Bernoulli, Normal, Uniform from beanmachine.ppl.inference.bmg_inference import BMGInference from sklearn import model_selection from torch import tensor # This makes the results deterministic and reproducible. logging.getLogger("beanmachine").setLevel(50) torch.manual_seed(12) # Model @bm.random_variable def beta(): """ Regression Coefficient """ return dist.Normal(0, 1000) @bm.random_variable def alpha(): """ Regression Bias/Offset """ return dist.Normal(0, 1000) @bm.random_variable def sigma_regressor(): """ Deviation parameter for Student's T Controls the magnitude of the errors. """ return dist.HalfNormal(1000) @bm.random_variable def df_nu(): """ Degrees of Freedom of a Student's T Check https://en.wikipedia.org/wiki/Student%27s_t-distribution for effect """ return dist.Gamma(2, 0.1) @bm.random_variable def y_robust(X): """ Heavy-Tailed Noise model for regression utilizing StudentT Student's T : https://en.wikipedia.org/wiki/Student%27s_t-distribution """ return dist.StudentT(df=df_nu(), loc=beta() * X + alpha(), scale=sigma_regressor()) # Creating sample data sigma_data = torch.tensor([20, 40]) rho = -0.95 N = 200 cov = torch.tensor( [ [torch.pow(sigma_data[0], 2), sigma_data[0] * sigma_data[1] * rho], [sigma_data[0] * sigma_data[1] * rho, torch.pow(sigma_data[1], 2)], ] ) dist_clean = dist.MultivariateNormal(loc=torch.zeros(2), covariance_matrix=cov) points = tensor([dist_clean.sample().tolist() for i in range(N)]).view(N, 2) X = X_clean = points[:, 0] Y = Y_clean = points[:, 1] true_beta_1 = 2.0 true_beta_0 = 5.0 true_epsilon = 1.0 points_noisy = points points_noisy[0, :] = torch.tensor([-20, -80]) points_noisy[1, :] = torch.tensor([20, 100]) points_noisy[2, :] = torch.tensor([40, 40]) X_corr = points_noisy[:, 0] Y_corr = points_noisy[:, 1] X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y) # Inference parameters num_samples = ( 2 ###000 - Sample size reduced since it should not affect compilation issues ) num_chains = 4 observations = {y_robust(X_train): Y_train} queries = [beta(), alpha(), sigma_regressor(), df_nu()] ### The following is old code class tutorialRobustLinearRegresionTest(unittest.TestCase): def test_tutorial_Robust_Linear_Regression(self) -> None: """Check BM and BMG inference both terminate""" self.maxDiff = None # Inference with BM # Note: No explicit seed here (in original tutorial model). Should we add one? amh = bm.SingleSiteAncestralMetropolisHastings() # Added local binding _ = amh.infer( queries=queries, observations=observations, num_samples=num_samples, num_chains=num_chains, ) self.assertTrue(True, msg="We just want to check this point is reached") def test_tutorial_Robust_Linear_Regression_to_dot_cpp_python( self, ) -> None: self.maxDiff = None ## Intermediate forms too large w devectorization # observed = BMGInference().to_dot(queries, observations) # expected = """ # """ # self.assertEqual(expected.strip(), observed.strip()) # # observed = BMGInference().to_cpp(queries, observations) # expected = """""" # self.assertEqual(expected.strip(), observed.strip()) # # observed = BMGInference().to_python(queries, observations) # expected = """""" # self.assertEqual(expected.strip(), observed.strip()) _ = BMGInference().infer( queries=queries, observations=observations, num_samples=num_samples )
beanmachine-main
tests/ppl/compiler/tutorial_Robust_Linear_Regression_test.py
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # BM -> BMG compiler arithmetic tests import math import operator import unittest import beanmachine.ppl as bm import numpy as np import torch from beanmachine.ppl.inference.bmg_inference import BMGInference from torch.distributions import Bernoulli, Beta, Binomial, HalfCauchy, Normal @bm.random_variable def bern(): return Bernoulli(0.5) @bm.random_variable def beta(): return Beta(2.0, 2.0) @bm.random_variable def norm(): return Normal(0.0, 1.0) @bm.random_variable def hc(): return HalfCauchy(1.0) @bm.functional def expm1_prob(): return beta().expm1() @bm.functional def expm1_real(): return torch.expm1(norm()) @bm.functional def expm1_negreal(): return torch.Tensor.expm1(-hc()) @bm.functional def logistic_prob(): return beta().sigmoid() @bm.functional def logistic_real(): return torch.sigmoid(norm()) @bm.functional def logistic_negreal(): return torch.Tensor.sigmoid(-hc()) @bm.random_variable def ordinary_arithmetic(n): return Bernoulli( probs=torch.tensor(0.5) + torch.log(torch.exp(n * torch.tensor(0.1))) ) @bm.random_variable def stochastic_arithmetic(): s = 0.0 # Verify that mutating += works on lists normally: items = [0] items += [1] # Verify that +=, *=, -= all work on graph nodes: for n in items: p = torch.log(torch.tensor(0.01)) p *= ordinary_arithmetic(n) s += p m = 1 m -= torch.exp(input=torch.log(torch.tensor(0.99)) + s) return Bernoulli(m) @bm.functional def mutating_assignments(): # Torch supports mutating tensors in-place, which allows for # aliasing. THE COMPILER DOES NOT CORRECTLY DETECT ALIASING # WHEN A STOCHASTIC QUANTITY IS INVOLVED! x = torch.tensor(1.0) y = x # y is an alias for x y += 2.0 # y is now 3, and so is x y = y + 4.0 # y is now 7, but x is still 3 # So far we're all fine; every mutated tensor has been non-stochastic. b = beta() * x + y # b is beta_sample * 3 + 7 # Now let's see how things go wrong. We'll alias stochastic quantity b: c = b c *= 5.0 # In Python Bean Machine, c and b are now both (beta() * 3 + 7) * 5 # but the compiler does not detect that c and b are aliases, and does # not represent tensor mutations in graph nodes. The compiler thinks # that c is (beta() * 3 + 7) * 5 but b is still (beta() * 3 + 7): return b @bm.random_variable def neg_of_neg(): return Normal(-torch.neg(norm()), 1.0) @bm.functional def subtractions(): # Show that we can handle a bunch of different ways to subtract things # Show that unary plus operations are discarded. n = +norm() b = +beta() h = +hc() return +torch.sub(+n.sub(+b), +b - h) @bm.random_variable def bino(): return Binomial(total_count=3, probs=0.5) @bm.functional def unsupported_add(): # What happens if we use a stochastic quantity in an operation with # a non-tensor, non-number? return bino() + "foo" @bm.functional def log_1(): # Ordinary constant, math.log. Note that a functional is # required to return a tensor. Verify that ordinary # arithmetic still works in a model. return torch.tensor(math.log(1.0)) @bm.functional def log_2(): # Tensor constant, math.log; this is legal. # A multi-valued tensor would be an error. return torch.tensor(math.log(torch.tensor(2.0))) @bm.functional def log_3(): # Tensor constant, Tensor.log. # An ordinary constant would be an error. return torch.Tensor.log(torch.tensor(3.0)) @bm.functional def log_4(): # Tensor constant, instance log return torch.tensor([4.0, 4.0]).log() @bm.functional def log_5(): # Stochastic value, math.log return torch.tensor(math.log(beta() + 5.0)) @bm.functional def log_6(): # Stochastic value, Tensor.log return torch.Tensor.log(beta() + 6.0) @bm.functional def log_7(): # Stochastic value, instance log return (beta() + 7.0).log() @bm.functional def log10_1(): # Tensor constant, torch.log10. return torch.log10(torch.tensor(10.0)) @bm.functional def log10_2(): # Stochastic tensor, torch.log10. return torch.log10(beta() + 2.0) @bm.functional def log10_3(): # Tensor constant, Tensor.log10. return torch.Tensor.log10(torch.tensor(1000.0)) @bm.functional def log10_4(): # Tensor constant, instance log10 return torch.tensor(10000.0).log10() @bm.functional def log10_5(): # Stochastic value, Tensor.log10 return torch.Tensor.log10(beta() + 5.0) @bm.functional def log10_6(): # Stochastic value, instance log10 return (beta() + 6.0).log10() @bm.functional def log1p_1(): # Tensor constant, torch.log1p. return torch.log1p(torch.tensor(1.0)) @bm.functional def log1p_2(): # Stochastic tensor, torch.log1p. return torch.log1p(beta() + 2.0) @bm.functional def log1p_3(): # Tensor constant, torch.special.log1p. return torch.special.log1p(torch.tensor(3.0)) @bm.functional def log1p_4(): # Stochastic tensor, torch.special.log1p. return torch.special.log1p(beta() + 4.0) @bm.functional def log1p_5(): # Tensor constant, Tensor.log1p. return torch.Tensor.log1p(torch.tensor(5.0)) @bm.functional def log1p_6(): # Tensor constant, instance log1p return torch.tensor(6.0).log1p() @bm.functional def log1p_7(): # Stochastic value, Tensor.log1p return torch.Tensor.log1p(beta() + 7.0) @bm.functional def log1p_8(): # Stochastic value, instance log1p return (beta() + 8.0).log1p() @bm.functional def log2_1(): # Tensor constant, torch.log2. return torch.log2(torch.tensor(2.0)) @bm.functional def log2_2(): # Stochastic tensor, torch.log2. return torch.log2(beta() + 2.0) @bm.functional def log2_3(): # Tensor constant, Tensor.log2. return torch.Tensor.log2(torch.tensor(8.0)) @bm.functional def log2_4(): # Tensor constant, instance log2 return torch.tensor(16.0).log2() @bm.functional def log2_5(): # Stochastic value, Tensor.log2 return torch.Tensor.log2(beta() + 5.0) @bm.functional def log2_6(): # Stochastic value, instance log2 return (beta() + 6.0).log2() @bm.functional def sqrt_1(): # Tensor constant, torch.sqrt. return torch.sqrt(torch.tensor(1.0)) @bm.functional def sqrt_2(): # Stochastic tensor, torch.sqrt. return torch.sqrt(beta() + 2.0) @bm.functional def sqrt_3(): # Tensor constant, Tensor.sqrt. return torch.Tensor.sqrt(torch.tensor(9.0)) @bm.functional def sqrt_4(): # Tensor constant, instance sqrt return torch.tensor(16.0).sqrt() @bm.functional def sqrt_5(): # Stochastic value, Tensor.sqrt return torch.Tensor.sqrt(beta() + 5.0) @bm.functional def sqrt_6(): # Stochastic value, instance sqrt return (beta() + 6.0).sqrt() @bm.functional def exp_1(): # Ordinary constant, math.exp. Note that a functional is # required to return a tensor. Verify that ordinary # arithmetic still works in a model. return torch.tensor(math.exp(1.0)) @bm.functional def exp_2(): # Tensor constant, math.exp; this is legal. # A multi-valued tensor would be an error. return torch.tensor(math.exp(torch.tensor(2.0))) @bm.functional def exp_3(): # Tensor constant, Tensor.exp. # An ordinary constant would be an error. return torch.Tensor.exp(torch.tensor(3.0)) @bm.functional def exp_4(): # Tensor constant, instance exp return torch.tensor([4.0, 4.0]).exp() @bm.functional def exp_5(): # Stochastic value, math.exp return torch.tensor(math.exp(beta() + 5.0)) @bm.functional def exp_6(): # Stochastic value, Tensor.exp return torch.Tensor.exp(beta() + 6.0) @bm.functional def exp_7(): # Stochastic value, instance exp return (beta() + 7.0).exp() @bm.functional def exp2_1(): # Tensor constant, torch.exp2. return torch.exp2(torch.tensor(1.0)) @bm.functional def exp2_2(): # Stochastic tensor, torch.exp2. return torch.exp2(beta() + 2.0) @bm.functional def exp2_3(): # Tensor constant, torch.special.exp2. return torch.special.exp2(torch.tensor(3.0)) @bm.functional def exp2_4(): # Stochastic tensor, torch.special.exp2. return torch.special.exp2(beta() + 4.0) @bm.functional def exp2_5(): # Tensor constant, Tensor.exp2. return torch.Tensor.exp2(torch.tensor(5.0)) @bm.functional def exp2_6(): # Tensor constant, instance exp2 return torch.tensor(6.0).exp2() @bm.functional def exp2_7(): # Stochastic value, Tensor.exp2 return torch.Tensor.exp2(beta() + 7.0) @bm.functional def exp2_8(): # Stochastic value, instance exp2 return (beta() + 8.0).exp2() @bm.functional def pow_1(): # Ordinary constant, power operator. Note that a functional is # required to return a tensor. Verify that ordinary # arithmetic still works in a model. return torch.tensor(1.0**10.0) @bm.functional def pow_2(): # Tensor constant, power operator. return torch.tensor(2.0) ** 2.0 @bm.functional def pow_3(): # Tensor constant, Tensor.pow, named argument. return torch.Tensor.pow(torch.tensor(3.0), exponent=torch.tensor(3.0)) @bm.functional def pow_4(): # Tensor constant, instance pow, named argument return torch.tensor(4.0).pow(exponent=torch.tensor(4.0)) @bm.functional def pow_5(): # Stochastic value, power operator return beta() ** 5.0 @bm.functional def pow_6(): # Stochastic value, Tensor.pow return torch.Tensor.pow(torch.tensor(6.0), exponent=beta()) @bm.functional def pow_7(): # Stochastic value, instance exp return torch.tensor(7.0).pow(exponent=beta()) @bm.functional def pow_8(): # Constant values, operator.pow return operator.pow(torch.tensor(8.0), torch.tensor(2.0)) @bm.functional def pow_9(): # Stochastic values, operator.pow return operator.pow(beta(), torch.tensor(9.0)) @bm.functional def to_real_1(): # Calling float() causes a TO_REAL node to be emitted into the graph. # TODO: Is this actually a good idea? We already automatically insert # TO_REAL when necessary to make the type system happy. float() could # just be an identity on graph nodes instead of adding TO_REAL. # # Once again, a functional is required to return a tensor. return torch.tensor([float(bern()), 1.0]) @bm.functional def to_real_2(): # Similarly for the tensor float() instance method. return bern().float() @bm.functional def not_1(): # Ordinary constant, not operator. Note that a functional is # required to return a tensor. Verify that ordinary # arithmetic still works in a model. return torch.tensor(not 1.0) @bm.functional def not_2(): # Tensor constant; not operator. This is legal. # A multi-valued tensor would be an error. return torch.tensor(not torch.tensor(2.0)) @bm.functional def not_3(): # Tensor constant, Tensor.logical_not. # An ordinary constant would be an error. return torch.Tensor.logical_not(torch.tensor(3.0)) @bm.functional def not_4(): # Tensor constant, instance logical_not return torch.tensor(4.0).logical_not() @bm.functional def not_5(): # Stochastic value, not operator return torch.tensor(not (beta() + 5.0)) @bm.functional def not_6(): # Stochastic value, Tensor.logical_not return torch.Tensor.logical_not(beta() + 6.0) @bm.functional def not_7(): # Stochastic value, instance logical_not return (beta() + 7.0).logical_not() @bm.functional def not_8(): # Constant value, operator.not_ return torch.tensor(operator.not_(torch.tensor(8.0))) @bm.functional def not_9(): # Stochastic value, operator.not_ return torch.tensor(operator.not_(beta() + 9.0)) @bm.functional def neg_1(): # Ordinary constant, - operator. Note that a functional is # required to return a tensor. Verify that ordinary # arithmetic still works in a model. return torch.tensor(-1.0) @bm.functional def neg_2(): # Tensor constant; - operator. return -torch.tensor(2.0) @bm.functional def neg_3(): # Tensor constant, Tensor.neg. return torch.Tensor.neg(torch.tensor(3.0)) @bm.functional def neg_4(): # Tensor constant, instance neg return torch.tensor(4.0).neg() @bm.functional def neg_5(): # Stochastic value, - operator return -(beta() + 5.0) @bm.functional def neg_6(): # Stochastic value, Tensor.neg. # TODO: "negative" is a synonym; make it work too. return torch.Tensor.neg(beta() + 6.0) @bm.functional def neg_7(): # Stochastic value, instance neg # TODO: "negative" is a synonym; make it work too. return (beta() + 7.0).neg() @bm.functional def neg_8(): # Constant value, operator.neg return operator.neg(torch.tensor(8.0)) @bm.functional def neg_9(): # Stochastic value, operator.neg return operator.neg(beta() + 9.0) @bm.functional def add_1(): # Ordinary arithmetic, + operator return torch.tensor(1.0 + 1.0) @bm.functional def add_2(): # Tensor arithmetic, + operator return torch.tensor(2.0) + torch.tensor(2.0) @bm.functional def add_3(): # Tensor constants, Tensor.add. # TODO: Tensor.add takes an optional third argument with the semantics # add(a, b, c) --> a + b * c. Test that as well. return torch.Tensor.add(torch.tensor(3.0), torch.tensor(3.0)) @bm.functional def add_4(): # Tensor constant, instance add # TODO: Tensor.add takes an optional third argument with the semantics # a.add(b, c) --> a + b * c. Test that as well. return torch.tensor(4.0).add(torch.tensor(4.0)) @bm.functional def add_5(): # Stochastic value, + operator return beta() + 5.0 @bm.functional def add_6(): # Stochastic value, Tensor.add. return torch.Tensor.add(beta(), torch.tensor(6.0)) @bm.functional def add_7(): # Stochastic value, instance add return beta().add(torch.tensor(7.0)) @bm.functional def add_8(): # Constant values, operator.add return operator.add(torch.tensor(8.0), torch.tensor(8.0)) @bm.functional def add_9(): # Stochastic values, operator.add return operator.add(beta(), torch.tensor(9.0)) @bm.functional def and_1(): # Ordinary arithmetic, & operator return torch.tensor(1 & 3) @bm.functional def and_2(): # Tensor arithmetic, & operator return torch.tensor(6) & torch.tensor(2) @bm.functional def and_3(): # Tensor constants, Tensor.bitwise_and. return torch.Tensor.bitwise_and(torch.tensor(7), torch.tensor(3)) @bm.functional def and_4(): # Tensor constant, instance bitwise_and return torch.tensor(7).bitwise_and(torch.tensor(4)) @bm.functional def and_5(): # Stochastic value, & operator return beta() & 2 @bm.functional def and_6(): # Stochastic value, Tensor.bitwise_and return torch.Tensor.bitwise_and(beta(), torch.tensor(4)) @bm.functional def and_7(): # Stochastic value, instance bitwise_and return beta().bitwise_and(torch.tensor(8)) @bm.functional def and_8(): # Constant values, operator.and_ return operator.and_(torch.tensor(15), torch.tensor(8)) @bm.functional def and_9(): # Stochastic values, operator.and_ return operator.and_(beta(), torch.tensor(16)) @bm.functional def div_1(): # Ordinary arithmetic, / operator return torch.tensor(1.0 / 1.0) @bm.functional def div_2(): # Tensor arithmetic, / operator return torch.tensor(4.0) / torch.tensor(2.0) @bm.functional def div_3(): # Tensor constants, Tensor.div. # TODO: div also takes an optional rounding flag; test that. return torch.Tensor.div(torch.tensor(6.0), torch.tensor(2.0)) @bm.functional def div_4(): # Tensor constant, instance divide (a synonym). return torch.tensor(8.0).divide(torch.tensor(2.0)) @bm.functional def div_5(): # Stochastic value, / operator return beta() / 2.0 @bm.functional def div_6(): # Stochastic value, Tensor.true_divide (a synonym) return torch.Tensor.true_divide(beta(), torch.tensor(4.0)) @bm.functional def div_7(): # Stochastic value, instance div return beta().div(torch.tensor(8.0)) @bm.functional def div_8(): # Constant values, operator.truediv return operator.truediv(torch.tensor(16.0), torch.tensor(2.0)) @bm.functional def div_9(): # Stochastic values, operator.truediv return operator.truediv(beta(), torch.tensor(16.0)) @bm.functional def eq_1(): # Ordinary arithmetic, == operator return torch.tensor(1.0 == 1.0) @bm.functional def eq_2(): # Tensor arithmetic, == operator return torch.tensor(4.0) == torch.tensor(2.0) @bm.functional def eq_3(): # Tensor constants, Tensor.eq. return torch.Tensor.eq(torch.tensor(6.0), torch.tensor(2.0)) @bm.functional def eq_4(): # Tensor constant, instance eq return torch.tensor(8.0).eq(torch.tensor(2.0)) @bm.functional def eq_5(): # Stochastic value, == operator return beta() == 4.0 @bm.functional def eq_6(): # Stochastic value, Tensor.equal (a synonym) return torch.Tensor.equal(beta(), torch.tensor(8.0)) @bm.functional def eq_7(): # Stochastic value, instance equal return beta().equal(torch.tensor(16.0)) @bm.functional def eq_8(): # Constant values, operator.eq return operator.eq(torch.tensor(16.0), torch.tensor(2.0)) @bm.functional def eq_9(): # Stochastic values, operator.eq return operator.eq(beta(), torch.tensor(32.0)) @bm.functional def floordiv_1(): # Ordinary arithmetic, // operator return torch.tensor(1.0 // 1.0) @bm.functional def floordiv_2(): # Tensor arithmetic, // operator return torch.tensor(4.0) // torch.tensor(2.0) @bm.functional def floordiv_3(): # Tensor constants, Tensor.floor_divide. return torch.Tensor.floor_divide(torch.tensor(6.0), torch.tensor(2.0)) @bm.functional def floordiv_4(): # Tensor constant, instance floor_divide return torch.tensor(8.0).floor_divide(torch.tensor(2.0)) @bm.functional def floordiv_5(): # Stochastic value, // operator return beta() // 4.0 @bm.functional def floordiv_6(): # Stochastic value, Tensor.floor_divide return torch.Tensor.floor_divide(beta(), torch.tensor(8.0)) @bm.functional def floordiv_7(): # Stochastic value, instance floor_divide return beta().floor_divide(torch.tensor(16.0)) @bm.functional def floordiv_8(): # Constant values, operator.floordiv return operator.floordiv(torch.tensor(16.0), torch.tensor(2.0)) @bm.functional def floordiv_9(): # Stochastic values, operator.floordiv return operator.floordiv(beta(), torch.tensor(32.0)) @bm.functional def ge_1(): # Ordinary arithmetic, >= operator return torch.tensor(1.0 >= 1.0) @bm.functional def ge_2(): # Tensor arithmetic, >= operator return torch.tensor(4.0) >= torch.tensor(2.0) @bm.functional def ge_3(): # Tensor constants, Tensor.ge. return torch.Tensor.ge(torch.tensor(6.0), torch.tensor(2.0)) @bm.functional def ge_4(): # Tensor constant, instance ge return torch.tensor(8.0).ge(torch.tensor(2.0)) @bm.functional def ge_5(): # Stochastic value, >= operator return beta() >= 4.0 @bm.functional def ge_6(): # Stochastic value, Tensor.greater_equal (a synonym) return torch.Tensor.greater_equal(beta(), torch.tensor(8.0)) @bm.functional def ge_7(): # Stochastic value, instance greater_equal return beta().greater_equal(torch.tensor(16.0)) @bm.functional def ge_8(): # Constant values, operator.ge return operator.ge(torch.tensor(16.0), torch.tensor(2.0)) @bm.functional def ge_9(): # Stochastic values, operator.ge return operator.ge(beta(), torch.tensor(32.0)) @bm.functional def gt_1(): # Ordinary arithmetic, > operator return torch.tensor(1.0 > 1.0) @bm.functional def gt_2(): # Tensor arithmetic, > operator return torch.tensor(4.0) > torch.tensor(2.0) @bm.functional def gt_3(): # Tensor constants, Tensor.gt. return torch.Tensor.gt(torch.tensor(6.0), torch.tensor(2.0)) @bm.functional def gt_4(): # Tensor constant, instance gt return torch.tensor(8.0).gt(torch.tensor(2.0)) @bm.functional def gt_5(): # Stochastic value, > operator return beta() > 4.0 @bm.functional def gt_6(): # Stochastic value, Tensor.greater (a synonym) return torch.Tensor.greater(beta(), torch.tensor(8.0)) @bm.functional def gt_7(): # Stochastic value, instance greater return beta().greater(torch.tensor(16.0)) @bm.functional def gt_8(): # Constant values, operator.gt return operator.gt(torch.tensor(16.0), torch.tensor(2.0)) @bm.functional def gt_9(): # Stochastic values, operator.gt return operator.gt(beta(), torch.tensor(32.0)) @bm.functional def in_1(): # Ordinary arithmetic, in operator return torch.tensor(1.0 in [1.0]) @bm.functional def in_2(): # Tensor arithmetic, in operator return torch.tensor(torch.tensor(4.0) in torch.tensor(2.0)) @bm.functional def in_3(): # Stochastic value, in operator return torch.tensor(beta() in torch.tensor(4.0)) @bm.functional def in_4(): # Constant values, operator.contains return torch.tensor(operator.contains(torch.tensor(16.0), torch.tensor(2.0))) @bm.functional def in_5(): # Stochastic values, operator.contains return torch.tensor(operator.contains(torch.tensor(32.0), beta())) @bm.functional def is_1(): # Tensor arithmetic, is operator return torch.tensor(torch.tensor(4.0) is torch.tensor(2.0)) @bm.functional def is_2(): # Stochastic value, is operator return torch.tensor(beta() is torch.tensor(4.0)) @bm.functional def is_3(): # Constant values, operator.is_ return torch.tensor(operator.is_(torch.tensor(16.0), torch.tensor(2.0))) @bm.functional def is_4(): # Stochastic values, operator.is_ return torch.tensor(operator.is_(torch.tensor(32.0), beta())) @bm.functional def inv_1(): # Ordinary constant, ~ operator. return torch.tensor(~1) @bm.functional def inv_2(): # Tensor constant; ~ operator. return ~torch.tensor(2) @bm.functional def inv_3(): # Tensor constant, Tensor.bitwise_not. return torch.Tensor.bitwise_not(torch.tensor(3)) @bm.functional def inv_4(): # Tensor constant, instance bitwise_not return torch.tensor(4).bitwise_not() @bm.functional def inv_5(): # Stochastic value, ~ operator return ~(beta() + 5.0) @bm.functional def inv_6(): # Stochastic value, Tensor.bitwise_not return torch.Tensor.bitwise_not(beta() + 6.0) @bm.functional def inv_7(): # Stochastic value, instance bitwise_not return (beta() + 7.0).bitwise_not() @bm.functional def inv_8(): # Constant value, operator.inv return operator.inv(torch.tensor(8)) @bm.functional def inv_9(): # Stochastic value, operator.inv return operator.inv(beta()) @bm.functional def le_1(): # Ordinary arithmetic, <= operator return torch.tensor(1.0 <= 1.0) @bm.functional def le_2(): # Tensor arithmetic, <= operator return torch.tensor(4.0) <= torch.tensor(2.0) @bm.functional def le_3(): # Tensor constants, Tensor.le. return torch.Tensor.le(torch.tensor(6.0), torch.tensor(2.0)) @bm.functional def le_4(): # Tensor constant, instance le return torch.tensor(8.0).le(torch.tensor(2.0)) @bm.functional def le_5(): # Stochastic value, <= operator return beta() <= 4.0 @bm.functional def le_6(): # Stochastic value, Tensor.less_equal (a synonym) return torch.Tensor.less_equal(beta(), torch.tensor(8.0)) @bm.functional def le_7(): # Stochastic value, instance less_equal return beta().less_equal(torch.tensor(16.0)) @bm.functional def le_8(): # Constant values, operator.le return operator.le(torch.tensor(16.0), torch.tensor(2.0)) @bm.functional def le_9(): # Stochastic values, operator.le return operator.le(beta(), torch.tensor(32.0)) @bm.functional def lshift_1(): # Ordinary arithmetic, << operator return torch.tensor(1 << 1) @bm.functional def lshift_2(): # Tensor arithmetic, << operator return torch.tensor(2) << torch.tensor(2) @bm.functional def lshift_3(): # Tensor constants, Tensor.bitwise_left_shift. return torch.Tensor.bitwise_left_shift(torch.tensor(6), torch.tensor(2)) @bm.functional def lshift_4(): # Tensor constant, instance bitwise_left_shift return torch.tensor(8).bitwise_left_shift(torch.tensor(2)) @bm.functional def lshift_5(): # Stochastic value, << operator return beta() << 4 @bm.functional def lshift_6(): # Stochastic value, Tensor.bitwise_left_shift return torch.Tensor.bitwise_left_shift(beta(), torch.tensor(8)) @bm.functional def lshift_7(): # Stochastic value, instance bitwise_left_shift return beta().bitwise_left_shift(torch.tensor(16)) @bm.functional def lshift_8(): # Constant values, operator.lshift return operator.lshift(torch.tensor(16), torch.tensor(2)) @bm.functional def lshift_9(): # Stochastic values, operator.lshift return operator.lshift(beta(), torch.tensor(32)) @bm.functional def lt_1(): # Ordinary arithmetic, < operator return torch.tensor(1.0 < 1.0) @bm.functional def lt_2(): # Tensor arithmetic, < operator return torch.tensor(4.0) < torch.tensor(2.0) @bm.functional def lt_3(): # Tensor constants, Tensor.lt. return torch.Tensor.lt(torch.tensor(6.0), torch.tensor(2.0)) @bm.functional def lt_4(): # Tensor constant, instance lt return torch.tensor(8.0).lt(torch.tensor(2.0)) @bm.functional def lt_5(): # Stochastic value, < operator return beta() < 4.0 @bm.functional def lt_6(): # Stochastic value, Tensor.less (a synonym) return torch.Tensor.less(beta(), torch.tensor(8.0)) @bm.functional def lt_7(): # Stochastic value, instance less return beta().less(torch.tensor(16.0)) @bm.functional def lt_8(): # Constant values, operator.lt return operator.lt(torch.tensor(16.0), torch.tensor(2.0)) @bm.functional def lt_9(): # Stochastic values, operator.lt return operator.lt(beta(), torch.tensor(32.0)) @bm.functional def mod_1(): # Ordinary arithmetic, % operator return torch.tensor(1 % 1) @bm.functional def mod_2(): # Tensor arithmetic, % operator return torch.tensor(5.0) % torch.tensor(3.0) @bm.functional def mod_3(): # Tensor constants, Tensor.fmod. return torch.Tensor.fmod(torch.tensor(11.0), torch.tensor(4.0)) @bm.functional def mod_4(): # Tensor constant, instance remainder (a near synonym). return torch.tensor(9.0).remainder(torch.tensor(5.0)) @bm.functional def mod_5(): # Stochastic value, % operator return beta() % 5.0 @bm.functional def mod_6(): # Stochastic value, Tensor.fmod return torch.Tensor.fmod(beta(), torch.tensor(6.0)) @bm.functional def mod_7(): # Stochastic value, instance fmod return beta().fmod(torch.tensor(7.0)) @bm.functional def mod_8(): # Constant values, operator.mod return operator.mod(torch.tensor(17.0), torch.tensor(9.0)) @bm.functional def mod_9(): # Stochastic values, operator.mod return operator.mod(beta(), torch.tensor(9.0)) @bm.functional def mul_1(): # Ordinary arithmetic, * operator return torch.tensor(1.0 * 1.0) @bm.functional def mul_2(): # Tensor arithmetic, * operator return torch.tensor(2.0) * torch.tensor(2.0) @bm.functional def mul_3(): # Tensor constants, Tensor.mul. return torch.Tensor.mul(torch.tensor(3.0), torch.tensor(3.0)) @bm.functional def mul_4(): # Tensor constant, instance multiply (a synonym). return torch.tensor(4.0).multiply(torch.tensor(4.0)) @bm.functional def mul_5(): # Stochastic value, * operator return beta() * 5.0 @bm.functional def mul_6(): # Stochastic value, Tensor.multiply. return torch.Tensor.multiply(beta(), torch.tensor(6.0)) @bm.functional def mul_7(): # Stochastic value, instance mul return beta().mul(torch.tensor(7.0)) @bm.functional def mul_8(): # Constant values, operator.mul return operator.mul(torch.tensor(8.0), torch.tensor(8.0)) @bm.functional def mul_9(): # Stochastic values, operator.mul return operator.mul(beta(), torch.tensor(9.0)) @bm.functional def ne_1(): # Ordinary arithmetic, != operator return torch.tensor(1.0 != 1.0) @bm.functional def ne_2(): # Tensor arithmetic, != operator return torch.tensor(4.0) != torch.tensor(2.0) @bm.functional def ne_3(): # Tensor constants, Tensor.ne. return torch.Tensor.ne(torch.tensor(6.0), torch.tensor(2.0)) @bm.functional def ne_4(): # Tensor constant, instance ne return torch.tensor(8.0).ne(torch.tensor(2.0)) @bm.functional def ne_5(): # Stochastic value, != operator return beta() != 4.0 @bm.functional def ne_6(): # Stochastic value, Tensor.not_equal (a synonym) return torch.Tensor.not_equal(beta(), torch.tensor(8.0)) @bm.functional def ne_7(): # Stochastic value, instance not_equal return beta().not_equal(torch.tensor(16.0)) @bm.functional def ne_8(): # Constant values, operator.ne return operator.ne(torch.tensor(16.0), torch.tensor(2.0)) @bm.functional def ne_9(): # Stochastic values, operator.ne return operator.ne(beta(), torch.tensor(32.0)) @bm.functional def not_in_1(): # Ordinary arithmetic, not in operator return torch.tensor(1.0 not in [1.0]) @bm.functional def not_in_2(): # Tensor arithmetic, not in operator return torch.tensor(torch.tensor(4.0) not in torch.tensor(2.0)) @bm.functional def not_in_3(): # Stochastic value, not in operator return torch.tensor(beta() not in torch.tensor(4.0)) @bm.functional def is_not_1(): # Tensor arithmetic, is not operator return torch.tensor(torch.tensor(4.0) is not torch.tensor(2.0)) @bm.functional def is_not_2(): # Stochastic value, is not operator return torch.tensor(beta() is not torch.tensor(4.0)) @bm.functional def is_not_3(): # Constant values, operator.is_not return torch.tensor(operator.is_not(torch.tensor(16.0), torch.tensor(2.0))) @bm.functional def is_not_4(): # Stochastic values, operator.is_not return torch.tensor(operator.is_not(torch.tensor(32.0), beta())) @bm.functional def or_1(): # Ordinary arithmetic, | operator return torch.tensor(1 | 3) @bm.functional def or_2(): # Tensor arithmetic, | operator return torch.tensor(6) | torch.tensor(2) @bm.functional def or_3(): # Tensor constants, Tensor.bitwise_or. return torch.Tensor.bitwise_or(torch.tensor(7), torch.tensor(3)) @bm.functional def or_4(): # Tensor constant, instance bitwise_or return torch.tensor(7).bitwise_or(torch.tensor(4)) @bm.functional def or_5(): # Stochastic value, | operator return beta() | 2 @bm.functional def or_6(): # Stochastic value, Tensor.bitwise_or return torch.Tensor.bitwise_or(beta(), torch.tensor(4)) @bm.functional def or_7(): # Stochastic value, instance bitwise_or return beta().bitwise_or(torch.tensor(8)) @bm.functional def or_8(): # Constant values, operator.or_ return operator.or_(torch.tensor(15), torch.tensor(8)) @bm.functional def or_9(): # Stochastic values, operator.or_ return operator.or_(beta(), torch.tensor(16)) @bm.functional def pos_1(): # Ordinary constant, + operator. return torch.tensor(+1.0) @bm.functional def pos_2(): # Tensor constant; + operator. return +torch.tensor(2.0) @bm.functional def pos_5(): # Stochastic value, + operator return +(beta() + 5.0) @bm.functional def pos_8(): # Constant value, operator.pos return operator.pos(torch.tensor(8.0)) @bm.functional def pos_9(): # Stochastic value, operator.pos return operator.pos(beta() + 9.0) @bm.functional def rshift_1(): # Ordinary arithmetic, >> operator return torch.tensor(2 >> 1) @bm.functional def rshift_2(): # Tensor arithmetic, << operator return torch.tensor(4) >> torch.tensor(2) @bm.functional def rshift_3(): # Tensor constants, Tensor.bitwise_right_shift. return torch.Tensor.bitwise_right_shift(torch.tensor(6), torch.tensor(2)) @bm.functional def rshift_4(): # Tensor constant, instance bitwise_right_shift return torch.tensor(8).bitwise_right_shift(torch.tensor(2)) @bm.functional def rshift_5(): # Stochastic value, >> operator return beta() >> 4 @bm.functional def rshift_6(): # Stochastic value, Tensor.bitwise_right_shift return torch.Tensor.bitwise_right_shift(beta(), torch.tensor(8)) @bm.functional def rshift_7(): # Stochastic value, instance bitwise_right_shift return beta().bitwise_right_shift(torch.tensor(16)) @bm.functional def rshift_8(): # Constant values, operator.rshift return operator.rshift(torch.tensor(16), torch.tensor(2)) @bm.functional def rshift_9(): # Stochastic values, operator.rshift return operator.rshift(beta(), torch.tensor(32)) @bm.functional def sub_1(): # Ordinary arithmetic, - operator return torch.tensor(2.0 - 1.0) @bm.functional def sub_2(): # Tensor arithmetic, - operator return torch.tensor(5.0) - torch.tensor(2.0) @bm.functional def sub_3(): # Tensor constants, Tensor.sub. # TODO: Tensor.sub takes an optional third argument with the semantics # sub(a, b, c) --> a - b * c. Test that as well. return torch.Tensor.sub(torch.tensor(6.0), torch.tensor(3.0)) @bm.functional def sub_4(): # Tensor constant, instance add # TODO: Tensor.add takes an optional third argument with the semantics # a.sub(b, c) --> a - b * c. Test that as well. return torch.tensor(8.0).sub(torch.tensor(4.0)) @bm.functional def sub_5(): # Stochastic value, - operator return beta() - 5.0 @bm.functional def sub_6(): # Stochastic value, Tensor.subtract (a synonym) return torch.Tensor.subtract(beta(), torch.tensor(6.0)) @bm.functional def sub_7(): # Stochastic value, instance sub return beta().sub(torch.tensor(7.0)) @bm.functional def sub_8(): # Constant values, operator.sub return operator.sub(torch.tensor(16.0), torch.tensor(8.0)) @bm.functional def sub_9(): # Stochastic values, operator.sub return operator.sub(beta(), torch.tensor(9.0)) @bm.functional def sum_1(): # Constant value, Tensor.sum. return torch.Tensor.sum(torch.tensor([1.0, 1.0, 1.0])) @bm.functional def sum_2(): # Constant value, instance sum return torch.tensor([2.0, 2.0, 2.0]).sum() @bm.functional def sum_3(): # Stochastic value, Tensor.sum return torch.Tensor.sum(torch.tensor([beta(), norm(), 3.0])) @bm.functional def sum_4(): # Stochastic value, instance sum return torch.tensor([beta(), norm(), 4.0]).sum() @bm.functional def xor_1(): # Ordinary arithmetic, ^ operator return torch.tensor(1 ^ 3) @bm.functional def xor_2(): # Tensor arithmetic, ^ operator return torch.tensor(6) ^ torch.tensor(2) @bm.functional def xor_3(): # Tensor constants, Tensor.bitwise_xor. return torch.Tensor.bitwise_xor(torch.tensor(7), torch.tensor(3)) @bm.functional def xor_4(): # Tensor constant, instance bitwise_xor return torch.tensor(7).bitwise_xor(torch.tensor(4)) @bm.functional def xor_5(): # Stochastic value, ^ operator return beta() ^ 2 @bm.functional def xor_6(): # Stochastic value, Tensor.bitwise_xor return torch.Tensor.bitwise_xor(beta(), torch.tensor(4)) @bm.functional def xor_7(): # Stochastic value, instance bitwise_xor return beta().bitwise_xor(torch.tensor(8)) @bm.functional def xor_8(): # Constant values, operator.xor return operator.xor(torch.tensor(15), torch.tensor(8)) @bm.functional def xor_9(): # Stochastic values, operator.xor return operator.xor(beta(), torch.tensor(16)) @bm.functional def numpy_operand(): a = np.array([0.5, 0.25]) return a * beta() class BMGArithmeticTest(unittest.TestCase): def test_bmg_arithmetic_logical_not(self) -> None: self.maxDiff = None # "not" operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. # TODO: Add test cases for not operators on Bernoulli samples. queries = [ not_1(), not_2(), not_3(), not_4(), not_5(), not_6(), not_7(), not_8(), not_9(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses a 'not' operation unsupported by Bean Machine Graph. The unsupported node was created in function call not_5(). The model uses a 'not' operation unsupported by Bean Machine Graph. The unsupported node was created in function call not_6(). The model uses a 'not' operation unsupported by Bean Machine Graph. The unsupported node was created in function call not_7(). The model uses a 'not' operation unsupported by Bean Machine Graph. The unsupported node was created in function call not_9(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_float(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([to_real_1(), to_real_2()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=2]; N4[label=1]; N5[label=ToReal]; N6[label=1.0]; N7[label=ToMatrix]; N8[label=Query]; N9[label=Query]; N0 -> N1; N1 -> N2; N2 -> N5; N3 -> N7; N4 -> N7; N5 -> N7; N5 -> N9; N6 -> N7; N7 -> N8; } """ self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_log(self) -> None: self.maxDiff = None observed = BMGInference().to_dot( [ log_1(), log_2(), log_3(), log_4(), log_5(), log_6(), log_7(), ], {}, ) expected = """ digraph "graph" { N00[label=0.0]; N01[label=Query]; N02[label=0.6931471824645996]; N03[label=Query]; N04[label=1.0986123085021973]; N05[label=Query]; N06[label="[1.3862943649291992,1.3862943649291992]"]; N07[label=Query]; N08[label=2.0]; N09[label=Beta]; N10[label=Sample]; N11[label=ToPosReal]; N12[label=5.0]; N13[label="+"]; N14[label=Log]; N15[label=Query]; N16[label=6.0]; N17[label="+"]; N18[label=Log]; N19[label=Query]; N20[label=7.0]; N21[label="+"]; N22[label=Log]; N23[label=Query]; N00 -> N01; N02 -> N03; N04 -> N05; N06 -> N07; N08 -> N09; N08 -> N09; N09 -> N10; N10 -> N11; N11 -> N13; N11 -> N17; N11 -> N21; N12 -> N13; N13 -> N14; N14 -> N15; N16 -> N17; N17 -> N18; N18 -> N19; N20 -> N21; N21 -> N22; N22 -> N23; }""" self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_log10(self) -> None: self.maxDiff = None observed = BMGInference().to_dot( [ log10_1(), log10_2(), log10_3(), log10_4(), log10_5(), log10_6(), ], {}, ) expected = """ digraph "graph" { N00[label=1.0]; N01[label=Query]; N02[label=2.0]; N03[label=Beta]; N04[label=Sample]; N05[label=ToPosReal]; N06[label="+"]; N07[label=Log]; N08[label=0.43429448190325176]; N09[label="*"]; N10[label=Query]; N11[label=3.0]; N12[label=Query]; N13[label=4.0]; N14[label=Query]; N15[label=5.0]; N16[label="+"]; N17[label=Log]; N18[label="*"]; N19[label=Query]; N20[label=6.0]; N21[label="+"]; N22[label=Log]; N23[label="*"]; N24[label=Query]; N00 -> N01; N02 -> N03; N02 -> N03; N02 -> N06; N03 -> N04; N04 -> N05; N05 -> N06; N05 -> N16; N05 -> N21; N06 -> N07; N07 -> N09; N08 -> N09; N08 -> N18; N08 -> N23; N09 -> N10; N11 -> N12; N13 -> N14; N15 -> N16; N16 -> N17; N17 -> N18; N18 -> N19; N20 -> N21; N21 -> N22; N22 -> N23; N23 -> N24; } """ self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_log1p(self) -> None: self.maxDiff = None observed = BMGInference().to_dot( [ log1p_1(), log1p_2(), log1p_3(), log1p_4(), log1p_5(), log1p_6(), log1p_7(), log1p_8(), ], {}, ) expected = """ digraph "graph" { N00[label=0.6931471824645996]; N01[label=Query]; N02[label=2.0]; N03[label=Beta]; N04[label=Sample]; N05[label=1.0]; N06[label=ToPosReal]; N07[label="+"]; N08[label="+"]; N09[label=Log]; N10[label=Query]; N11[label=1.3862943649291992]; N12[label=Query]; N13[label=4.0]; N14[label="+"]; N15[label="+"]; N16[label=Log]; N17[label=Query]; N18[label=1.7917594909667969]; N19[label=Query]; N20[label=1.945910096168518]; N21[label=Query]; N22[label=7.0]; N23[label="+"]; N24[label="+"]; N25[label=Log]; N26[label=Query]; N27[label=8.0]; N28[label="+"]; N29[label="+"]; N30[label=Log]; N31[label=Query]; N00 -> N01; N02 -> N03; N02 -> N03; N02 -> N07; N03 -> N04; N04 -> N06; N05 -> N08; N05 -> N15; N05 -> N24; N05 -> N29; N06 -> N07; N06 -> N14; N06 -> N23; N06 -> N28; N07 -> N08; N08 -> N09; N09 -> N10; N11 -> N12; N13 -> N14; N14 -> N15; N15 -> N16; N16 -> N17; N18 -> N19; N20 -> N21; N22 -> N23; N23 -> N24; N24 -> N25; N25 -> N26; N27 -> N28; N28 -> N29; N29 -> N30; N30 -> N31; } """ self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_log2(self) -> None: self.maxDiff = None observed = BMGInference().to_dot( [ log2_1(), log2_2(), log2_3(), log2_4(), log2_5(), log2_6(), ], {}, ) expected = """ digraph "graph" { N00[label=1.0]; N01[label=Query]; N02[label=2.0]; N03[label=Beta]; N04[label=Sample]; N05[label=ToPosReal]; N06[label="+"]; N07[label=Log]; N08[label=1.4426950408889634]; N09[label="*"]; N10[label=Query]; N11[label=3.0]; N12[label=Query]; N13[label=4.0]; N14[label=Query]; N15[label=5.0]; N16[label="+"]; N17[label=Log]; N18[label="*"]; N19[label=Query]; N20[label=6.0]; N21[label="+"]; N22[label=Log]; N23[label="*"]; N24[label=Query]; N00 -> N01; N02 -> N03; N02 -> N03; N02 -> N06; N03 -> N04; N04 -> N05; N05 -> N06; N05 -> N16; N05 -> N21; N06 -> N07; N07 -> N09; N08 -> N09; N08 -> N18; N08 -> N23; N09 -> N10; N11 -> N12; N13 -> N14; N15 -> N16; N16 -> N17; N17 -> N18; N18 -> N19; N20 -> N21; N21 -> N22; N22 -> N23; N23 -> N24; } """ self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_sqrt(self) -> None: self.maxDiff = None observed = BMGInference().to_dot( [ sqrt_1(), sqrt_2(), sqrt_3(), sqrt_4(), sqrt_5(), sqrt_6(), ], {}, ) expected = """ digraph "graph" { N00[label=1.0]; N01[label=Query]; N02[label=2.0]; N03[label=Beta]; N04[label=Sample]; N05[label=ToPosReal]; N06[label="+"]; N07[label=0.5]; N08[label="**"]; N09[label=Query]; N10[label=3.0]; N11[label=Query]; N12[label=4.0]; N13[label=Query]; N14[label=5.0]; N15[label="+"]; N16[label="**"]; N17[label=Query]; N18[label=6.0]; N19[label="+"]; N20[label="**"]; N21[label=Query]; N00 -> N01; N02 -> N03; N02 -> N03; N02 -> N06; N03 -> N04; N04 -> N05; N05 -> N06; N05 -> N15; N05 -> N19; N06 -> N08; N07 -> N08; N07 -> N16; N07 -> N20; N08 -> N09; N10 -> N11; N12 -> N13; N14 -> N15; N15 -> N16; N16 -> N17; N18 -> N19; N19 -> N20; N20 -> N21; } """ self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_pow(self) -> None: self.maxDiff = None observed = BMGInference().to_dot( [ pow_1(), pow_2(), pow_3(), pow_4(), pow_5(), pow_6(), pow_7(), pow_8(), pow_9(), ], {}, ) expected = """ digraph "graph" { N00[label=1.0]; N01[label=Query]; N02[label=4.0]; N03[label=Query]; N04[label=27.0]; N05[label=Query]; N06[label=256.0]; N07[label=Query]; N08[label=2.0]; N09[label=Beta]; N10[label=Sample]; N11[label=5.0]; N12[label="**"]; N13[label=Query]; N14[label=6.0]; N15[label=ToPosReal]; N16[label="**"]; N17[label=Query]; N18[label=7.0]; N19[label="**"]; N20[label=Query]; N21[label=64.0]; N22[label=Query]; N23[label=9.0]; N24[label="**"]; N25[label=Query]; N00 -> N01; N02 -> N03; N04 -> N05; N06 -> N07; N08 -> N09; N08 -> N09; N09 -> N10; N10 -> N12; N10 -> N15; N10 -> N24; N11 -> N12; N12 -> N13; N14 -> N16; N15 -> N16; N15 -> N19; N16 -> N17; N18 -> N19; N19 -> N20; N21 -> N22; N23 -> N24; N24 -> N25; } """ self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_neg(self) -> None: self.maxDiff = None observed = BMGInference().to_dot( [ neg_1(), neg_2(), neg_3(), neg_4(), neg_5(), neg_6(), neg_7(), neg_8(), neg_9(), ], {}, ) expected = """ digraph "graph" { N00[label=-1.0]; N01[label=Query]; N02[label=-2.0]; N03[label=Query]; N04[label=-3.0]; N05[label=Query]; N06[label=-4.0]; N07[label=Query]; N08[label=2.0]; N09[label=Beta]; N10[label=Sample]; N11[label=ToPosReal]; N12[label=5.0]; N13[label="+"]; N14[label="-"]; N15[label=Query]; N16[label=6.0]; N17[label="+"]; N18[label="-"]; N19[label=Query]; N20[label=7.0]; N21[label="+"]; N22[label="-"]; N23[label=Query]; N24[label=-8.0]; N25[label=Query]; N26[label=9.0]; N27[label="+"]; N28[label="-"]; N29[label=Query]; N00 -> N01; N02 -> N03; N04 -> N05; N06 -> N07; N08 -> N09; N08 -> N09; N09 -> N10; N10 -> N11; N11 -> N13; N11 -> N17; N11 -> N21; N11 -> N27; N12 -> N13; N13 -> N14; N14 -> N15; N16 -> N17; N17 -> N18; N18 -> N19; N20 -> N21; N21 -> N22; N22 -> N23; N24 -> N25; N26 -> N27; N27 -> N28; N28 -> N29; } """ self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_add(self) -> None: self.maxDiff = None observed = BMGInference().to_dot( [ add_1(), add_2(), add_3(), add_4(), add_5(), add_6(), add_7(), add_8(), add_9(), ], {}, ) expected = """ digraph "graph" { N00[label=2.0]; N01[label=Query]; N02[label=4.0]; N03[label=Query]; N04[label=6.0]; N05[label=Query]; N06[label=8.0]; N07[label=Query]; N08[label=2.0]; N09[label=Beta]; N10[label=Sample]; N11[label=ToPosReal]; N12[label=5.0]; N13[label="+"]; N14[label=Query]; N15[label=6.0]; N16[label="+"]; N17[label=Query]; N18[label=7.0]; N19[label="+"]; N20[label=Query]; N21[label=16.0]; N22[label=Query]; N23[label=9.0]; N24[label="+"]; N25[label=Query]; N00 -> N01; N02 -> N03; N04 -> N05; N06 -> N07; N08 -> N09; N08 -> N09; N09 -> N10; N10 -> N11; N11 -> N13; N11 -> N16; N11 -> N19; N11 -> N24; N12 -> N13; N13 -> N14; N15 -> N16; N16 -> N17; N18 -> N19; N19 -> N20; N21 -> N22; N23 -> N24; N24 -> N25; } """ self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_and(self) -> None: self.maxDiff = None # & operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ and_1(), and_2(), and_3(), and_4(), and_5(), and_6(), and_7(), and_8(), and_9(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses a 'bitwise and' (&) operation unsupported by Bean Machine Graph. The unsupported node was created in function call and_5(). The model uses a 'bitwise and' (&) operation unsupported by Bean Machine Graph. The unsupported node was created in function call and_6(). The model uses a 'bitwise and' (&) operation unsupported by Bean Machine Graph. The unsupported node was created in function call and_7(). The model uses a 'bitwise and' (&) operation unsupported by Bean Machine Graph. The unsupported node was created in function call and_9(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_div(self) -> None: self.maxDiff = None observed = BMGInference().to_dot( [ div_1(), div_2(), div_3(), div_4(), div_5(), div_6(), div_7(), div_8(), div_9(), ], {}, ) expected = """ digraph "graph" { N00[label=1.0]; N01[label=Query]; N02[label=2.0]; N03[label=Query]; N04[label=3.0]; N05[label=Query]; N06[label=4.0]; N07[label=Query]; N08[label=2.0]; N09[label=Beta]; N10[label=Sample]; N11[label=0.5]; N12[label="*"]; N13[label=Query]; N14[label=0.25]; N15[label="*"]; N16[label=Query]; N17[label=0.125]; N18[label="*"]; N19[label=Query]; N20[label=8.0]; N21[label=Query]; N22[label=0.0625]; N23[label="*"]; N24[label=Query]; N00 -> N01; N02 -> N03; N04 -> N05; N06 -> N07; N08 -> N09; N08 -> N09; N09 -> N10; N10 -> N12; N10 -> N15; N10 -> N18; N10 -> N23; N11 -> N12; N12 -> N13; N14 -> N15; N15 -> N16; N17 -> N18; N18 -> N19; N20 -> N21; N22 -> N23; N23 -> N24; } """ self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_eq(self) -> None: self.maxDiff = None # "==" operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ eq_1(), eq_2(), eq_3(), eq_4(), eq_5(), eq_6(), eq_7(), eq_8(), eq_9(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses an equality (==) operation unsupported by Bean Machine Graph. The unsupported node was created in function call eq_5(). The model uses an equality (==) operation unsupported by Bean Machine Graph. The unsupported node was created in function call eq_6(). The model uses an equality (==) operation unsupported by Bean Machine Graph. The unsupported node was created in function call eq_7(). The model uses an equality (==) operation unsupported by Bean Machine Graph. The unsupported node was created in function call eq_9(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_floordiv(self) -> None: self.skipTest( "Disabling floordiv tests; produces a deprecation warning in torch." ) self.maxDiff = None # "floordiv" operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ floordiv_1(), floordiv_2(), floordiv_3(), floordiv_4(), floordiv_5(), floordiv_6(), floordiv_7(), floordiv_8(), floordiv_9(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses a // operation unsupported by Bean Machine Graph. The unsupported node was created in function call floordiv_5(). The model uses a // operation unsupported by Bean Machine Graph. The unsupported node was created in function call floordiv_6(). The model uses a // operation unsupported by Bean Machine Graph. The unsupported node was created in function call floordiv_7(). The model uses a // operation unsupported by Bean Machine Graph. The unsupported node was created in function call floordiv_9(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_ge(self) -> None: self.maxDiff = None # ">=" operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ ge_1(), ge_2(), ge_3(), ge_4(), ge_5(), ge_6(), ge_7(), ge_8(), ge_9(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses a 'greater than or equal' (>=) operation unsupported by Bean Machine Graph. The unsupported node was created in function call ge_5(). The model uses a 'greater than or equal' (>=) operation unsupported by Bean Machine Graph. The unsupported node was created in function call ge_6(). The model uses a 'greater than or equal' (>=) operation unsupported by Bean Machine Graph. The unsupported node was created in function call ge_7(). The model uses a 'greater than or equal' (>=) operation unsupported by Bean Machine Graph. The unsupported node was created in function call ge_9(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_gt(self) -> None: self.maxDiff = None # ">=" operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ gt_1(), gt_2(), gt_3(), gt_4(), gt_5(), gt_6(), gt_7(), gt_8(), gt_9(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses a 'greater than' (>) operation unsupported by Bean Machine Graph. The unsupported node was created in function call gt_5(). The model uses a 'greater than' (>) operation unsupported by Bean Machine Graph. The unsupported node was created in function call gt_6(). The model uses a 'greater than' (>) operation unsupported by Bean Machine Graph. The unsupported node was created in function call gt_7(). The model uses a 'greater than' (>) operation unsupported by Bean Machine Graph. The unsupported node was created in function call gt_9(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_in(self) -> None: self.maxDiff = None # in and not in operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ in_1(), in_2(), in_3(), in_4(), in_5(), not_in_1(), not_in_2(), not_in_3(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses a 'not in' operation unsupported by Bean Machine Graph. The unsupported node was created in function call not_in_3(). The model uses an 'in' operation unsupported by Bean Machine Graph. The unsupported node was created in function call in_3(). The model uses an 'in' operation unsupported by Bean Machine Graph. The unsupported node was created in function call in_5(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_is(self) -> None: self.maxDiff = None # is and is not operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ is_1(), is_2(), is_3(), is_4(), is_not_1(), is_not_2(), is_not_3(), is_not_4(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses an 'is not' operation unsupported by Bean Machine Graph. The unsupported node was created in function call is_not_2(). The model uses an 'is not' operation unsupported by Bean Machine Graph. The unsupported node was created in function call is_not_4(). The model uses an 'is' operation unsupported by Bean Machine Graph. The unsupported node was created in function call is_2(). The model uses an 'is' operation unsupported by Bean Machine Graph. The unsupported node was created in function call is_4().""" observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_inv(self) -> None: self.maxDiff = None # ~ operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ inv_1(), inv_2(), inv_3(), inv_4(), inv_5(), inv_6(), inv_7(), inv_8(), inv_9(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses a 'bitwise invert' (~) operation unsupported by Bean Machine Graph. The unsupported node was created in function call inv_5(). The model uses a 'bitwise invert' (~) operation unsupported by Bean Machine Graph. The unsupported node was created in function call inv_6(). The model uses a 'bitwise invert' (~) operation unsupported by Bean Machine Graph. The unsupported node was created in function call inv_7(). The model uses a 'bitwise invert' (~) operation unsupported by Bean Machine Graph. The unsupported node was created in function call inv_9(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_le(self) -> None: self.maxDiff = None # "<=" operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ le_1(), le_2(), le_3(), le_4(), le_5(), le_6(), le_7(), le_8(), le_9(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses a 'less than or equal' (<=) operation unsupported by Bean Machine Graph. The unsupported node was created in function call le_5(). The model uses a 'less than or equal' (<=) operation unsupported by Bean Machine Graph. The unsupported node was created in function call le_6(). The model uses a 'less than or equal' (<=) operation unsupported by Bean Machine Graph. The unsupported node was created in function call le_7(). The model uses a 'less than or equal' (<=) operation unsupported by Bean Machine Graph. The unsupported node was created in function call le_9(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_lshift(self) -> None: self.maxDiff = None # << operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ lshift_1(), lshift_2(), lshift_3(), lshift_4(), lshift_5(), lshift_6(), lshift_7(), lshift_8(), lshift_9(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses a 'left shift' (<<) operation unsupported by Bean Machine Graph. The unsupported node was created in function call lshift_5(). The model uses a 'left shift' (<<) operation unsupported by Bean Machine Graph. The unsupported node was created in function call lshift_6(). The model uses a 'left shift' (<<) operation unsupported by Bean Machine Graph. The unsupported node was created in function call lshift_7(). The model uses a 'left shift' (<<) operation unsupported by Bean Machine Graph. The unsupported node was created in function call lshift_9(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_lt(self) -> None: self.maxDiff = None # "<" operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ lt_1(), lt_2(), lt_3(), lt_4(), lt_5(), lt_6(), lt_7(), lt_8(), lt_9(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses a 'less than' (<) operation unsupported by Bean Machine Graph. The unsupported node was created in function call lt_5(). The model uses a 'less than' (<) operation unsupported by Bean Machine Graph. The unsupported node was created in function call lt_6(). The model uses a 'less than' (<) operation unsupported by Bean Machine Graph. The unsupported node was created in function call lt_7(). The model uses a 'less than' (<) operation unsupported by Bean Machine Graph. The unsupported node was created in function call lt_9(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_mod(self) -> None: self.maxDiff = None # % operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ mod_1(), mod_2(), mod_3(), mod_4(), mod_5(), mod_6(), mod_7(), mod_8(), mod_9(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses a modulus (%) operation unsupported by Bean Machine Graph. The unsupported node was created in function call mod_5(). The model uses a modulus (%) operation unsupported by Bean Machine Graph. The unsupported node was created in function call mod_6(). The model uses a modulus (%) operation unsupported by Bean Machine Graph. The unsupported node was created in function call mod_7(). The model uses a modulus (%) operation unsupported by Bean Machine Graph. The unsupported node was created in function call mod_9(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_mul(self) -> None: self.maxDiff = None observed = BMGInference().to_dot( [ mul_1(), mul_2(), mul_3(), mul_4(), mul_5(), mul_6(), mul_7(), mul_8(), mul_9(), ], {}, ) expected = """ digraph "graph" { N00[label=1.0]; N01[label=Query]; N02[label=4.0]; N03[label=Query]; N04[label=9.0]; N05[label=Query]; N06[label=16.0]; N07[label=Query]; N08[label=2.0]; N09[label=Beta]; N10[label=Sample]; N11[label=ToPosReal]; N12[label=5.0]; N13[label="*"]; N14[label=Query]; N15[label=6.0]; N16[label="*"]; N17[label=Query]; N18[label=7.0]; N19[label="*"]; N20[label=Query]; N21[label=64.0]; N22[label=Query]; N23[label=9.0]; N24[label="*"]; N25[label=Query]; N00 -> N01; N02 -> N03; N04 -> N05; N06 -> N07; N08 -> N09; N08 -> N09; N09 -> N10; N10 -> N11; N11 -> N13; N11 -> N16; N11 -> N19; N11 -> N24; N12 -> N13; N13 -> N14; N15 -> N16; N16 -> N17; N18 -> N19; N19 -> N20; N21 -> N22; N23 -> N24; N24 -> N25; } """ self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_ne(self) -> None: self.maxDiff = None # "!=" operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ ne_1(), ne_2(), ne_3(), ne_4(), ne_5(), ne_6(), ne_7(), ne_8(), ne_9(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses an inequality (!=) operation unsupported by Bean Machine Graph. The unsupported node was created in function call ne_5(). The model uses an inequality (!=) operation unsupported by Bean Machine Graph. The unsupported node was created in function call ne_6(). The model uses an inequality (!=) operation unsupported by Bean Machine Graph. The unsupported node was created in function call ne_7(). The model uses an inequality (!=) operation unsupported by Bean Machine Graph. The unsupported node was created in function call ne_9(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_or(self) -> None: self.maxDiff = None # & operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ or_1(), or_2(), or_3(), or_4(), or_5(), or_6(), or_7(), or_8(), or_9(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses a 'bitwise or' (|) operation unsupported by Bean Machine Graph. The unsupported node was created in function call or_5(). The model uses a 'bitwise or' (|) operation unsupported by Bean Machine Graph. The unsupported node was created in function call or_6(). The model uses a 'bitwise or' (|) operation unsupported by Bean Machine Graph. The unsupported node was created in function call or_7(). The model uses a 'bitwise or' (|) operation unsupported by Bean Machine Graph. The unsupported node was created in function call or_9(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_rshift(self) -> None: self.maxDiff = None # >> operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ rshift_1(), rshift_2(), rshift_3(), rshift_4(), rshift_5(), rshift_6(), rshift_7(), rshift_8(), rshift_9(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses a 'right shift' (>>) operation unsupported by Bean Machine Graph. The unsupported node was created in function call rshift_5(). The model uses a 'right shift' (>>) operation unsupported by Bean Machine Graph. The unsupported node was created in function call rshift_6(). The model uses a 'right shift' (>>) operation unsupported by Bean Machine Graph. The unsupported node was created in function call rshift_7(). The model uses a 'right shift' (>>) operation unsupported by Bean Machine Graph. The unsupported node was created in function call rshift_9(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_pos(self) -> None: self.maxDiff = None observed = BMGInference().to_dot( [ pos_1(), pos_2(), pos_5(), pos_8(), pos_9(), ], {}, ) expected = """ digraph "graph" { N00[label=1.0]; N01[label=Query]; N02[label=2.0]; N03[label=Query]; N04[label=2.0]; N05[label=Beta]; N06[label=Sample]; N07[label=ToPosReal]; N08[label=5.0]; N09[label="+"]; N10[label=Query]; N11[label=8.0]; N12[label=Query]; N13[label=9.0]; N14[label="+"]; N15[label=Query]; N00 -> N01; N02 -> N03; N04 -> N05; N04 -> N05; N05 -> N06; N06 -> N07; N07 -> N09; N07 -> N14; N08 -> N09; N09 -> N10; N11 -> N12; N13 -> N14; N14 -> N15; } """ self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_sub(self) -> None: self.maxDiff = None observed = BMGInference().to_dot( [ sub_1(), sub_2(), sub_3(), sub_4(), sub_5(), sub_6(), sub_7(), sub_8(), sub_9(), ], {}, ) expected = """ digraph "graph" { N00[label=1.0]; N01[label=Query]; N02[label=3.0]; N03[label=Query]; N04[label=Query]; N05[label=4.0]; N06[label=Query]; N07[label=2.0]; N08[label=Beta]; N09[label=Sample]; N10[label=ToReal]; N11[label=-5.0]; N12[label="+"]; N13[label=Query]; N14[label=-6.0]; N15[label="+"]; N16[label=Query]; N17[label=-7.0]; N18[label="+"]; N19[label=Query]; N20[label=8.0]; N21[label=Query]; N22[label=-9.0]; N23[label="+"]; N24[label=Query]; N00 -> N01; N02 -> N03; N02 -> N04; N05 -> N06; N07 -> N08; N07 -> N08; N08 -> N09; N09 -> N10; N10 -> N12; N10 -> N15; N10 -> N18; N10 -> N23; N11 -> N12; N12 -> N13; N14 -> N15; N15 -> N16; N17 -> N18; N18 -> N19; N20 -> N21; N22 -> N23; N23 -> N24; } """ self.assertEqual(expected.strip(), observed.strip()) def test_bmg_arithmetic_sum(self) -> None: self.maxDiff = None queries = [ sum_1(), sum_2(), sum_3(), sum_4(), ] expected = """ digraph "graph" { N00[label=3.0]; N01[label=Query]; N02[label=6.0]; N03[label=Query]; N04[label=2.0]; N05[label=Beta]; N06[label=Sample]; N07[label=0.0]; N08[label=1.0]; N09[label=Normal]; N10[label=Sample]; N11[label=3]; N12[label=1]; N13[label=ToReal]; N14[label=3.0]; N15[label=ToMatrix]; N16[label=MatrixSum]; N17[label=Query]; N18[label=4.0]; N19[label=ToMatrix]; N20[label=MatrixSum]; N21[label=Query]; N00 -> N01; N02 -> N03; N04 -> N05; N04 -> N05; N05 -> N06; N06 -> N13; N07 -> N09; N08 -> N09; N09 -> N10; N10 -> N15; N10 -> N19; N11 -> N15; N11 -> N19; N12 -> N15; N12 -> N19; N13 -> N15; N13 -> N19; N14 -> N15; N15 -> N16; N16 -> N17; N18 -> N19; N19 -> N20; N20 -> N21; } """ observed = BMGInference().to_dot(queries, {}) self.assertEqual(expected.strip(), observed.strip()) def test_bmg_arithmetic_xor(self) -> None: self.maxDiff = None # ^ operators are not yet properly supported by the compiler/BMG; # update this test when we get them working. queries = [ xor_1(), xor_2(), xor_3(), xor_4(), xor_5(), xor_6(), xor_7(), xor_8(), xor_9(), ] with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, {}, 1) expected = """ The model uses a 'bitwise xor' (^) operation unsupported by Bean Machine Graph. The unsupported node was created in function call xor_5(). The model uses a 'bitwise xor' (^) operation unsupported by Bean Machine Graph. The unsupported node was created in function call xor_6(). The model uses a 'bitwise xor' (^) operation unsupported by Bean Machine Graph. The unsupported node was created in function call xor_7(). The model uses a 'bitwise xor' (^) operation unsupported by Bean Machine Graph. The unsupported node was created in function call xor_9(). """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_exp(self) -> None: self.maxDiff = None observed = BMGInference().to_dot( [ exp_1(), exp_2(), exp_3(), exp_4(), exp_5(), exp_6(), exp_7(), ], {}, ) expected = """ digraph "graph" { N00[label=2.7182817459106445]; N01[label=Query]; N02[label=7.389056205749512]; N03[label=Query]; N04[label=20.08553695678711]; N05[label=Query]; N06[label="[54.598148345947266,54.598148345947266]"]; N07[label=Query]; N08[label=2.0]; N09[label=Beta]; N10[label=Sample]; N11[label=ToPosReal]; N12[label=5.0]; N13[label="+"]; N14[label=Exp]; N15[label=Query]; N16[label=6.0]; N17[label="+"]; N18[label=Exp]; N19[label=Query]; N20[label=7.0]; N21[label="+"]; N22[label=Exp]; N23[label=Query]; N00 -> N01; N02 -> N03; N04 -> N05; N06 -> N07; N08 -> N09; N08 -> N09; N09 -> N10; N10 -> N11; N11 -> N13; N11 -> N17; N11 -> N21; N12 -> N13; N13 -> N14; N14 -> N15; N16 -> N17; N17 -> N18; N18 -> N19; N20 -> N21; N21 -> N22; N22 -> N23; } """ self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_exp2(self) -> None: self.maxDiff = None observed = BMGInference().to_dot( [ exp2_1(), exp2_2(), exp2_3(), exp2_4(), exp2_5(), exp2_6(), exp2_7(), exp2_8(), ], {}, ) expected = """ digraph "graph" { N00[label=2.0]; N01[label=Query]; N02[label=2.0]; N03[label=Beta]; N04[label=Sample]; N05[label=ToPosReal]; N06[label="+"]; N07[label="**"]; N08[label=Query]; N09[label=8.0]; N10[label=Query]; N11[label=4.0]; N12[label="+"]; N13[label="**"]; N14[label=Query]; N15[label=32.0]; N16[label=Query]; N17[label=64.0]; N18[label=Query]; N19[label=7.0]; N20[label="+"]; N21[label="**"]; N22[label=Query]; N23[label=8.0]; N24[label="+"]; N25[label="**"]; N26[label=Query]; N00 -> N01; N02 -> N03; N02 -> N03; N02 -> N06; N02 -> N07; N02 -> N13; N02 -> N21; N02 -> N25; N03 -> N04; N04 -> N05; N05 -> N06; N05 -> N12; N05 -> N20; N05 -> N24; N06 -> N07; N07 -> N08; N09 -> N10; N11 -> N12; N12 -> N13; N13 -> N14; N15 -> N16; N17 -> N18; N19 -> N20; N20 -> N21; N21 -> N22; N23 -> N24; N24 -> N25; N25 -> N26; } """ self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_expm1(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([expm1_prob()], {}) expected = """ digraph "graph" { N0[label=2.0]; N1[label=Beta]; N2[label=Sample]; N3[label=ToPosReal]; N4[label=ExpM1]; N5[label=Query]; N0 -> N1; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N4; N4 -> N5; }""" self.assertEqual(observed.strip(), expected.strip()) observed = BMGInference().to_dot([expm1_real()], {}) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=ExpM1]; N5[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; N4 -> N5; }""" self.assertEqual(observed.strip(), expected.strip()) observed = BMGInference().to_dot([expm1_negreal()], {}) expected = """ digraph "graph" { N0[label=1.0]; N1[label=HalfCauchy]; N2[label=Sample]; N3[label="-"]; N4[label=ExpM1]; N5[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N4; N4 -> N5; }""" self.assertEqual(observed.strip(), expected.strip()) def test_bmg_arithmetic_logistic(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([logistic_prob()], {}) expected = """ digraph "graph" { N0[label=2.0]; N1[label=Beta]; N2[label=Sample]; N3[label=ToReal]; N4[label=Logistic]; N5[label=Query]; N0 -> N1; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N4; N4 -> N5; }""" self.assertEqual(observed.strip(), expected.strip()) observed = BMGInference().to_dot([logistic_real()], {}) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=Logistic]; N5[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; N4 -> N5; }""" self.assertEqual(observed.strip(), expected.strip()) observed = BMGInference().to_dot([logistic_negreal()], {}) expected = """ digraph "graph" { N0[label=1.0]; N1[label=HalfCauchy]; N2[label=Sample]; N3[label="-"]; N4[label=ToReal]; N5[label=Logistic]; N6[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N4; N4 -> N5; N5 -> N6; }""" self.assertEqual(observed.strip(), expected.strip()) def test_bmg_misc_arithmetic(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([stochastic_arithmetic()], {}) expected = """ digraph "graph" { N00[label=0.5]; N01[label=Bernoulli]; N02[label=Sample]; N03[label=0.6000000238418579]; N04[label=Bernoulli]; N05[label=Sample]; N06[label=-0.010050326585769653]; N07[label=-4.605170249938965]; N08[label=0.0]; N09[label=if]; N10[label=if]; N11[label="+"]; N12[label=Exp]; N13[label=complement]; N14[label=Bernoulli]; N15[label=Sample]; N16[label=Query]; N00 -> N01; N01 -> N02; N02 -> N09; N03 -> N04; N04 -> N05; N05 -> N10; N06 -> N11; N07 -> N09; N07 -> N10; N08 -> N09; N08 -> N10; N09 -> N11; N10 -> N11; N11 -> N12; N12 -> N13; N13 -> N14; N14 -> N15; N15 -> N16; } """ self.assertEqual(expected.strip(), observed.strip()) def test_bmg_neg_of_neg(self) -> None: # This test shows that we treat torch.neg the same as the unary negation # operator when generating a graph. Note that since this this produces # a neg-of-neg situation, the optimizer then removes both of them. self.maxDiff = None observed = BMGInference().to_dot([neg_of_neg()], {}) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=Normal]; N5[label=Sample]; N6[label=Query]; N0 -> N2; N1 -> N2; N1 -> N4; N2 -> N3; N3 -> N4; N4 -> N5; N5 -> N6; } """ self.assertEqual(observed.strip(), expected.strip()) def test_bmg_subtractions(self) -> None: # TODO: Notice in this code generation we end up with # the path: # # Beta -> Sample -> ToPosReal -> Negate -> ToReal -> MultiAdd # # We could optimize this path to # # Beta -> Sample -> ToReal -> Negate -> MultiAdd self.maxDiff = None observed = BMGInference().to_dot([subtractions()], {}) expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=2.0]; N05[label=Beta]; N06[label=Sample]; N07[label=HalfCauchy]; N08[label=Sample]; N09[label=ToPosReal]; N10[label="-"]; N11[label=ToReal]; N12[label=ToReal]; N13[label="-"]; N14[label=ToReal]; N15[label="+"]; N16[label="-"]; N17[label="+"]; N18[label=Query]; N00 -> N02; N01 -> N02; N01 -> N07; N02 -> N03; N03 -> N17; N04 -> N05; N04 -> N05; N05 -> N06; N06 -> N09; N06 -> N12; N07 -> N08; N08 -> N13; N09 -> N10; N10 -> N11; N11 -> N17; N12 -> N15; N13 -> N14; N14 -> N15; N15 -> N16; N16 -> N17; N17 -> N18; } """ self.assertEqual(expected.strip(), observed.strip()) def test_unsupported_operands(self) -> None: self.maxDiff = None with self.assertRaises(ValueError) as ex: BMGInference().infer([unsupported_add()], {}, 1) expected = ( "A constant value used as an operand of a stochastic " + "operation is required to be bool, int, float or tensor. " + "This model uses a value of type str." ) observed = str(ex.exception) self.assertEqual(expected.strip(), observed.strip()) def test_tensor_mutations_augmented_assignment(self) -> None: self.maxDiff = None # See notes in mutating_assignments() for details observed = BMGInference().to_dot([mutating_assignments()], {}) expected = """ digraph "graph" { N0[label=2.0]; N1[label=Beta]; N2[label=Sample]; N3[label=ToPosReal]; N4[label=3.0]; N5[label="*"]; N6[label=7.0]; N7[label="+"]; N8[label=Query]; N0 -> N1; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N5; N4 -> N5; N5 -> N7; N6 -> N7; N7 -> N8; } """ self.assertEqual(expected.strip(), observed.strip()) def test_numpy_operand(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([numpy_operand()], {}) expected = """ digraph "graph" { N0[label=2.0]; N1[label=Beta]; N2[label=Sample]; N3[label="[0.5,0.25]"]; N4[label=MatrixScale]; N5[label=Query]; N0 -> N1; N0 -> N1; N1 -> N2; N2 -> N4; N3 -> N4; N4 -> N5; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/bmg_arithmetic_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.model.rv_identifier import RVIdentifier from torch import Size def _rv_id() -> RVIdentifier: return RVIdentifier(lambda a, b: a, (1, 1)) class FixMatrixOpTest(unittest.TestCase): def test_fix_matrix_addition(self) -> None: self.maxDiff = None bmg = BMGraphBuilder() zeros = bmg.add_real_matrix(torch.zeros(2)) ones = bmg.add_pos_real_matrix(torch.ones(2)) tensor_elements = [] for index in range(0, 2): index_node = bmg.add_natural(index) index_mu = bmg.add_vector_index(zeros, index_node) index_sigma = bmg.add_vector_index(ones, index_node) normal = bmg.add_normal(index_mu, index_sigma) sample = bmg.add_sample(normal) tensor_elements.append(sample) matrix = bmg.add_tensor(Size([2]), *tensor_elements) exp = bmg.add_matrix_exp(matrix) mult = bmg.add_elementwise_multiplication(matrix, matrix) add = bmg.add_matrix_addition(exp, mult) bmg.add_query(add, _rv_id()) observed = to_dot(bmg, after_transform=False) expectation = """ digraph "graph" { N00[label="[0.0,0.0]"]; N01[label=0]; N02[label=index]; N03[label="[1.0,1.0]"]; N04[label=index]; N05[label=Normal]; N06[label=Sample]; N07[label=1]; N08[label=index]; N09[label=index]; N10[label=Normal]; N11[label=Sample]; N12[label=Tensor]; N13[label=MatrixExp]; N14[label=ElementwiseMult]; N15[label=MatrixAdd]; N16[label=Query]; N00 -> N02[label=left]; N00 -> N08[label=left]; N01 -> N02[label=right]; N01 -> N04[label=right]; N02 -> N05[label=mu]; N03 -> N04[label=left]; N03 -> N09[label=left]; N04 -> N05[label=sigma]; N05 -> N06[label=operand]; N06 -> N12[label=left]; N07 -> N08[label=right]; N07 -> N09[label=right]; N08 -> N10[label=mu]; N09 -> N10[label=sigma]; N10 -> N11[label=operand]; N11 -> N12[label=right]; N12 -> N13[label=operand]; N12 -> N14[label=left]; N12 -> N14[label=right]; N13 -> N15[label=left]; N14 -> N15[label=right]; N15 -> N16[label=operator]; } """ self.assertEqual(expectation.strip(), observed.strip()) observed = to_dot(bmg, after_transform=True) expectation = """ digraph "graph" { N00[label="[0.0,0.0]"]; N01[label=0]; N02[label=index]; N03[label="[1.0,1.0]"]; N04[label=index]; N05[label=Normal]; N06[label=Sample]; N07[label=1]; N08[label=index]; N09[label=index]; N10[label=Normal]; N11[label=Sample]; N12[label=2]; N13[label=ToMatrix]; N14[label=MatrixExp]; N15[label=ToRealMatrix]; N16[label=ElementwiseMult]; N17[label=MatrixAdd]; N18[label=Query]; N00 -> N02[label=left]; N00 -> N08[label=left]; N01 -> N02[label=right]; N01 -> N04[label=right]; N02 -> N05[label=mu]; N03 -> N04[label=left]; N03 -> N09[label=left]; N04 -> N05[label=sigma]; N05 -> N06[label=operand]; N06 -> N13[label=0]; N07 -> N08[label=right]; N07 -> N09[label=right]; N07 -> N13[label=columns]; N08 -> N10[label=mu]; N09 -> N10[label=sigma]; N10 -> N11[label=operand]; N11 -> N13[label=1]; N12 -> N13[label=rows]; N13 -> N14[label=operand]; N13 -> N16[label=left]; N13 -> N16[label=right]; N14 -> N15[label=operand]; N15 -> N17[label=left]; N16 -> N17[label=right]; N17 -> N18[label=operator]; } """ self.assertEqual(expectation.strip(), observed.strip()) generated_graph = to_bmg_graph(bmg) observed = generated_graph.graph.to_dot() expectation = """ digraph "graph" { N0[label="matrix"]; N1[label="0"]; N2[label="Index"]; N3[label="matrix"]; N4[label="Index"]; N5[label="Normal"]; N6[label="~"]; N7[label="1"]; N8[label="Index"]; N9[label="Index"]; N10[label="Normal"]; N11[label="~"]; N12[label="2"]; N13[label="ToMatrix"]; N14[label="MatrixExp"]; N15[label="ToReal"]; N16[label="ElementwiseMultiply"]; N17[label="MatrixAdd"]; N0 -> N2; N0 -> N8; N1 -> N2; N1 -> N4; N2 -> N5; N3 -> N4; N3 -> N9; N4 -> N5; N5 -> N6; N6 -> N13; N7 -> N8; N7 -> N9; N7 -> N13; N8 -> N10; N9 -> N10; N10 -> N11; N11 -> N13; N12 -> N13; N13 -> N14; N13 -> N16; N13 -> N16; N14 -> N15; N15 -> N17; N16 -> N17; Q0[label="Query"]; N17 -> Q0; } """ self.assertEqual(expectation.strip(), observed.strip()) def test_fix_elementwise_multiply(self) -> None: self.maxDiff = None bmg = BMGraphBuilder() zeros = bmg.add_real_matrix(torch.zeros(2)) ones = bmg.add_pos_real_matrix(torch.ones(2)) tensor_elements = [] for index in range(0, 2): index_node = bmg.add_natural(index) index_mu = bmg.add_vector_index(zeros, index_node) index_sigma = bmg.add_vector_index(ones, index_node) normal = bmg.add_normal(index_mu, index_sigma) sample = bmg.add_sample(normal) tensor_elements.append(sample) matrix = bmg.add_tensor(Size([2]), *tensor_elements) exp = bmg.add_matrix_exp(matrix) add = bmg.add_matrix_addition(matrix, matrix) mult = bmg.add_elementwise_multiplication(exp, add) sum = bmg.add_matrix_sum(mult) bmg.add_query(sum, _rv_id()) observed = to_dot(bmg, after_transform=False) expectation = """ digraph "graph" { N00[label="[0.0,0.0]"]; N01[label=0]; N02[label=index]; N03[label="[1.0,1.0]"]; N04[label=index]; N05[label=Normal]; N06[label=Sample]; N07[label=1]; N08[label=index]; N09[label=index]; N10[label=Normal]; N11[label=Sample]; N12[label=Tensor]; N13[label=MatrixExp]; N14[label=MatrixAdd]; N15[label=ElementwiseMult]; N16[label=MatrixSum]; N17[label=Query]; N00 -> N02[label=left]; N00 -> N08[label=left]; N01 -> N02[label=right]; N01 -> N04[label=right]; N02 -> N05[label=mu]; N03 -> N04[label=left]; N03 -> N09[label=left]; N04 -> N05[label=sigma]; N05 -> N06[label=operand]; N06 -> N12[label=left]; N07 -> N08[label=right]; N07 -> N09[label=right]; N08 -> N10[label=mu]; N09 -> N10[label=sigma]; N10 -> N11[label=operand]; N11 -> N12[label=right]; N12 -> N13[label=operand]; N12 -> N14[label=left]; N12 -> N14[label=right]; N13 -> N15[label=left]; N14 -> N15[label=right]; N15 -> N16[label=operand]; N16 -> N17[label=operator]; } """ self.assertEqual(expectation.strip(), observed.strip()) observed = to_dot(bmg, after_transform=True) expectation = """ digraph "graph" { N00[label="[0.0,0.0]"]; N01[label=0]; N02[label=index]; N03[label="[1.0,1.0]"]; N04[label=index]; N05[label=Normal]; N06[label=Sample]; N07[label=1]; N08[label=index]; N09[label=index]; N10[label=Normal]; N11[label=Sample]; N12[label=2]; N13[label=ToMatrix]; N14[label=MatrixExp]; N15[label=ToRealMatrix]; N16[label=MatrixAdd]; N17[label=ElementwiseMult]; N18[label=MatrixSum]; N19[label=Query]; N00 -> N02[label=left]; N00 -> N08[label=left]; N01 -> N02[label=right]; N01 -> N04[label=right]; N02 -> N05[label=mu]; N03 -> N04[label=left]; N03 -> N09[label=left]; N04 -> N05[label=sigma]; N05 -> N06[label=operand]; N06 -> N13[label=0]; N07 -> N08[label=right]; N07 -> N09[label=right]; N07 -> N13[label=columns]; N08 -> N10[label=mu]; N09 -> N10[label=sigma]; N10 -> N11[label=operand]; N11 -> N13[label=1]; N12 -> N13[label=rows]; N13 -> N14[label=operand]; N13 -> N16[label=left]; N13 -> N16[label=right]; N14 -> N15[label=operand]; N15 -> N17[label=left]; N16 -> N17[label=right]; N17 -> N18[label=operand]; N18 -> N19[label=operator]; } """ self.assertEqual(expectation.strip(), observed.strip()) generated_graph = to_bmg_graph(bmg) observed = generated_graph.graph.to_dot() expectation = """ digraph "graph" { N0[label="matrix"]; N1[label="0"]; N2[label="Index"]; N3[label="matrix"]; N4[label="Index"]; N5[label="Normal"]; N6[label="~"]; N7[label="1"]; N8[label="Index"]; N9[label="Index"]; N10[label="Normal"]; N11[label="~"]; N12[label="2"]; N13[label="ToMatrix"]; N14[label="MatrixExp"]; N15[label="ToReal"]; N16[label="MatrixAdd"]; N17[label="ElementwiseMultiply"]; N18[label="MatrixSum"]; N0 -> N2; N0 -> N8; N1 -> N2; N1 -> N4; N2 -> N5; N3 -> N4; N3 -> N9; N4 -> N5; N5 -> N6; N6 -> N13; N7 -> N8; N7 -> N9; N7 -> N13; N8 -> N10; N9 -> N10; N10 -> N11; N11 -> N13; N12 -> N13; N13 -> N14; N13 -> N16; N13 -> N16; N14 -> N15; N15 -> N17; N16 -> N17; N17 -> N18; Q0[label="Query"]; N18 -> Q0; } """ self.assertEqual(expectation.strip(), observed.strip()) def test_fix_matrix_sum(self) -> None: self.maxDiff = None bmg = BMGraphBuilder() probs = bmg.add_real_matrix(torch.tensor([[0.75, 0.25], [0.125, 0.875]])) tensor_elements = [] for row in range(0, 2): row_node = bmg.add_natural(row) row_prob = bmg.add_column_index(probs, row_node) for column in range(0, 2): col_index = bmg.add_natural(column) prob = bmg.add_vector_index(row_prob, col_index) bernoulli = bmg.add_bernoulli(prob) sample = bmg.add_sample(bernoulli) tensor_elements.append(sample) matrix = bmg.add_tensor(Size([2, 2]), *tensor_elements) sum = bmg.add_matrix_sum(matrix) bmg.add_query(sum, _rv_id()) observed_beanstalk = to_dot(bmg, after_transform=True) expected = """ digraph "graph" { N00[label="[[0.75,0.25],\\\\n[0.125,0.875]]"]; N01[label=0]; N02[label=ColumnIndex]; N03[label=index]; N04[label=ToProb]; N05[label=Bernoulli]; N06[label=Sample]; N07[label=1]; N08[label=index]; N09[label=ToProb]; N10[label=Bernoulli]; N11[label=Sample]; N12[label=ColumnIndex]; N13[label=index]; N14[label=ToProb]; N15[label=Bernoulli]; N16[label=Sample]; N17[label=index]; N18[label=ToProb]; N19[label=Bernoulli]; N20[label=Sample]; N21[label=2]; N22[label=ToMatrix]; N23[label=ToRealMatrix]; N24[label=MatrixSum]; N25[label=Query]; N00 -> N02[label=left]; N00 -> N12[label=left]; N01 -> N02[label=right]; N01 -> N03[label=right]; N01 -> N13[label=right]; N02 -> N03[label=left]; N02 -> N08[label=left]; N03 -> N04[label=operand]; N04 -> N05[label=probability]; N05 -> N06[label=operand]; N06 -> N22[label=0]; N07 -> N08[label=right]; N07 -> N12[label=right]; N07 -> N17[label=right]; N08 -> N09[label=operand]; N09 -> N10[label=probability]; N10 -> N11[label=operand]; N11 -> N22[label=1]; N12 -> N13[label=left]; N12 -> N17[label=left]; N13 -> N14[label=operand]; N14 -> N15[label=probability]; N15 -> N16[label=operand]; N16 -> N22[label=2]; N17 -> N18[label=operand]; N18 -> N19[label=probability]; N19 -> N20[label=operand]; N20 -> N22[label=3]; N21 -> N22[label=columns]; N21 -> N22[label=rows]; N22 -> N23[label=operand]; N23 -> N24[label=operand]; N24 -> N25[label=operator]; } """ self.assertEqual(observed_beanstalk.strip(), expected.strip()) generated_graph = to_bmg_graph(bmg) observed_bmg = generated_graph.graph.to_dot() expectation = """ digraph "graph" { N0[label="matrix"]; N1[label="0"]; N2[label="ColumnIndex"]; N3[label="Index"]; N4[label="ToProb"]; N5[label="Bernoulli"]; N6[label="~"]; N7[label="1"]; N8[label="Index"]; N9[label="ToProb"]; N10[label="Bernoulli"]; N11[label="~"]; N12[label="ColumnIndex"]; N13[label="Index"]; N14[label="ToProb"]; N15[label="Bernoulli"]; N16[label="~"]; N17[label="Index"]; N18[label="ToProb"]; N19[label="Bernoulli"]; N20[label="~"]; N21[label="2"]; N22[label="ToMatrix"]; N23[label="ToReal"]; N24[label="MatrixSum"]; N0 -> N2; N0 -> N12; N1 -> N2; N1 -> N3; N1 -> N13; N2 -> N3; N2 -> N8; N3 -> N4; N4 -> N5; N5 -> N6; N6 -> N22; N7 -> N8; N7 -> N12; N7 -> N17; N8 -> N9; N9 -> N10; N10 -> N11; N11 -> N22; N12 -> N13; N12 -> N17; N13 -> N14; N14 -> N15; N15 -> N16; N16 -> N22; N17 -> N18; N18 -> N19; N19 -> N20; N20 -> N22; N21 -> N22; N21 -> N22; N22 -> N23; N23 -> N24; Q0[label="Query"]; N24 -> Q0; } """ self.assertEqual(expectation.strip(), observed_bmg.strip()) def test_fix_matrix_exp_log_phi(self) -> None: self.maxDiff = None bmg = BMGraphBuilder() probs = bmg.add_real_matrix(torch.tensor([[0.75, 0.25], [0.125, 0.875]])) tensor_elements = [] for row in range(0, 2): row_node = bmg.add_natural(row) row_prob = bmg.add_column_index(probs, row_node) for column in range(0, 2): col_index = bmg.add_natural(column) prob = bmg.add_vector_index(row_prob, col_index) bernoulli = bmg.add_bernoulli(prob) sample = bmg.add_sample(bernoulli) tensor_elements.append(sample) matrix = bmg.add_tensor(Size([2, 2]), *tensor_elements) me = bmg.add_matrix_exp(matrix) ml = bmg.add_matrix_log(matrix) mp = bmg.add_matrix_phi(matrix) bmg.add_query(me, _rv_id()) bmg.add_query(ml, _rv_id()) bmg.add_query(mp, _rv_id()) observed_beanstalk = to_dot(bmg, after_transform=True) expectation = """ digraph "graph" { N00[label="[[0.75,0.25],\\\\n[0.125,0.875]]"]; N01[label=0]; N02[label=ColumnIndex]; N03[label=index]; N04[label=ToProb]; N05[label=Bernoulli]; N06[label=Sample]; N07[label=1]; N08[label=index]; N09[label=ToProb]; N10[label=Bernoulli]; N11[label=Sample]; N12[label=ColumnIndex]; N13[label=index]; N14[label=ToProb]; N15[label=Bernoulli]; N16[label=Sample]; N17[label=index]; N18[label=ToProb]; N19[label=Bernoulli]; N20[label=Sample]; N21[label=2]; N22[label=ToMatrix]; N23[label=ToRealMatrix]; N24[label=MatrixExp]; N25[label=Query]; N26[label=ToPosRealMatrix]; N27[label=MatrixLog]; N28[label=Query]; N29[label=MatrixPhi]; N30[label=Query]; N00 -> N02[label=left]; N00 -> N12[label=left]; N01 -> N02[label=right]; N01 -> N03[label=right]; N01 -> N13[label=right]; N02 -> N03[label=left]; N02 -> N08[label=left]; N03 -> N04[label=operand]; N04 -> N05[label=probability]; N05 -> N06[label=operand]; N06 -> N22[label=0]; N07 -> N08[label=right]; N07 -> N12[label=right]; N07 -> N17[label=right]; N08 -> N09[label=operand]; N09 -> N10[label=probability]; N10 -> N11[label=operand]; N11 -> N22[label=1]; N12 -> N13[label=left]; N12 -> N17[label=left]; N13 -> N14[label=operand]; N14 -> N15[label=probability]; N15 -> N16[label=operand]; N16 -> N22[label=2]; N17 -> N18[label=operand]; N18 -> N19[label=probability]; N19 -> N20[label=operand]; N20 -> N22[label=3]; N21 -> N22[label=columns]; N21 -> N22[label=rows]; N22 -> N23[label=operand]; N22 -> N26[label=operand]; N23 -> N24[label=operand]; N23 -> N29[label=operand]; N24 -> N25[label=operator]; N26 -> N27[label=operand]; N27 -> N28[label=operator]; N29 -> N30[label=operator]; } """ self.assertEqual(expectation.strip(), observed_beanstalk.strip()) generated_graph = to_bmg_graph(bmg) observed_bmg = generated_graph.graph.to_dot() expectation = """ digraph "graph" { N0[label="matrix"]; N1[label="0"]; N2[label="ColumnIndex"]; N3[label="Index"]; N4[label="ToProb"]; N5[label="Bernoulli"]; N6[label="~"]; N7[label="1"]; N8[label="Index"]; N9[label="ToProb"]; N10[label="Bernoulli"]; N11[label="~"]; N12[label="ColumnIndex"]; N13[label="Index"]; N14[label="ToProb"]; N15[label="Bernoulli"]; N16[label="~"]; N17[label="Index"]; N18[label="ToProb"]; N19[label="Bernoulli"]; N20[label="~"]; N21[label="2"]; N22[label="ToMatrix"]; N23[label="ToReal"]; N24[label="MatrixExp"]; N25[label="ToPosReal"]; N26[label="MatrixLog"]; N27[label="MatrixPhi"]; N0 -> N2; N0 -> N12; N1 -> N2; N1 -> N3; N1 -> N13; N2 -> N3; N2 -> N8; N3 -> N4; N4 -> N5; N5 -> N6; N6 -> N22; N7 -> N8; N7 -> N12; N7 -> N17; N8 -> N9; N9 -> N10; N10 -> N11; N11 -> N22; N12 -> N13; N12 -> N17; N13 -> N14; N14 -> N15; N15 -> N16; N16 -> N22; N17 -> N18; N18 -> N19; N19 -> N20; N20 -> N22; N21 -> N22; N21 -> N22; N22 -> N23; N22 -> N25; N23 -> N24; N23 -> N27; N25 -> N26; Q0[label="Query"]; N24 -> Q0; Q1[label="Query"]; N26 -> Q1; Q2[label="Query"]; N27 -> Q2; } """ self.assertEqual(expectation.strip(), observed_bmg.strip()) def test_fix_matrix_complement(self) -> None: self.maxDiff = None bmg = BMGraphBuilder() probs = bmg.add_real_matrix(torch.tensor([[0.75, 0.25], [0.125, 0.875]])) tensor_elements = [] # create non constant bool matrix for row in range(0, 2): row_node = bmg.add_natural(row) row_prob = bmg.add_column_index(probs, row_node) for column in range(0, 2): col_index = bmg.add_natural(column) prob = bmg.add_vector_index(row_prob, col_index) bernoulli = bmg.add_bernoulli(prob) sample = bmg.add_sample(bernoulli) tensor_elements.append(sample) matrix = bmg.add_tensor(Size([2, 2]), *tensor_elements) # create constant matrices const_prob_matrix = bmg.add_probability_matrix( torch.tensor([[0.25, 0.75], [0.5, 0.5]]) ) const_bool_matrix = bmg.add_probability_matrix( torch.tensor([[True, False], [False, False]]) ) const_prob_simplex = bmg.add_simplex(torch.tensor([0.5, 0.5])) mc_non_constant_boolean = bmg.add_matrix_complement(matrix) mc_const_prob = bmg.add_matrix_complement(const_prob_matrix) mc_const_bool = bmg.add_matrix_complement(const_bool_matrix) mc_const_simplex = bmg.add_matrix_complement(const_prob_simplex) bmg.add_query(mc_non_constant_boolean, _rv_id()) bmg.add_query(mc_const_prob, _rv_id()) bmg.add_query(mc_const_bool, _rv_id()) bmg.add_query(mc_const_simplex, _rv_id()) observed_beanstalk = to_dot(bmg, after_transform=True) expectation = """ digraph "graph" { N00[label="[[0.75,0.25],\\\\n[0.125,0.875]]"]; N01[label=0]; N02[label=ColumnIndex]; N03[label=index]; N04[label=ToProb]; N05[label=Bernoulli]; N06[label=Sample]; N07[label=1]; N08[label=index]; N09[label=ToProb]; N10[label=Bernoulli]; N11[label=Sample]; N12[label=ColumnIndex]; N13[label=index]; N14[label=ToProb]; N15[label=Bernoulli]; N16[label=Sample]; N17[label=index]; N18[label=ToProb]; N19[label=Bernoulli]; N20[label=Sample]; N21[label=2]; N22[label=ToMatrix]; N23[label=MatrixComplement]; N24[label=Query]; N25[label="[[0.25,0.75],\\\\n[0.5,0.5]]"]; N26[label=MatrixComplement]; N27[label=Query]; N28[label="[[True,False],\\\\n[False,False]]"]; N29[label=MatrixComplement]; N30[label=Query]; N31[label="[0.5,0.5]"]; N32[label=MatrixComplement]; N33[label=Query]; N00 -> N02[label=left]; N00 -> N12[label=left]; N01 -> N02[label=right]; N01 -> N03[label=right]; N01 -> N13[label=right]; N02 -> N03[label=left]; N02 -> N08[label=left]; N03 -> N04[label=operand]; N04 -> N05[label=probability]; N05 -> N06[label=operand]; N06 -> N22[label=0]; N07 -> N08[label=right]; N07 -> N12[label=right]; N07 -> N17[label=right]; N08 -> N09[label=operand]; N09 -> N10[label=probability]; N10 -> N11[label=operand]; N11 -> N22[label=1]; N12 -> N13[label=left]; N12 -> N17[label=left]; N13 -> N14[label=operand]; N14 -> N15[label=probability]; N15 -> N16[label=operand]; N16 -> N22[label=2]; N17 -> N18[label=operand]; N18 -> N19[label=probability]; N19 -> N20[label=operand]; N20 -> N22[label=3]; N21 -> N22[label=columns]; N21 -> N22[label=rows]; N22 -> N23[label=operand]; N23 -> N24[label=operator]; N25 -> N26[label=operand]; N26 -> N27[label=operator]; N28 -> N29[label=operand]; N29 -> N30[label=operator]; N31 -> N32[label=operand]; N32 -> N33[label=operator]; } """ self.assertEqual(expectation.strip(), observed_beanstalk.strip()) generated_graph = to_bmg_graph(bmg) observed_bmg = generated_graph.graph.to_dot() expectation = """ digraph "graph" { N0[label="matrix"]; N1[label="0"]; N2[label="ColumnIndex"]; N3[label="Index"]; N4[label="ToProb"]; N5[label="Bernoulli"]; N6[label="~"]; N7[label="1"]; N8[label="Index"]; N9[label="ToProb"]; N10[label="Bernoulli"]; N11[label="~"]; N12[label="ColumnIndex"]; N13[label="Index"]; N14[label="ToProb"]; N15[label="Bernoulli"]; N16[label="~"]; N17[label="Index"]; N18[label="ToProb"]; N19[label="Bernoulli"]; N20[label="~"]; N21[label="2"]; N22[label="ToMatrix"]; N23[label="MatrixComplement"]; N24[label="matrix"]; N25[label="MatrixComplement"]; N26[label="matrix"]; N27[label="MatrixComplement"]; N28[label="simplex"]; N29[label="MatrixComplement"]; N0 -> N2; N0 -> N12; N1 -> N2; N1 -> N3; N1 -> N13; N2 -> N3; N2 -> N8; N3 -> N4; N4 -> N5; N5 -> N6; N6 -> N22; N7 -> N8; N7 -> N12; N7 -> N17; N8 -> N9; N9 -> N10; N10 -> N11; N11 -> N22; N12 -> N13; N12 -> N17; N13 -> N14; N14 -> N15; N15 -> N16; N16 -> N22; N17 -> N18; N18 -> N19; N19 -> N20; N20 -> N22; N21 -> N22; N21 -> N22; N22 -> N23; N24 -> N25; N26 -> N27; N28 -> N29; Q0[label="Query"]; N23 -> Q0; Q1[label="Query"]; N25 -> Q1; Q2[label="Query"]; N27 -> Q2; Q3[label="Query"]; N29 -> Q3; } """ self.assertEqual(expectation.strip(), observed_bmg.strip()) def test_fix_matrix_log1mexp(self) -> None: self.maxDiff = None bmg = BMGraphBuilder() probs = bmg.add_real_matrix(torch.tensor([[0.75, 0.25], [0.125, 0.875]])) tensor_elements = [] # create non constant real matrix for row in range(0, 2): row_node = bmg.add_natural(row) row_prob = bmg.add_column_index(probs, row_node) for column in range(0, 2): col_index = bmg.add_natural(column) prob = bmg.add_vector_index(row_prob, col_index) bern = bmg.add_bernoulli(prob) sample = bmg.add_sample(bern) neg_two = bmg.add_neg_real(-2.0) neg_samples = bmg.add_multiplication(neg_two, sample) tensor_elements.append(neg_samples) matrix = bmg.add_tensor(Size([2, 2]), *tensor_elements) # create constant matrix const_neg_real_matrix = bmg.add_neg_real_matrix( torch.tensor([[-0.25, -0.75], [-0.5, -0.5]]), ) mlog1mexp_non_constant_real = bmg.add_matrix_log1mexp(matrix) mlog1mexp_const_neg_real = bmg.add_matrix_log1mexp(const_neg_real_matrix) bmg.add_query(mlog1mexp_non_constant_real, _rv_id()) bmg.add_query(mlog1mexp_const_neg_real, _rv_id()) observed_beanstalk = to_dot(bmg, after_transform=True) expectation = """ digraph "graph" { N00[label="[[0.75,0.25],\\\\n[0.125,0.875]]"]; N01[label=0]; N02[label=ColumnIndex]; N03[label=index]; N04[label=ToProb]; N05[label=Bernoulli]; N06[label=Sample]; N07[label=1]; N08[label=index]; N09[label=ToProb]; N10[label=Bernoulli]; N11[label=Sample]; N12[label=ColumnIndex]; N13[label=index]; N14[label=ToProb]; N15[label=Bernoulli]; N16[label=Sample]; N17[label=index]; N18[label=ToProb]; N19[label=Bernoulli]; N20[label=Sample]; N21[label=2]; N22[label=-2.0]; N23[label=0.0]; N24[label=if]; N25[label=if]; N26[label=if]; N27[label=if]; N28[label=ToMatrix]; N29[label=MatrixLog1mexp]; N30[label=Query]; N31[label="[[-0.25,-0.75],\\\\n[-0.5,-0.5]]"]; N32[label=MatrixLog1mexp]; N33[label=Query]; N00 -> N02[label=left]; N00 -> N12[label=left]; N01 -> N02[label=right]; N01 -> N03[label=right]; N01 -> N13[label=right]; N02 -> N03[label=left]; N02 -> N08[label=left]; N03 -> N04[label=operand]; N04 -> N05[label=probability]; N05 -> N06[label=operand]; N06 -> N24[label=condition]; N07 -> N08[label=right]; N07 -> N12[label=right]; N07 -> N17[label=right]; N08 -> N09[label=operand]; N09 -> N10[label=probability]; N10 -> N11[label=operand]; N11 -> N25[label=condition]; N12 -> N13[label=left]; N12 -> N17[label=left]; N13 -> N14[label=operand]; N14 -> N15[label=probability]; N15 -> N16[label=operand]; N16 -> N26[label=condition]; N17 -> N18[label=operand]; N18 -> N19[label=probability]; N19 -> N20[label=operand]; N20 -> N27[label=condition]; N21 -> N28[label=columns]; N21 -> N28[label=rows]; N22 -> N24[label=consequence]; N22 -> N25[label=consequence]; N22 -> N26[label=consequence]; N22 -> N27[label=consequence]; N23 -> N24[label=alternative]; N23 -> N25[label=alternative]; N23 -> N26[label=alternative]; N23 -> N27[label=alternative]; N24 -> N28[label=0]; N25 -> N28[label=1]; N26 -> N28[label=2]; N27 -> N28[label=3]; N28 -> N29[label=operand]; N29 -> N30[label=operator]; N31 -> N32[label=operand]; N32 -> N33[label=operator]; } """ self.assertEqual(expectation.strip(), observed_beanstalk.strip()) generated_graph = to_bmg_graph(bmg) observed_bmg = generated_graph.graph.to_dot() expectation = """ digraph "graph" { N0[label="matrix"]; N1[label="0"]; N2[label="ColumnIndex"]; N3[label="Index"]; N4[label="ToProb"]; N5[label="Bernoulli"]; N6[label="~"]; N7[label="1"]; N8[label="Index"]; N9[label="ToProb"]; N10[label="Bernoulli"]; N11[label="~"]; N12[label="ColumnIndex"]; N13[label="Index"]; N14[label="ToProb"]; N15[label="Bernoulli"]; N16[label="~"]; N17[label="Index"]; N18[label="ToProb"]; N19[label="Bernoulli"]; N20[label="~"]; N21[label="2"]; N22[label="-2"]; N23[label="-1e-10"]; N24[label="IfThenElse"]; N25[label="IfThenElse"]; N26[label="IfThenElse"]; N27[label="IfThenElse"]; N28[label="ToMatrix"]; N29[label="MatrixLog1mexp"]; N30[label="matrix"]; N31[label="MatrixLog1mexp"]; N0 -> N2; N0 -> N12; N1 -> N2; N1 -> N3; N1 -> N13; N2 -> N3; N2 -> N8; N3 -> N4; N4 -> N5; N5 -> N6; N6 -> N24; N7 -> N8; N7 -> N12; N7 -> N17; N8 -> N9; N9 -> N10; N10 -> N11; N11 -> N25; N12 -> N13; N12 -> N17; N13 -> N14; N14 -> N15; N15 -> N16; N16 -> N26; N17 -> N18; N18 -> N19; N19 -> N20; N20 -> N27; N21 -> N28; N21 -> N28; N22 -> N24; N22 -> N25; N22 -> N26; N22 -> N27; N23 -> N24; N23 -> N25; N23 -> N26; N23 -> N27; N24 -> N28; N25 -> N28; N26 -> N28; N27 -> N28; N28 -> N29; N30 -> N31; Q0[label="Query"]; N29 -> Q0; Q1[label="Query"]; N31 -> Q1; } """ self.assertEqual(expectation.strip(), observed_bmg.strip())
beanmachine-main
tests/ppl/compiler/fix_matrix_type_test.py
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Bernoulli compiler tests import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference import BMGInference from torch.distributions import Bernoulli, Beta _bern_ext = Bernoulli(0.5) @bm.random_variable def bern_1(): # Distribution created externally to random variable return _bern_ext @bm.random_variable def bern_2(): # Distribution created in random variable, named argument return Bernoulli(probs=0.25) @bm.random_variable def beta(): return Beta(2.0, 2.0) @bm.random_variable def bern_3(): # Distribution parameterized by another rv return Bernoulli(beta()) @bm.random_variable def bern_4(): # Bernoullis with constant logits are treated as though we had # the probs instead. Notice that this is deduplicated in the graph # with Bern(0.5) (of course it is a different sample because it # is a different RV). return Bernoulli(logits=0.0) @bm.random_variable def bern_5(): # Bernoullis with stochastic logits become a different kind of node. return Bernoulli(logits=beta()) class BernoulliTest(unittest.TestCase): def test_bernoulli(self) -> None: self.maxDiff = None queries = [ bern_1(), bern_2(), bern_3(), bern_4(), bern_5(), ] observations = {} observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label=0.5]; N01[label=Bernoulli]; N02[label=Sample]; N03[label=Query]; N04[label=0.25]; N05[label=Bernoulli]; N06[label=Sample]; N07[label=Query]; N08[label=2.0]; N09[label=Beta]; N10[label=Sample]; N11[label=Bernoulli]; N12[label=Sample]; N13[label=Query]; N14[label=Sample]; N15[label=Query]; N16[label=ToReal]; N17[label="Bernoulli(logits)"]; N18[label=Sample]; N19[label=Query]; N00 -> N01; N01 -> N02; N01 -> N14; N02 -> N03; N04 -> N05; N05 -> N06; N06 -> N07; N08 -> N09; N08 -> N09; N09 -> N10; N10 -> N11; N10 -> N16; N11 -> N12; N12 -> N13; N14 -> N15; N16 -> N17; N17 -> N18; N18 -> N19; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/bernoulli_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm import torch import torch.distributions as dist from beanmachine.ppl.inference import BMGInference @bm.random_variable def foo(): return dist.Normal(0.0, 1.0) @bm.random_variable def bat(): return dist.Normal(0.0, 10.0) @bm.random_variable def bar(i): stmt: float = 1.2 * foo() + bat() return dist.Normal(stmt, 1.0) class AnnotatedAssignmentTest(unittest.TestCase): def test_annotated_assignemnt(self) -> None: bat_value = dist.Normal(0.0, 10.0).sample(torch.Size((1, 1))) foo_value = dist.Normal(0.0, 1.0).sample(torch.Size((1, 1))) observations = {} bar_parent = dist.Normal(foo_value + bat_value, torch.tensor(1.0)) for i in range(0, 1): observations[bar(i)] = bar_parent.sample(torch.Size((1, 1))) observed = BMGInference().to_dot( queries=[foo(), bat()], observations=observations, ) print(observed) expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=10.0]; N05[label=Normal]; N06[label=Sample]; N07[label=1.2]; N08[label="*"]; N09[label="+"]; N10[label=Normal]; N11[label=Sample]; N12[label="Observation 12.937742233276367"]; N13[label=Query]; N14[label=Query]; N00 -> N02; N00 -> N05; N01 -> N02; N01 -> N10; N02 -> N03; N03 -> N08; N03 -> N13; N04 -> N05; N05 -> N06; N06 -> N09; N06 -> N14; N07 -> N08; N08 -> N09; N09 -> N10; N10 -> N11; N11 -> N12; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/annotated_assignment_test.py
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Categorical compiler tests import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference import BMGInference from torch import tensor from torch.distributions import Categorical, Dirichlet, HalfCauchy t = tensor([0.125, 0.125, 0.25, 0.5]) @bm.random_variable def c_const_simplex(): return Categorical(t) @bm.random_variable def c_const_unnormalized(): # If we have samples of both the normalized and unnormalized distributions # the deduplicator should merge them into the same distribution, since # 2:2:4:8 :: 1/8:1/8:1/4:1/2 return Categorical(t * 16.0) @bm.random_variable def c_const_logit_simplex(): # Note that logits here means log probabilities, not log odds. # Since the argument is just a constant, the runtime should detect # that it can simply reuse the [0.125, 0.125, 0.25, 0.5] node # in the generated graph. return Categorical(logits=t.log()) @bm.random_variable def c_trivial_simplex(): # No sensible person would do this but we should ensure it works anyway. # Categorical(1.0) is already illegal in torch so we don't have to test that. # TODO: We could optimize this to the constant zero I suppose but it is # unlikely to help in realistic code. Better would be to detect this likely # bug and report it as a warning somehow. return Categorical(tensor([1.0])) @bm.random_variable def hc(): return HalfCauchy(0.0) @bm.random_variable def c_random_logit(): return Categorical(logits=tensor([0.0, 0.0, 0.0, -hc()])) @bm.random_variable def d4(): return Dirichlet(tensor([1.0, 1.0, 1.0, 1.0])) @bm.random_variable def cd4(): return Categorical(d4()) @bm.random_variable def c_multi(): return Categorical(tensor([[0.5, 0.5], [0.5, 0.5]])) # NOTE: A random variable indexed by a categorical is tested in # stochastic_control_flow_test.py. # TODO: Once categorical inference is supported in BMG add a test # here which demonstrates that. class CategoricalTest(unittest.TestCase): def test_categorical_trivial(self) -> None: self.maxDiff = None queries = [c_trivial_simplex()] observations = {} observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N0[label="[1.0]"]; N1[label=Categorical]; N2[label=Sample]; N3[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; } """ self.assertEqual(expected.strip(), observed.strip()) def test_categorical_dirichlet(self) -> None: self.maxDiff = None # It should be legal to use the output of a one-column # Dirichlet as the input to a categorical: queries = [cd4()] observations = {} observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N0[label="[1.0,1.0,1.0,1.0]"]; N1[label=Dirichlet]; N2[label=Sample]; N3[label=Categorical]; N4[label=Sample]; N5[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N4; N4 -> N5; } """ self.assertEqual(expected.strip(), observed.strip()) def test_categorical_equivalent_consts(self) -> None: self.maxDiff = None # * If we have a categorical with a constant probability # that does not sum to 1.0 then we automatically normalize it. # * If we have a categorical logits with constant probability # then we automatically convert it to regular probs and # normalize them. # # That means that we automatically deduplicate what looks # like three distinct distributions into three samples from # the same distribution: queries = [ c_const_unnormalized(), c_const_simplex(), c_const_logit_simplex(), ] observations = {} observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N0[label="[0.125,0.125,0.25,0.5]"]; N1[label=Categorical]; N2[label=Sample]; N3[label=Query]; N4[label=Sample]; N5[label=Query]; N6[label=Sample]; N7[label=Query]; N0 -> N1; N1 -> N2; N1 -> N4; N1 -> N6; N2 -> N3; N4 -> N5; N6 -> N7; } """ self.assertEqual(expected.strip(), observed.strip()) # Note that we add a simplex-typed constant: observed = BMGInference().to_python(queries, observations) expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_col_simplex_matrix(tensor([[0.125],[0.125],[0.25],[0.5]])) n1 = g.add_distribution( graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, [n0], ) n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) q0 = g.query(n2) n3 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) q1 = g.query(n3) n4 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) q2 = g.query(n4) """ self.assertEqual(expected.strip(), observed.strip()) def test_categorical_random_logit(self) -> None: self.maxDiff = None # We do not support Categorical(logits=something_random) # random variables. queries = [ c_random_logit(), ] observations = {} with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, observations, 10) observed = str(ex.exception) expected = """ The model uses a categorical operation unsupported by Bean Machine Graph. The unsupported node was created in function call c_random_logit(). """ self.assertEqual(expected.strip(), observed.strip()) def test_categorical_multi(self) -> None: self.maxDiff = None # We do not support Categorical with multiple dimensions. # TODO: This error message is not very well worded; what we want to communicate # is that ANY one-column simplex is the requirement. queries = [ c_multi(), ] observations = {} with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, observations, 10) observed = str(ex.exception) expected = """ The probability of a categorical is required to be a 2 x 1 simplex matrix but is a 2 x 2 simplex matrix. The categorical was created in function call c_multi(). """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/categorical_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.compiler.runtime import BMGRuntime from torch.distributions import Bernoulli, Beta @bm.random_variable def beta(): return Beta(2.0, 2.0) @bm.random_variable def flip1(n): return Bernoulli(beta()) @bm.functional def sum1(): return flip1(0) + 1.0 def sum2(n, m): # Note that sum2 is NOT a functional. # The returned addition node should deduplicate with # the one returned by sum1(). return flip1(0) + (n * m) @bm.functional def prod1(n): # Try a named argument. return sum1() * sum2(1.0, m=1.0) @bm.functional def log1(n): return prod1(n).log() def _dict_to_str(d) -> str: return "\n".join( sorted( type(key).__name__ + ":{" + ",".join(sorted(str(v) for v in d[key])) + "}" for key in d ) ) class NodeContextTest(unittest.TestCase): def test_node_context(self) -> None: self.maxDiff = None rt = BMGRuntime() rt.accumulate_graph([log1(123)], {}) expected = """ AdditionNode:{sum1(),sum2(1.0,m=1.0)} BernoulliNode:{flip1(0)} BetaNode:{beta()} LogNode:{log1(123)} MultiplicationNode:{prod1(123)} SampleNode:{beta()} SampleNode:{flip1(0)} """ observed = _dict_to_str(rt._context._node_locations) self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/node_context_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Compare original and conjugate prior transformed model""" import random import unittest import scipy import torch from beanmachine.ppl.examples.conjugate_models.normal_normal import NormalNormalModel from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor from torch.distributions import Normal class NormalNormalConjugacyTest(unittest.TestCase): def test_conjugate_graph(self) -> None: bmg = BMGInference() model = NormalNormalModel(10.0, 2.0, 5.0) queries = [model.normal_p()] observations = {model.normal(): tensor(15.9)} observed_bmg = bmg.to_dot(queries, observations, skip_optimizations=set()) expected_bmg = """ digraph "graph" { N0[label=10.813793182373047]; N1[label=1.8569534304710584]; N2[label=Normal]; N3[label=Sample]; N4[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; } """ self.assertEqual(observed_bmg.strip(), expected_bmg.strip()) def test_normal_normal_conjugate(self) -> None: """ KS test to check if samples from the original NormalNormalModel and transformed model is within a certain bound. We initialize the seed to ensure the test is deterministic. """ seed = 0 torch.manual_seed(seed) random.seed(seed) true_mu = 0.5 true_y = Normal(true_mu, 10.0) num_samples = 1000 bmg = BMGInference() model = NormalNormalModel(10.0, 2.0, 5.0) queries = [model.normal_p()] observations = { model.normal(): true_y.sample(), } skip_optimizations = {"normal_normal_conjugate_fixer"} original_posterior = bmg.infer( queries, observations, num_samples, 1, skip_optimizations=skip_optimizations ) original_samples = original_posterior[model.normal_p()][0] transformed_posterior = bmg.infer( queries, observations, num_samples, 1, skip_optimizations=set() ) transformed_samples = transformed_posterior[model.normal_p()][0] self.assertEqual( type(original_samples), type(transformed_samples), "Sample type of original and transformed model should be the same.", ) self.assertEqual( len(original_samples), len(transformed_samples), "Sample size of original and transformed model should be the same.", ) self.assertGreaterEqual( scipy.stats.ks_2samp(original_samples, transformed_samples).pvalue, 0.05, )
beanmachine-main
tests/ppl/compiler/fix_normal_normal_basic_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """End-to-end compiler test for Bayesian Meta-Analysis model""" import platform import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor from torch.distributions import HalfCauchy, Normal, StudentT class Group: level = 2 class Team: group: Group level = 1 def __init__(self, group: Group): self.group = group class Experiment: result: float stddev: float team: Team level = 0 def __init__(self, result: float, stddev: float, team: Team): self.result = result self.stddev = stddev self.team = team group1 = Group() group2 = Group() team1 = Team(group1) team2 = Team(group1) team3 = Team(group2) team4 = Team(group2) # I generated sample values for everything that conform to this model: # * true value is 10.0 # * experiment bias stddev is 2.10 # * team bias stddev is 1.34 # * group bias stddev is 1.52 # * experiment biases are # -0.82, -1.58, 0.45, 0.23, 1.30, -1.25, -1.26, -1.14 # * team biases are -2.19, -1.41, -0.26, 1.16 # * group biases are 0.19, 0.79 # * experiment stddevs and results are given below. experiments = [ Experiment(7.36, 0.3, team1), Experiment(6.47, 0.5, team1), Experiment(8.87, 0.2, team2), Experiment(9.17, 1.0, team2), Experiment(11.19, 2.4, team3), Experiment(10.30, 1.5, team3), Experiment(11.06, 0.9, team4), Experiment(10.74, 0.8, team4), ] @bm.random_variable def true_value(): return StudentT(1.0) @bm.random_variable def bias_size(level): return HalfCauchy(1.0) @bm.random_variable def node_bias(node): return Normal(0, bias_size(node.level)) @bm.random_variable def result(experiment): mean = ( true_value() + node_bias(experiment) + node_bias(experiment.team) + node_bias(experiment.team.group) ) return Normal(mean, experiment.stddev) class BMATest(unittest.TestCase): @unittest.skipIf( platform.system() in ["Darwin", "Windows"], reason="Numerical behavior seems to be different on MacOS/Windows", ) def test_bma_inference(self) -> None: queries = [true_value(), bias_size(0), bias_size(1), bias_size(2)] observations = {result(x): tensor(x.result) for x in experiments} # Eight experiments, four teams, two groups, is very little data to # make good inferences from, so we should expect that the inference # engine does not get particularly close. # The true value is 10.0, but the observations given best match # a true value of 8.15. expected_true_value = 8.15 # True exp bias size was 2.10 but observations given best match # a exp bias size of 0.70 expected_exp_bias = 0.70 # True team bias size was 1.32 but observations given best match # a team bias of 1.26 expected_team_bias = 1.26 # True group bias size was 1.52 but observations given best match # a group bias of 1.50 expected_group_bias = 1.50 mcsamples = BMGInference().infer(queries, observations, 1000, 1) queries = [true_value(), bias_size(0), bias_size(1), bias_size(2)] observed_true_value = mcsamples[true_value()].mean() observed_exp_bias = mcsamples[bias_size(0)].mean() observed_team_bias = mcsamples[bias_size(1)].mean() observed_group_bias = mcsamples[bias_size(2)].mean() self.assertAlmostEqual(observed_true_value, expected_true_value, delta=0.1) self.assertAlmostEqual(observed_exp_bias, expected_exp_bias, delta=0.1) self.assertAlmostEqual(observed_team_bias, expected_team_bias, delta=0.1) self.assertAlmostEqual(observed_group_bias, expected_group_bias, delta=0.1)
beanmachine-main
tests/ppl/compiler/bma_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.compiler.runtime import BMGRuntime from beanmachine.ppl.inference import BMGInference from torch import tensor from torch.distributions import Bernoulli, Beta, Dirichlet, Normal # Random variable that takes an argument @bm.random_variable def norm(n): return Normal(loc=0.0, scale=1.0) # Random variable that takes no argument @bm.random_variable def coin(): return Beta(2.0, 2.0) # Call to random variable inside random variable @bm.random_variable def flip(): return Bernoulli(coin()) @bm.random_variable def flips(n): return Bernoulli(0.5) @bm.random_variable def spike_and_slab(n): if n: return Bernoulli(0.5) else: return Normal(0, 1) @bm.functional def if_statement(): # Stochastic control flows using "if" statements are not yet implemented if flip(): return flips(0) else: return flips(1) @bm.functional def while_statement(): # A while statement is logically just a fancy "if"; since we do not support # stochastic "if" yet, neither do we support stochastic "while". while flip(): return flips(0) return flips(1) @bm.random_variable def dirichlet(): return Dirichlet(tensor([1.0, 1.0, 1.0])) @bm.functional def for_statement(): # Stochastic control flows using "for" statements are not yet implemented # TODO: If we know the shape of a graph node then we could implement: # # for x in stochastic_vector(): # ... # # as # # for i in range(vector_length): # x = stochastic_vector()[i] # ... # # and similarly for 2-d matrix tensors; we could iterate the columns. # s = 0.0 for x in dirichlet(): s += x return s @bm.functional def list_comprehension(): # Comprehensions are just a special kind of "for". # We don't support them. return tensor([x + 1.0 for x in dirichlet()]) @bm.functional def set_comprehension(): # Comprehensions are just a special kind of "for". # We don't support them. return tensor(len({x > 0.5 for x in dirichlet()})) @bm.functional def dict_comprehension(): # Comprehensions are just a special kind of "for". # We don't support them. return tensor(len({x: x > 0.5 for x in dirichlet()})) @bm.functional def seq_comprehension(): # Comprehensions are just a special kind of "for". # We don't support them. return tensor(x * 2.0 for x in dirichlet()) # Try out a stochastic control flow where we choose # a mean from one of two distributions depending on # a coin flip. @bm.random_variable def choose_your_mean(): return Normal(spike_and_slab(flip()), 1) # Now let's try what looks like a stochastic workflow but is # actually deterministic. We should detect this and avoid # generating a stochastic workflow. @bm.functional def always_zero(): return tensor(0) @bm.random_variable def any_index_you_want_as_long_as_it_is_zero(): return Normal(spike_and_slab(always_zero()), 1) # Now choose from one of three options; notice that we have # computed a stochastic value inline here rather than putting # it in a functional; that's fine. @bm.random_variable def three_possibilities(): return Normal(spike_and_slab(flips(0) + flips(1)), 1) @bm.random_variable def choice_of_flips(n): if n: return Bernoulli(0.75) return Bernoulli(0.25) @bm.random_variable def composition(): return Normal(spike_and_slab(choice_of_flips(flip())), 1) # Make a choice of four possibilities based on two parameters. @bm.random_variable def multiple_choice(m, n): if n: if m: return Bernoulli(0.125) return Bernoulli(0.25) if m: return Bernoulli(0.75) return Bernoulli(0.875) @bm.random_variable def two_parameters(): return Normal(multiple_choice(flips(0), flips(1)), 1) class StochasticControlFlowTest(unittest.TestCase): def test_stochastic_control_flow_1(self) -> None: self.maxDiff = None queries = [any_index_you_want_as_long_as_it_is_zero()] observations = {} bmg = BMGRuntime().accumulate_graph(queries, observations) # Here we have what looks like a stochastic control flow but # in reality there is only one possibility. We should ensure # that we generate a graph with no choice points. observed = to_dot(bmg, after_transform=True, label_edges=False) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=Normal]; N5[label=Sample]; N6[label=Query]; N0 -> N2; N1 -> N2; N1 -> N4; N2 -> N3; N3 -> N4; N4 -> N5; N5 -> N6; } """ self.assertEqual(expected.strip(), observed.strip()) def test_stochastic_control_flow_2(self) -> None: self.maxDiff = None queries = [choose_your_mean()] observations = {} bmg = BMGRuntime().accumulate_graph(queries, observations) # Note that we generate an if-then-else node here to express the # flip that chooses between two alternatives, and therefore can # lower this to a form that BMG would accept. observed = to_dot(bmg, after_transform=True, label_edges=True) expected = """ digraph "graph" { N00[label=2.0]; N01[label=Beta]; N02[label=Sample]; N03[label=Bernoulli]; N04[label=Sample]; N05[label=0.0]; N06[label=1.0]; N07[label=Normal]; N08[label=Sample]; N09[label=0.5]; N10[label=Bernoulli]; N11[label=Sample]; N12[label=ToReal]; N13[label=if]; N14[label=Normal]; N15[label=Sample]; N16[label=Query]; N00 -> N01[label=alpha]; N00 -> N01[label=beta]; N01 -> N02[label=operand]; N02 -> N03[label=probability]; N03 -> N04[label=operand]; N04 -> N13[label=condition]; N05 -> N07[label=mu]; N06 -> N07[label=sigma]; N06 -> N14[label=sigma]; N07 -> N08[label=operand]; N08 -> N13[label=alternative]; N09 -> N10[label=probability]; N10 -> N11[label=operand]; N11 -> N12[label=operand]; N12 -> N13[label=consequence]; N13 -> N14[label=mu]; N14 -> N15[label=operand]; N15 -> N16[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_stochastic_control_flow_3(self) -> None: self.maxDiff = None queries = [three_possibilities()] observations = {} bmg = BMGRuntime().accumulate_graph(queries, observations) # TODO: We cannot yet transform this into a legal BMG graph because # the quantity used to make the choice is a sum of Booleans, and # we treat the sum of bools as a real number, not as a natural. # We can only index on naturals. # TODO: Add a test where we generate supports such as 1, 2, 3 # or 1, 10, 100. observed = to_dot(bmg, after_transform=False, label_edges=True) expected = """ digraph "graph" { N00[label=0.5]; N01[label=Bernoulli]; N02[label=Sample]; N03[label=Sample]; N04[label="+"]; N05[label=0.0]; N06[label=1.0]; N07[label=Normal]; N08[label=Sample]; N09[label=Sample]; N10[label=2.0]; N11[label=Sample]; N12[label=Switch]; N13[label=1]; N14[label=Normal]; N15[label=Sample]; N16[label=Query]; N00 -> N01[label=probability]; N01 -> N02[label=operand]; N01 -> N03[label=operand]; N01 -> N09[label=operand]; N01 -> N11[label=operand]; N02 -> N04[label=left]; N03 -> N04[label=right]; N04 -> N12[label=0]; N05 -> N07[label=mu]; N05 -> N12[label=1]; N06 -> N07[label=sigma]; N06 -> N12[label=3]; N07 -> N08[label=operand]; N08 -> N12[label=2]; N09 -> N12[label=4]; N10 -> N12[label=5]; N11 -> N12[label=6]; N12 -> N14[label=mu]; N13 -> N14[label=sigma]; N14 -> N15[label=operand]; N15 -> N16[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_stochastic_control_flow_composition(self) -> None: self.maxDiff = None queries = [composition()] observations = {} # Here we have a case where we have composed one stochastic control flow # as the input to another: # * we flip a beta(2,2) coin # * that flip decides whether the next coin flipped is 0.75 or 0.25 # * which decides whether to sample from a normal or a 0.5 coin # * the result is the mean of a normal. # TODO: Write a similar test that shows composition of categoricals. observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label=2.0]; N01[label=Beta]; N02[label=Sample]; N03[label=Bernoulli]; N04[label=Sample]; N05[label=0.25]; N06[label=Bernoulli]; N07[label=Sample]; N08[label=0.75]; N09[label=Bernoulli]; N10[label=Sample]; N11[label=0.0]; N12[label=1.0]; N13[label=Normal]; N14[label=Sample]; N15[label=0.5]; N16[label=Bernoulli]; N17[label=Sample]; N18[label=if]; N19[label=ToReal]; N20[label=if]; N21[label=Normal]; N22[label=Sample]; N23[label=Query]; N00 -> N01; N00 -> N01; N01 -> N02; N02 -> N03; N03 -> N04; N04 -> N18; N05 -> N06; N06 -> N07; N07 -> N18; N08 -> N09; N09 -> N10; N10 -> N18; N11 -> N13; N12 -> N13; N12 -> N21; N13 -> N14; N14 -> N20; N15 -> N16; N16 -> N17; N17 -> N19; N18 -> N20; N19 -> N20; N20 -> N21; N21 -> N22; N22 -> N23; } """ self.assertEqual(expected.strip(), observed.strip()) def test_stochastic_control_flow_4(self) -> None: self.maxDiff = None queries = [two_parameters()] observations = {} bmg = BMGRuntime().accumulate_graph(queries, observations) # Here we have four possibilities but since each is a Boolean choice # it turns out we can in fact represent it. observed = to_dot(bmg, after_transform=True, label_edges=True) expected = """ digraph "graph" { N00[label=0.5]; N01[label=Bernoulli]; N02[label=Sample]; N03[label=Sample]; N04[label=0.875]; N05[label=Bernoulli]; N06[label=Sample]; N07[label=0.25]; N08[label=Bernoulli]; N09[label=Sample]; N10[label=0.75]; N11[label=Bernoulli]; N12[label=Sample]; N13[label=0.125]; N14[label=Bernoulli]; N15[label=Sample]; N16[label=if]; N17[label=if]; N18[label=if]; N19[label=ToReal]; N20[label=1.0]; N21[label=Normal]; N22[label=Sample]; N23[label=Query]; N00 -> N01[label=probability]; N01 -> N02[label=operand]; N01 -> N03[label=operand]; N02 -> N18[label=condition]; N03 -> N16[label=condition]; N03 -> N17[label=condition]; N04 -> N05[label=probability]; N05 -> N06[label=operand]; N06 -> N17[label=alternative]; N07 -> N08[label=probability]; N08 -> N09[label=operand]; N09 -> N17[label=consequence]; N10 -> N11[label=probability]; N11 -> N12[label=operand]; N12 -> N16[label=alternative]; N13 -> N14[label=probability]; N14 -> N15[label=operand]; N15 -> N16[label=consequence]; N16 -> N18[label=consequence]; N17 -> N18[label=alternative]; N18 -> N19[label=operand]; N19 -> N21[label=mu]; N20 -> N21[label=sigma]; N21 -> N22[label=operand]; N22 -> N23[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_stochastic_control_flow_5(self) -> None: self.maxDiff = None queries = [if_statement()] observations = {} with self.assertRaises(ValueError) as ex: BMGRuntime().accumulate_graph(queries, observations) # TODO: Better error message expected = "Stochastic control flows are not yet implemented." self.assertEqual(expected, str(ex.exception)) queries = [for_statement()] with self.assertRaises(ValueError) as ex: BMGRuntime().accumulate_graph(queries, observations) # TODO: Better error message expected = "Stochastic control flows are not yet implemented." self.assertEqual(expected, str(ex.exception)) queries = [while_statement()] with self.assertRaises(ValueError) as ex: BMGRuntime().accumulate_graph(queries, observations) # TODO: Better error message expected = "Stochastic control flows are not yet implemented." self.assertEqual(expected, str(ex.exception)) queries = [dict_comprehension()] with self.assertRaises(ValueError) as ex: BMGRuntime().accumulate_graph(queries, observations) # TODO: Better error message expected = "Stochastic control flows are not yet implemented." self.assertEqual(expected, str(ex.exception)) queries = [list_comprehension()] with self.assertRaises(ValueError) as ex: BMGRuntime().accumulate_graph(queries, observations) # TODO: Better error message expected = "Stochastic control flows are not yet implemented." self.assertEqual(expected, str(ex.exception)) queries = [seq_comprehension()] with self.assertRaises(ValueError) as ex: BMGRuntime().accumulate_graph(queries, observations) # TODO: Better error message expected = "Stochastic control flows are not yet implemented." self.assertEqual(expected, str(ex.exception)) queries = [set_comprehension()] with self.assertRaises(ValueError) as ex: BMGRuntime().accumulate_graph(queries, observations) # TODO: Better error message expected = "Stochastic control flows are not yet implemented." self.assertEqual(expected, str(ex.exception)) # TODO: Test that shows what happens when multiple graph node # arguments are not independent. Can get some false paths # in the graph when this happens. Can we prune them?
beanmachine-main
tests/ppl/compiler/stochastic_control_flow_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """End-to-end test for log1mexp""" import math import unittest import beanmachine.ppl as bm import torch from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor from torch.distributions import Bernoulli, Beta, HalfCauchy # New def log1mexp(lprob): return torch.log(1 - torch.exp(lprob)) def math_log1mexp(lprob): return math.log(1 - math.exp(lprob)) @bm.random_variable def hc(): return HalfCauchy(42) # positive real @bm.functional def right(): return log1mexp(-hc()) # log1mexp takes a negative real @bm.functional def wrong(): return log1mexp(hc()) # log1mexp takes a negative real! @bm.functional def math_right(): return math_log1mexp(-hc()) # log1mexp takes a negative real @bm.functional def math_wrong(): return math_log1mexp(hc()) # log1mexp takes a negative real! # Old @bm.random_variable def beta(): return Beta(2.0, -math_log1mexp(-2.0)) @bm.random_variable def beta2(): return Beta(2.0, -log1mexp(-beta())) @bm.random_variable def flip(n): return Bernoulli(beta()) class Log1mexpTest(unittest.TestCase): def test_log1mexp(self) -> None: """log1mexp""" # New # # First we look at the torch.tensor case # # Example of a model that is OK # queries = [right()] observations = {} observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N0[label=42.0]; N1[label=HalfCauchy]; N2[label=Sample]; N3[label="-"]; N4[label=Log1mexp]; N5[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N4; N4 -> N5; } """ self.assertEqual(expected.strip(), observed.strip()) self.assertTrue( BMGInference().infer(queries, observations, 1, 1), msg="Expected inference to complete successful on this example.", ) # # Example of a model that is not OK, that is, should raise an error # queries = [wrong()] observations = {} with self.assertRaises(ValueError) as ex: observed = BMGInference().to_dot(queries, observations) # TODO: The location in this error message is oddly formatted. # We probably shouldn't be putting descriptions of stochastic # nodes into the call site. expected = """ The operand of a log is required to be a positive real but is a real. The log was created in function call log1mexp(Sample(HalfCauchy(42.0))).""" self.assertEqual(expected.strip(), str(ex.exception).strip()) with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, observations, 1, 1) self.assertEqual(expected.strip(), str(ex.exception)) queries = [right()] observations = {hc(): tensor(1.0)} result = BMGInference().infer(queries, observations, 1, 1) observed = result[right()] expected = log1mexp(tensor(-1.0)) self.assertEqual(observed, expected) # Second we look at the math_ case # # Example of a model that is OK # queries = [math_right()] observations = {} observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N0[label=42.0]; N1[label=HalfCauchy]; N2[label=Sample]; N3[label="-"]; N4[label=Log1mexp]; N5[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N4; N4 -> N5; } """ self.assertEqual(expected.strip(), observed.strip()) self.assertTrue( BMGInference().infer(queries, observations, 1, 1), msg="Expected inference to complete successful on this example.", ) # # Example of a model that is not OK, that is, should raise an error # queries = [math_wrong()] observations = {} with self.assertRaises(ValueError) as ex: observed = BMGInference().to_dot(queries, observations) expected = """ The operand of a log is required to be a positive real but is a real. The log was created in function call math_log1mexp(Sample(HalfCauchy(42.0))).""" self.assertEqual(expected.strip(), str(ex.exception).strip()) with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, observations, 1, 1) self.assertEqual(expected.strip(), str(ex.exception)) queries = [math_right()] observations = {hc(): tensor(1.0)} result = BMGInference().infer(queries, observations, 1, 1) observed = result[math_right()] expected = math_log1mexp(-1.0) self.assertEqual(observed, expected) # ... # Old def test_log1mexp_coin_flip_inference(self) -> None: """Like a test in coin_flip_test.py but with log1mexp""" self.maxDiff = None queries = [beta()] observations = { flip(0): tensor(0.0), flip(1): tensor(0.0), flip(2): tensor(1.0), flip(3): tensor(0.0), } num_samples = 1000 inference = BMGInference() mcsamples = inference.infer(queries, observations, num_samples, 1) samples = mcsamples[beta()] observed = samples.mean() expected = tensor(0.4873) self.assertAlmostEqual(first=observed, second=expected, delta=0.05) def test_log1mexp_coin_flip_to_dot_cpp_python(self) -> None: """Like a test in coin_flip_test.py but with log1mexp""" self.maxDiff = None queries = [beta2()] observations = { flip(0): tensor(0.0), flip(1): tensor(0.0), flip(2): tensor(1.0), flip(3): tensor(0.0), } observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label=2.0]; N01[label=0.14541345834732056]; N02[label=Beta]; N03[label=Sample]; N04[label=Bernoulli]; N05[label=Sample]; N06[label="Observation False"]; N07[label=Sample]; N08[label="Observation False"]; N09[label=Sample]; N10[label="Observation True"]; N11[label=Sample]; N12[label="Observation False"]; N13[label=ToPosReal]; N14[label="-"]; N15[label=Log1mexp]; N16[label="-"]; N17[label=Beta]; N18[label=Sample]; N19[label=Query]; N00 -> N02; N00 -> N17; N01 -> N02; N02 -> N03; N03 -> N04; N03 -> N13; N04 -> N05; N04 -> N07; N04 -> N09; N04 -> N11; N05 -> N06; N07 -> N08; N09 -> N10; N11 -> N12; N13 -> N14; N14 -> N15; N15 -> N16; N16 -> N17; N17 -> N18; N18 -> N19; } """ self.assertEqual(observed.strip(), expected.strip()) observed = BMGInference().to_cpp(queries, observations) expected = """ graph::Graph g; uint n0 = g.add_constant_pos_real(2.0); uint n1 = g.add_constant_pos_real(0.14541345834732056); uint n2 = g.add_distribution( graph::DistributionType::BETA, graph::AtomicType::PROBABILITY, std::vector<uint>({n0, n1})); uint n3 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n2})); uint n4 = g.add_distribution( graph::DistributionType::BERNOULLI, graph::AtomicType::BOOLEAN, std::vector<uint>({n3})); uint n5 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n4})); g.observe(n5, false); uint n6 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n4})); g.observe(n6, false); uint n7 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n4})); g.observe(n7, true); uint n8 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n4})); g.observe(n8, false); uint n9 = g.add_operator( graph::OperatorType::TO_POS_REAL, std::vector<uint>({n3})); uint n10 = g.add_operator( graph::OperatorType::NEGATE, std::vector<uint>({n9})); uint n11 = g.add_operator( graph::OperatorType::LOG1MEXP, std::vector<uint>({n10})); uint n12 = g.add_operator( graph::OperatorType::NEGATE, std::vector<uint>({n11})); uint n13 = g.add_distribution( graph::DistributionType::BETA, graph::AtomicType::PROBABILITY, std::vector<uint>({n0, n12})); uint n14 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n13})); uint q0 = g.query(n14); """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_python(queries, observations) expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_pos_real(2.0) n1 = g.add_constant_pos_real(0.14541345834732056) n2 = g.add_distribution( graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [n0, n1], ) n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2]) n4 = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [n3], ) n5 = g.add_operator(graph.OperatorType.SAMPLE, [n4]) g.observe(n5, False) n6 = g.add_operator(graph.OperatorType.SAMPLE, [n4]) g.observe(n6, False) n7 = g.add_operator(graph.OperatorType.SAMPLE, [n4]) g.observe(n7, True) n8 = g.add_operator(graph.OperatorType.SAMPLE, [n4]) g.observe(n8, False) n9 = g.add_operator(graph.OperatorType.TO_POS_REAL, [n3]) n10 = g.add_operator(graph.OperatorType.NEGATE, [n9]) n11 = g.add_operator(graph.OperatorType.LOG1MEXP, [n10]) n12 = g.add_operator(graph.OperatorType.NEGATE, [n11]) n13 = g.add_distribution( graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [n0, n12], ) n14 = g.add_operator(graph.OperatorType.SAMPLE, [n13]) q0 = g.query(n14) """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/log1mexp_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference.bmg_inference import BMGInference from torch.distributions import Bernoulli # TODO: x > y --> if x then not y else false # TODO: x is y --> same as == ? Or should this be illegal? @bm.random_variable def flip(n): return Bernoulli(0.5) # # == # @bm.functional def eq_x_0(): # not flip(0) return flip(0) == 0.0 @bm.functional def eq_x_1(): # flip(0) return flip(0) == 1.0 @bm.functional def eq_0_y(): # not flip(1) return 0 == flip(1) @bm.functional def eq_1_y(): # flip(1) return 1 == flip(1) @bm.functional def eq_x_y(): # if flip(0) then flip(1) else not flip(1) return flip(0) == flip(1) # # != # @bm.functional def neq_x_0(): # flip(0) return flip(0) != 0.0 @bm.functional def neq_x_1(): # not flip(0) return flip(0) != 1.0 @bm.functional def neq_0_y(): # flip(1) return 0 != flip(1) @bm.functional def neq_1_y(): # not flip(1) return 1 != flip(1) @bm.functional def neq_x_y(): # if flip(0) then not flip(1) else flip(1) return flip(0) != flip(1) # # >= # @bm.functional def gte_x_0(): # true return flip(0) >= 0.0 @bm.functional def gte_x_1(): # flip(0) return flip(0) >= 1.0 @bm.functional def gte_0_y(): # not flip(1) return 0 >= flip(1) @bm.functional def gte_1_y(): # true return 1 >= flip(1) @bm.functional def gte_x_y(): # if flip(0) then true else not flip(1) return flip(0) >= flip(1) # # <= # @bm.functional def lte_x_0(): # not flip(0) return flip(0) <= 0.0 @bm.functional def lte_x_1(): # true return flip(0) <= 1.0 @bm.functional def lte_0_y(): # true return 0 <= flip(1) @bm.functional def lte_1_y(): # flip(1) return 1 <= flip(1) @bm.functional def lte_x_y(): # if flip(0) then flip(1) else true return flip(0) <= flip(1) # # < # @bm.functional def lt_x_0(): # false return flip(0) < 0.0 @bm.functional def lt_x_1(): # not flip(0) return flip(0) < 1.0 @bm.functional def lt_0_y(): # flip(1) return 0 < flip(1) @bm.functional def lt_1_y(): # false return 1 < flip(1) @bm.functional def lt_x_y(): # if flip(0) then false else flip(1) return flip(0) < flip(1) # # > # @bm.functional def gt_x_0(): # flip(0) return flip(0) > 0.0 @bm.functional def gt_x_1(): # false return flip(0) > 1.0 @bm.functional def gt_0_y(): # false return 0 > flip(1) @bm.functional def gt_1_y(): # not flip(1) return 1 > flip(1) @bm.functional def gt_x_y(): # if flip(0) then not flip(1) else false return flip(0) > flip(1) class BooleanComparisonsTest(unittest.TestCase): def test_boolean_comparison_eq(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([eq_x_y()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Sample]; N4[label=complement]; N5[label=if]; N6[label=Query]; N0 -> N1; N1 -> N2; N1 -> N3; N2 -> N5; N3 -> N4; N3 -> N5; N4 -> N5; N5 -> N6; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([eq_x_0()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=complement]; N4[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N4; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([eq_0_y()], {}) self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([eq_x_1()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([eq_1_y()], {}) self.assertEqual(expected.strip(), observed.strip()) def test_boolean_comparison_neq(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([neq_x_y()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Sample]; N4[label=complement]; N5[label=if]; N6[label=Query]; N0 -> N1; N1 -> N2; N1 -> N3; N2 -> N5; N3 -> N4; N3 -> N5; N4 -> N5; N5 -> N6; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([neq_x_0()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([neq_0_y()], {}) self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([neq_x_1()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=complement]; N4[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N4; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([neq_1_y()], {}) self.assertEqual(expected.strip(), observed.strip()) def test_boolean_comparison_gte(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([gte_x_y()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Sample]; N4[label=True]; N5[label=complement]; N6[label=if]; N7[label=Query]; N0 -> N1; N1 -> N2; N1 -> N3; N2 -> N6; N3 -> N5; N4 -> N6; N5 -> N6; N6 -> N7; } """ self.assertEqual(expected.strip(), observed.strip()) # TODO: Note that here we keep the sample in the graph even though it is # not queried or observed. We might consider removing it. observed = BMGInference().to_dot([gte_x_0()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=True]; N4[label=Query]; N0 -> N1; N1 -> N2; N3 -> N4; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([gte_0_y()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=complement]; N4[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N4; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([gte_x_1()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([gte_1_y()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=True]; N4[label=Query]; N0 -> N1; N1 -> N2; N3 -> N4; } """ self.assertEqual(expected.strip(), observed.strip()) def test_boolean_comparison_lte(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([lte_x_y()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Sample]; N4[label=True]; N5[label=if]; N6[label=Query]; N0 -> N1; N1 -> N2; N1 -> N3; N2 -> N5; N3 -> N5; N4 -> N5; N5 -> N6; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([lte_x_0()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=complement]; N4[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N4; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([lte_0_y()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=True]; N4[label=Query]; N0 -> N1; N1 -> N2; N3 -> N4; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([lte_x_1()], {}) self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([lte_1_y()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; } """ self.assertEqual(expected.strip(), observed.strip()) def test_boolean_comparison_lt(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([lt_x_y()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Sample]; N4[label=False]; N5[label=if]; N6[label=Query]; N0 -> N1; N1 -> N2; N1 -> N3; N2 -> N5; N3 -> N5; N4 -> N5; N5 -> N6; }""" self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([lt_x_0()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=False]; N4[label=Query]; N0 -> N1; N1 -> N2; N3 -> N4; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([lt_1_y()], {}) self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([lt_0_y()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([lt_x_1()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=complement]; N4[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N4; } """ self.assertEqual(expected.strip(), observed.strip()) def test_boolean_comparison_gt(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([gt_x_y()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Sample]; N4[label=complement]; N5[label=False]; N6[label=if]; N7[label=Query]; N0 -> N1; N1 -> N2; N1 -> N3; N2 -> N6; N3 -> N4; N4 -> N6; N5 -> N6; N6 -> N7; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([gt_x_0()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([gt_1_y()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=complement]; N4[label=Query]; N0 -> N1; N1 -> N2; N2 -> N3; N3 -> N4; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([gt_0_y()], {}) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=False]; N4[label=Query]; N0 -> N1; N1 -> N2; N3 -> N4; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([gt_x_1()], {}) self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/boolean_comparisons_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference import BMGInference from torch import tensor from torch.distributions import Bernoulli, Beta # This is a very simplified version of a CLARA model; this is the sort of model # that we want to apply our workaround of removing observations on. @bm.random_variable def sensitivity(labeler): return Beta(1, 1) @bm.random_variable def specificity(labeler): return Beta(2, 2) @bm.random_variable def prevalence(): return Beta(0.5, 0.5) @bm.random_variable def observation(x): bob = 0 sue = 1 pos_sum = prevalence().log() + sensitivity(bob).log() + sensitivity(sue).log() neg_sum = ( (1 - prevalence()).log() + (1 - specificity(bob)).log() + (1 - specificity(sue)).log() ) log_prob = (pos_sum.exp() + neg_sum.exp()).log() return Bernoulli(log_prob.exp()) class FixObserveTrueTest(unittest.TestCase): def test_fix_observe_true(self) -> None: self.maxDiff = None observations = {observation(0): tensor(1.0), observation(1): tensor(1.0)} queries = [] bmg = BMGInference() observed = bmg.to_dot(queries, observations) # Here's the model as it would be handed off to BMG normally. expected = """ digraph "graph" { N00[label=0.5]; N01[label=Beta]; N02[label=Sample]; N03[label=1.0]; N04[label=Beta]; N05[label=Sample]; N06[label=Sample]; N07[label=2.0]; N08[label=Beta]; N09[label=Sample]; N10[label=Sample]; N11[label=Log]; N12[label=Log]; N13[label=Log]; N14[label="+"]; N15[label=complement]; N16[label=Log]; N17[label=complement]; N18[label=Log]; N19[label=complement]; N20[label=Log]; N21[label="+"]; N22[label=LogSumExp]; N23[label=Exp]; N24[label=ToProb]; N25[label=Bernoulli]; N26[label=Sample]; N27[label="Observation True"]; N28[label=Sample]; N29[label="Observation True"]; N00 -> N01; N00 -> N01; N01 -> N02; N02 -> N11; N02 -> N15; N03 -> N04; N03 -> N04; N04 -> N05; N04 -> N06; N05 -> N12; N06 -> N13; N07 -> N08; N07 -> N08; N08 -> N09; N08 -> N10; N09 -> N17; N10 -> N19; N11 -> N14; N12 -> N14; N13 -> N14; N14 -> N22; N15 -> N16; N16 -> N21; N17 -> N18; N18 -> N21; N19 -> N20; N20 -> N21; N21 -> N22; N22 -> N23; N23 -> N24; N24 -> N25; N25 -> N26; N25 -> N28; N26 -> N27; N28 -> N29; } """ self.assertEqual(expected.strip(), observed.strip()) # Now let's force an additional rewriting pass. Note that there must # be as many factor nodes as we removed observations; factor nodes # are not deduplicated. bmg = BMGInference() bmg._fix_observe_true = True observed = bmg.to_dot(queries, observations) expected = """ digraph "graph" { N00[label=0.5]; N01[label=Beta]; N02[label=Sample]; N03[label=1.0]; N04[label=Beta]; N05[label=Sample]; N06[label=Sample]; N07[label=2.0]; N08[label=Beta]; N09[label=Sample]; N10[label=Sample]; N11[label=Log]; N12[label=Log]; N13[label=Log]; N14[label="+"]; N15[label=complement]; N16[label=Log]; N17[label=complement]; N18[label=Log]; N19[label=complement]; N20[label=Log]; N21[label="+"]; N22[label=LogSumExp]; N23[label=ExpProduct]; N24[label=ExpProduct]; N00 -> N01; N00 -> N01; N01 -> N02; N02 -> N11; N02 -> N15; N03 -> N04; N03 -> N04; N04 -> N05; N04 -> N06; N05 -> N12; N06 -> N13; N07 -> N08; N07 -> N08; N08 -> N09; N08 -> N10; N09 -> N17; N10 -> N19; N11 -> N14; N12 -> N14; N13 -> N14; N14 -> N22; N15 -> N16; N16 -> N21; N17 -> N18; N18 -> N21; N19 -> N20; N20 -> N21; N21 -> N22; N22 -> N23; N22 -> N24; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/fix_observe_true_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference import BMGInference from torch.distributions import Bernoulli, Beta, Normal @bm.random_variable def norm(x): return Normal(0.0, 1.0) @bm.functional def sum_1(): return norm(0) + norm(1) + norm(2) @bm.functional def sum_2(): return norm(3) + norm(4) + norm(5) @bm.functional def sum_3(): return sum_1() + 5.0 @bm.functional def sum_4(): return sum_1() + sum_2() @bm.functional def mult_1(): return norm(0) * norm(1) * norm(2) @bm.functional def mult_2(): return norm(3) * norm(4) * norm(5) @bm.functional def mult_3(): return mult_1() * 5.0 @bm.functional def mult_4(): return mult_1() * mult_2() @bm.random_variable def mult_negs_1(): # Verify that product of three negative reals is a negative real. phi = Normal(0.0, 1.0).cdf p1 = phi(norm(1)) # P p2 = phi(norm(2)) # P p3 = phi(norm(3)) # P lp1 = p1.log() # R- lp2 = p2.log() # R- lp3 = p3.log() # R- prod = lp1 * lp2 * lp3 # Should be R- ex = prod.exp() # Should be P return Bernoulli(ex) # Should be legal @bm.random_variable def mult_negs_2(): phi = Normal(0.0, 1.0).cdf p1 = phi(norm(1)) # P p2 = phi(norm(2)) # P p3 = phi(norm(3)) # P lp1 = p1.log() # R- lp2 = p2.log() # R- lp3 = p3.log() # R- prod = lp1 * lp2 * lp3 # Should be R- return Beta(-prod, 2.0) # Should be legal class FixMultiaryOperatorTest(unittest.TestCase): def test_fix_multiary_addition_1(self) -> None: self.maxDiff = None observations = {} queries = [sum_3(), sum_4()] observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before optimization expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label="+"]; N06[label=Sample]; N07[label="+"]; N08[label=5.0]; N09[label="+"]; N10[label=Query]; N11[label=Sample]; N12[label=Sample]; N13[label="+"]; N14[label=Sample]; N15[label="+"]; N16[label="+"]; N17[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N02 -> N04; N02 -> N06; N02 -> N11; N02 -> N12; N02 -> N14; N03 -> N05; N04 -> N05; N05 -> N07; N06 -> N07; N07 -> N09; N07 -> N16; N08 -> N09; N09 -> N10; N11 -> N13; N12 -> N13; N13 -> N15; N14 -> N15; N15 -> N16; N16 -> N17; } """ self.assertEqual(expected.strip(), observed.strip()) # After optimization: observed = BMGInference().to_dot(queries, observations, after_transform=True) expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=Sample]; N06[label="+"]; N07[label=5.0]; N08[label="+"]; N09[label=Query]; N10[label=Sample]; N11[label=Sample]; N12[label=Sample]; N13[label="+"]; N14[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N02 -> N04; N02 -> N05; N02 -> N10; N02 -> N11; N02 -> N12; N03 -> N06; N04 -> N06; N05 -> N06; N06 -> N08; N06 -> N13; N07 -> N08; N08 -> N09; N10 -> N13; N11 -> N13; N12 -> N13; N13 -> N14; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_multiary_multiplication(self) -> None: self.maxDiff = None observations = {} queries = [mult_3(), mult_4()] observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before optimization expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label="*"]; N06[label=Sample]; N07[label="*"]; N08[label=5.0]; N09[label="*"]; N10[label=Query]; N11[label=Sample]; N12[label=Sample]; N13[label="*"]; N14[label=Sample]; N15[label="*"]; N16[label="*"]; N17[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N02 -> N04; N02 -> N06; N02 -> N11; N02 -> N12; N02 -> N14; N03 -> N05; N04 -> N05; N05 -> N07; N06 -> N07; N07 -> N09; N07 -> N16; N08 -> N09; N09 -> N10; N11 -> N13; N12 -> N13; N13 -> N15; N14 -> N15; N15 -> N16; N16 -> N17; } """ self.assertEqual(expected.strip(), observed.strip()) # After optimization: observed = BMGInference().to_dot(queries, observations, after_transform=True) expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=Sample]; N06[label="*"]; N07[label=5.0]; N08[label="*"]; N09[label=Query]; N10[label=Sample]; N11[label=Sample]; N12[label=Sample]; N13[label="*"]; N14[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N02 -> N04; N02 -> N05; N02 -> N10; N02 -> N11; N02 -> N12; N03 -> N06; N04 -> N06; N05 -> N06; N06 -> N08; N06 -> N13; N07 -> N08; N08 -> N09; N10 -> N13; N11 -> N13; N12 -> N13; N13 -> N14; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_multiply_neg_reals_1(self) -> None: self.maxDiff = None observations = {} queries = [mult_negs_1()] observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=Sample]; N06[label=Phi]; N07[label=Log]; N08[label="-"]; N09[label=Phi]; N10[label=Log]; N11[label="-"]; N12[label=Phi]; N13[label=Log]; N14[label="-"]; N15[label="*"]; N16[label="-"]; N17[label=Exp]; N18[label=Bernoulli]; N19[label=Sample]; N20[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N02 -> N04; N02 -> N05; N03 -> N06; N04 -> N09; N05 -> N12; N06 -> N07; N07 -> N08; N08 -> N15; N09 -> N10; N10 -> N11; N11 -> N15; N12 -> N13; N13 -> N14; N14 -> N15; N15 -> N16; N16 -> N17; N17 -> N18; N18 -> N19; N19 -> N20; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fix_multiply_neg_reals_2(self) -> None: # Make sure we're not introducing negate # on top of negate. self.maxDiff = None observations = {} queries = [mult_negs_2()] observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=Sample]; N06[label=Phi]; N07[label=Log]; N08[label="-"]; N09[label=Phi]; N10[label=Log]; N11[label="-"]; N12[label=Phi]; N13[label=Log]; N14[label="-"]; N15[label="*"]; N16[label=2.0]; N17[label=Beta]; N18[label=Sample]; N19[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N02 -> N04; N02 -> N05; N03 -> N06; N04 -> N09; N05 -> N12; N06 -> N07; N07 -> N08; N08 -> N15; N09 -> N10; N10 -> N11; N11 -> N15; N12 -> N13; N13 -> N14; N14 -> N15; N15 -> N17; N16 -> N17; N17 -> N18; N18 -> N19; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/fix_multiary_ops_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.compiler.runtime import BMGRuntime from beanmachine.ppl.inference import BMGInference from torch import logsumexp, tensor from torch.distributions import Bernoulli, Normal @bm.random_variable def norm(n): return Normal(tensor(0.0), tensor(1.0)) @bm.functional def make_a_tensor(): return tensor([norm(1), norm(1), norm(2), 1.25]) @bm.functional def lse1(): return make_a_tensor().logsumexp(dim=0) @bm.functional def lse2(): return logsumexp(make_a_tensor(), dim=0) @bm.functional def lse_bad_1(): # Dim cannot be anything but zero return logsumexp(make_a_tensor(), dim=1) @bm.random_variable def flip(): return Bernoulli(0.5) @bm.functional def lse_bad_2(): # keepdim cannot be anything but false return logsumexp(make_a_tensor(), dim=0, keepdim=flip()) class TensorOperationsTest(unittest.TestCase): def test_tensor_operations_1(self) -> None: self.maxDiff = None bmg = BMGRuntime().accumulate_graph([lse1()], {}) observed = to_dot(bmg) expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=1.25]; N06[label=Tensor]; N07[label=0]; N08[label=False]; N09[label=LogSumExp]; N10[label=Query]; N00 -> N02[label=mu]; N01 -> N02[label=sigma]; N02 -> N03[label=operand]; N02 -> N04[label=operand]; N03 -> N06[label=0]; N03 -> N06[label=1]; N04 -> N06[label=2]; N05 -> N06[label=3]; N06 -> N09[label=operand]; N07 -> N09[label=dim]; N08 -> N09[label=keepdim]; N09 -> N10[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip()) # Do it again, but this time with the static method flavor of # logsumexp. We should get the same result. bmg = BMGRuntime().accumulate_graph([lse2()], {}) observed = to_dot(bmg) self.assertEqual(expected.strip(), observed.strip()) # Now try generating a BMG from them. The problem fixer should # remove the unsupported tensor node. expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=Sample]; N5[label=1.25]; N6[label=LogSumExp]; N7[label=Query]; N0 -> N2[label=mu]; N1 -> N2[label=sigma]; N2 -> N3[label=operand]; N2 -> N4[label=operand]; N3 -> N6[label=0]; N3 -> N6[label=1]; N4 -> N6[label=2]; N5 -> N6[label=3]; N6 -> N7[label=operator]; } """ bmg = BMGRuntime().accumulate_graph([lse1()], {}) observed = to_dot(bmg, after_transform=True) self.assertEqual(observed.strip(), expected.strip()) def test_unsupported_logsumexp(self) -> None: with self.assertRaises(ValueError) as ex: BMGInference().infer([lse_bad_1()], {}, 1) # TODO: Do a better job here. Say why the operation is unsupported. expected = """ The model uses a logsumexp operation unsupported by Bean Machine Graph. The unsupported node was created in function call lse_bad_1(). """ self.assertEqual(expected.strip(), str(ex.exception).strip()) expected = """ The node logsumexp cannot be sized.The operand sizes may be incompatible or the size may not be computable at compile time. The operand sizes are: [torch.Size([4]), torch.Size([]), torch.Size([])] The unsizable node was created in function call lse_bad_2(). """ with self.assertRaises(ValueError) as ex: BMGInference().infer([lse_bad_2()], {}, 1) self.assertEqual(expected.strip(), str(ex.exception).strip())
beanmachine-main
tests/ppl/compiler/tensor_operations_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Beta-Bernoulli model conjugacy transformation check when hyperparameter is a random variable.""" import unittest import beanmachine.ppl as bm from beanmachine.ppl.examples.conjugate_models.beta_bernoulli import BetaBernoulliModel from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor from torch.distributions import Beta class BetaBernoulliAlphaRVModel(BetaBernoulliModel): def __init__(self): self.beta_ = 2.0 @bm.random_variable def alpha(self): return Beta(5.0, 1.0) @bm.random_variable def theta(self): return Beta(self.alpha(), self.beta_) class BetaBernoulliWithAlphaAsRVConjugateTest(unittest.TestCase): def test_conjugate_graph(self) -> None: """ Test to check that Beta-Bernoulli conjugate transformation is not be applied when parameters of Beta distribution are random variables. """ self.maxDiff = None model = BetaBernoulliAlphaRVModel() queries = [model.theta()] observations = { model.y(0): tensor(0.0), model.y(1): tensor(0.0), model.y(2): tensor(1.0), model.y(3): tensor(0.0), } num_samples = 1000 bmg = BMGInference() # This is the model before beta-bernoulli conjugate rewrite is applied expected_bmg = bmg.to_dot(queries, observations, num_samples) # This is the model after beta-bernoulli conjugate rewrite is applied skip_optimizations = set() observed_bmg = bmg.to_dot( queries, observations, num_samples, skip_optimizations=skip_optimizations ) self.assertEqual(expected_bmg.strip(), observed_bmg.strip())
beanmachine-main
tests/ppl/compiler/fix_beta_bernoulli_alpha_rv_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.compiler.copy_and_replace import copy_and_replace from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.compiler.runtime import BMGRuntime from beanmachine.ppl.compiler.tensorizer_transformer import Tensorizer from torch import mm, tensor from torch.distributions import Beta, Normal @bm.random_variable def norm(n): return Normal(tensor(0.0), tensor(1.0)) @bm.random_variable def beta(n): return Beta(2, 2) @bm.functional def make_matrix(n): return tensor([[norm(n), norm(n)], [norm(n), 1.25]]) @bm.functional def make_prob_matrix(n): return tensor([[beta(n), beta(n)], [beta(n), 0.25]]) @bm.functional def make_tensor(n): return tensor( [[[norm(n), norm(n)], [norm(n), 2.35]], [[norm(n), norm(n)], [norm(n), 1.25]]] ) @bm.functional def operators_are_tensorized(): return (make_matrix(0) + make_matrix(1)).exp().sum() @bm.functional def operators_are_tensorized_2(): return make_prob_matrix(1).log().sum() @bm.functional def matrix_scale_lhs(): return make_matrix(1) * norm(2) @bm.functional def matrix_scale_rhs(): return norm(1) * make_matrix(2) @bm.functional def scalar_mult(): return norm(1) * norm(2) @bm.functional def non_matrix_tensor_mult_lhs(): return make_tensor(1) * norm(2) @bm.functional def non_matrix_tensor_mult_rhs(): return norm(6) * make_tensor(5) @bm.functional def mm_mismatch(): return mm(make_tensor(1), tensor([3.6, 3.1, 3.5])) class TensorizeTransformerTest(unittest.TestCase): def test_tensor_operators(self) -> None: self.maxDiff = None bmg = BMGRuntime().accumulate_graph( [operators_are_tensorized(), operators_are_tensorized_2()], {} ) transformed_graph, error_report = copy_and_replace( bmg, lambda c, s: Tensorizer(c, s) ) before = to_dot(bmg) after = to_dot(transformed_graph) expected_before = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=1.25]; N05[label=Tensor]; N06[label=Sample]; N07[label=Tensor]; N08[label="+"]; N09[label=Exp]; N10[label=Sum]; N11[label=Query]; N12[label=2.0]; N13[label=Beta]; N14[label=Sample]; N15[label=0.25]; N16[label=Tensor]; N17[label=Log]; N18[label=Sum]; N19[label=Query]; N00 -> N02[label=mu]; N01 -> N02[label=sigma]; N02 -> N03[label=operand]; N02 -> N06[label=operand]; N03 -> N05[label=0]; N03 -> N05[label=1]; N03 -> N05[label=2]; N04 -> N05[label=3]; N04 -> N07[label=3]; N05 -> N08[label=left]; N06 -> N07[label=0]; N06 -> N07[label=1]; N06 -> N07[label=2]; N07 -> N08[label=right]; N08 -> N09[label=operand]; N09 -> N10[label=operand]; N10 -> N11[label=operator]; N12 -> N13[label=alpha]; N12 -> N13[label=beta]; N13 -> N14[label=operand]; N14 -> N16[label=0]; N14 -> N16[label=1]; N14 -> N16[label=2]; N15 -> N16[label=3]; N16 -> N17[label=operand]; N17 -> N18[label=operand]; N18 -> N19[label=operator]; } """ expected_after = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=1.25]; N05[label=Tensor]; N06[label=Sample]; N07[label=Tensor]; N08[label=MatrixAdd]; N09[label=MatrixExp]; N10[label=MatrixSum]; N11[label=Query]; N12[label=2.0]; N13[label=Beta]; N14[label=Sample]; N15[label=0.25]; N16[label=Tensor]; N17[label=MatrixLog]; N18[label=MatrixSum]; N19[label=Query]; N00 -> N02[label=mu]; N01 -> N02[label=sigma]; N02 -> N03[label=operand]; N02 -> N06[label=operand]; N03 -> N05[label=0]; N03 -> N05[label=1]; N03 -> N05[label=2]; N04 -> N05[label=3]; N04 -> N07[label=3]; N05 -> N08[label=left]; N06 -> N07[label=0]; N06 -> N07[label=1]; N06 -> N07[label=2]; N07 -> N08[label=right]; N08 -> N09[label=operand]; N09 -> N10[label=operand]; N10 -> N11[label=operator]; N12 -> N13[label=alpha]; N12 -> N13[label=beta]; N13 -> N14[label=operand]; N14 -> N16[label=0]; N14 -> N16[label=1]; N14 -> N16[label=2]; N15 -> N16[label=3]; N16 -> N17[label=operand]; N17 -> N18[label=operand]; N18 -> N19[label=operator]; } """ self.assertEqual(expected_before.strip(), before.strip()) self.assertEqual(expected_after.strip(), after.strip()) def test_matrix_scale(self) -> None: self.maxDiff = None bmg = BMGRuntime().accumulate_graph( [ matrix_scale_rhs(), matrix_scale_lhs(), non_matrix_tensor_mult_lhs(), non_matrix_tensor_mult_rhs(), ], {}, ) transformed_graph, error_report = copy_and_replace( bmg, lambda c, s: Tensorizer(c, s) ) before = to_dot(bmg) expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=1.25]; N06[label=Tensor]; N07[label="*"]; N08[label=Query]; N09[label=Tensor]; N10[label="*"]; N11[label=Query]; N12[label=2.35]; N13[label=Tensor]; N14[label="*"]; N15[label=Query]; N16[label=Sample]; N17[label=Sample]; N18[label=Tensor]; N19[label="*"]; N20[label=Query]; N00 -> N02[label=mu]; N01 -> N02[label=sigma]; N02 -> N03[label=operand]; N02 -> N04[label=operand]; N02 -> N16[label=operand]; N02 -> N17[label=operand]; N03 -> N07[label=left]; N03 -> N09[label=0]; N03 -> N09[label=1]; N03 -> N09[label=2]; N03 -> N13[label=0]; N03 -> N13[label=1]; N03 -> N13[label=2]; N03 -> N13[label=4]; N03 -> N13[label=5]; N03 -> N13[label=6]; N04 -> N06[label=0]; N04 -> N06[label=1]; N04 -> N06[label=2]; N04 -> N10[label=right]; N04 -> N14[label=right]; N05 -> N06[label=3]; N05 -> N09[label=3]; N05 -> N13[label=7]; N05 -> N18[label=7]; N06 -> N07[label=right]; N07 -> N08[label=operator]; N09 -> N10[label=left]; N10 -> N11[label=operator]; N12 -> N13[label=3]; N12 -> N18[label=3]; N13 -> N14[label=left]; N14 -> N15[label=operator]; N16 -> N19[label=left]; N17 -> N18[label=0]; N17 -> N18[label=1]; N17 -> N18[label=2]; N17 -> N18[label=4]; N17 -> N18[label=5]; N17 -> N18[label=6]; N18 -> N19[label=right]; N19 -> N20[label=operator]; } """ self.assertEqual(expected.strip(), before.strip()) after = to_dot(transformed_graph) expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=1.25]; N06[label=Tensor]; N07[label=MatrixScale]; N08[label=Query]; N09[label=Tensor]; N10[label=MatrixScale]; N11[label=Query]; N12[label=2.35]; N13[label=Tensor]; N14[label=MatrixScale]; N15[label=Query]; N16[label=Sample]; N17[label=Sample]; N18[label=Tensor]; N19[label=MatrixScale]; N20[label=Query]; N00 -> N02[label=mu]; N01 -> N02[label=sigma]; N02 -> N03[label=operand]; N02 -> N04[label=operand]; N02 -> N16[label=operand]; N02 -> N17[label=operand]; N03 -> N07[label=left]; N03 -> N09[label=0]; N03 -> N09[label=1]; N03 -> N09[label=2]; N03 -> N13[label=0]; N03 -> N13[label=1]; N03 -> N13[label=2]; N03 -> N13[label=4]; N03 -> N13[label=5]; N03 -> N13[label=6]; N04 -> N06[label=0]; N04 -> N06[label=1]; N04 -> N06[label=2]; N04 -> N10[label=left]; N04 -> N14[label=left]; N05 -> N06[label=3]; N05 -> N09[label=3]; N05 -> N13[label=7]; N05 -> N18[label=7]; N06 -> N07[label=right]; N07 -> N08[label=operator]; N09 -> N10[label=right]; N10 -> N11[label=operator]; N12 -> N13[label=3]; N12 -> N18[label=3]; N13 -> N14[label=right]; N14 -> N15[label=operator]; N16 -> N19[label=left]; N17 -> N18[label=0]; N17 -> N18[label=1]; N17 -> N18[label=2]; N17 -> N18[label=4]; N17 -> N18[label=5]; N17 -> N18[label=6]; N18 -> N19[label=right]; N19 -> N20[label=operator]; } """ self.assertEqual(expected.strip(), after.strip()) def test_not_transformed(self) -> None: self.maxDiff = None bmg = BMGRuntime().accumulate_graph( [scalar_mult()], {}, ) transformed_graph, error_report = copy_and_replace( bmg, lambda c, s: Tensorizer(c, s) ) observed = to_dot(transformed_graph) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=Sample]; N5[label="*"]; N6[label=Query]; N0 -> N2[label=mu]; N1 -> N2[label=sigma]; N2 -> N3[label=operand]; N2 -> N4[label=operand]; N3 -> N5[label=left]; N4 -> N5[label=right]; N5 -> N6[label=operator]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_errors(self) -> None: self.maxDiff = None # this case verifies that even if there is nothing replacable it will error out because the errors # in this graph prevent even checking whether this graph can be tensorized bmg = BMGRuntime().accumulate_graph([mm_mismatch()], {}) transformed_graph, error_report = copy_and_replace( bmg, lambda c, s: Tensorizer(c, s) ) if len(error_report.errors) == 1: error = error_report.errors[0].__str__() expected = """ The model uses a matrix multiplication (@) operation unsupported by Bean Machine Graph. The dimensions of the operands are 2x2 and 3x1. The unsupported node was created in function call mm_mismatch(). """ self.assertEqual(expected.strip(), error.strip()) else: self.fail( "A single error message should have been generated. Tensorizing depends on sizing and a size cannot be inferred from an operation whose operand sizes are invalid." )
beanmachine-main
tests/ppl/compiler/tensorize_transformer_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference import BMGInference from torch import tensor from torch.distributions import Normal @bm.random_variable def n(n): return Normal(0, 1) @bm.random_variable def n12(): return Normal(tensor([n(3), n(4)]), 1.0) @bm.random_variable def n21(): return Normal(tensor([[n(1)], [n(2)]]), 1.0) @bm.functional def broadcast_add(): return n12() + n21() @bm.functional def fill_add_1(): return n12() + n(5) @bm.functional def fill_add_2(): return n12() + 123 class BroadcastTest(unittest.TestCase): # TODO: Test broadcast multiplication as well. def test_broadcast_add(self) -> None: self.maxDiff = None observations = {} queries = [broadcast_add()] observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before the rewrite: expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=Tensor]; N06[label=1.0]; N07[label=Normal]; N08[label=Sample]; N09[label=Sample]; N10[label=Sample]; N11[label=Tensor]; N12[label=Normal]; N13[label=Sample]; N14[label="+"]; N15[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N02 -> N04; N02 -> N09; N02 -> N10; N03 -> N05; N04 -> N05; N05 -> N07; N06 -> N07; N06 -> N12; N07 -> N08; N08 -> N14; N09 -> N11; N10 -> N11; N11 -> N12; N12 -> N13; N13 -> N14; N14 -> N15; } """ self.assertEqual(expected.strip(), observed.strip()) g, _ = BMGInference().to_graph(queries, observations) observed = g.to_dot() expected = """ digraph "graph" { N0[label="0"]; N1[label="1"]; N2[label="Normal"]; N3[label="~"]; N4[label="~"]; N5[label="Normal"]; N6[label="~"]; N7[label="Normal"]; N8[label="~"]; N9[label="~"]; N10[label="~"]; N11[label="Normal"]; N12[label="~"]; N13[label="Normal"]; N14[label="~"]; N15[label="2"]; N16[label="1"]; N17[label="ToMatrix"]; N18[label="Broadcast"]; N19[label="ToMatrix"]; N20[label="Broadcast"]; N21[label="MatrixAdd"]; N0 -> N2; N1 -> N2; N1 -> N5; N1 -> N7; N1 -> N11; N1 -> N13; N2 -> N3; N2 -> N4; N2 -> N9; N2 -> N10; N3 -> N5; N4 -> N7; N5 -> N6; N6 -> N17; N7 -> N8; N8 -> N17; N9 -> N11; N10 -> N13; N11 -> N12; N12 -> N19; N13 -> N14; N14 -> N19; N15 -> N17; N15 -> N18; N15 -> N18; N15 -> N19; N15 -> N20; N15 -> N20; N16 -> N17; N16 -> N19; N17 -> N18; N18 -> N21; N19 -> N20; N20 -> N21; Q0[label="Query"]; N21 -> Q0; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fill_add_1(self) -> None: self.maxDiff = None observations = {} queries = [fill_add_1()] observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before the rewrite: expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=Tensor]; N06[label=1.0]; N07[label=Normal]; N08[label=Sample]; N09[label=Sample]; N10[label="+"]; N11[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N02 -> N04; N02 -> N09; N03 -> N05; N04 -> N05; N05 -> N07; N06 -> N07; N07 -> N08; N08 -> N10; N09 -> N10; N10 -> N11; } """ self.assertEqual(expected.strip(), observed.strip()) # The model after converting to BMG: g, _ = BMGInference().to_graph(queries, observations) observed = g.to_dot() expected = """ digraph "graph" { N0[label="0"]; N1[label="1"]; N2[label="Normal"]; N3[label="~"]; N4[label="~"]; N5[label="Normal"]; N6[label="~"]; N7[label="Normal"]; N8[label="~"]; N9[label="~"]; N10[label="2"]; N11[label="1"]; N12[label="ToMatrix"]; N13[label="FillMatrix"]; N14[label="MatrixAdd"]; N0 -> N2; N1 -> N2; N1 -> N5; N1 -> N7; N2 -> N3; N2 -> N4; N2 -> N9; N3 -> N5; N4 -> N7; N5 -> N6; N6 -> N12; N7 -> N8; N8 -> N12; N9 -> N13; N10 -> N12; N10 -> N13; N11 -> N12; N11 -> N13; N12 -> N14; N13 -> N14; Q0[label="Query"]; N14 -> Q0; } """ self.assertEqual(expected.strip(), observed.strip()) def test_fill_add_2(self) -> None: self.maxDiff = None observations = {} queries = [fill_add_2()] observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before the rewrite: expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=Tensor]; N06[label=1.0]; N07[label=Normal]; N08[label=Sample]; N09[label=123]; N10[label="+"]; N11[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N02 -> N04; N03 -> N05; N04 -> N05; N05 -> N07; N06 -> N07; N07 -> N08; N08 -> N10; N09 -> N10; N10 -> N11; } """ self.assertEqual(expected.strip(), observed.strip()) # The model after converting to BMG: # TODO: We could constant-fold the matrix fill here, though that might # be not actually an optimization if the matrix is large enough. g, _ = BMGInference().to_graph(queries, observations) observed = g.to_dot() expected = """ digraph "graph" { N0[label="0"]; N1[label="1"]; N2[label="Normal"]; N3[label="~"]; N4[label="~"]; N5[label="Normal"]; N6[label="~"]; N7[label="Normal"]; N8[label="~"]; N9[label="2"]; N10[label="1"]; N11[label="ToMatrix"]; N12[label="123"]; N13[label="FillMatrix"]; N14[label="MatrixAdd"]; N0 -> N2; N1 -> N2; N1 -> N5; N1 -> N7; N2 -> N3; N2 -> N4; N3 -> N5; N4 -> N7; N5 -> N6; N6 -> N11; N7 -> N8; N8 -> N11; N9 -> N11; N9 -> N13; N10 -> N11; N10 -> N13; N11 -> N14; N12 -> N13; N13 -> N14; Q0[label="Query"]; N14 -> Q0; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/broadcast_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor from torch.distributions import Bernoulli, Beta, Normal @bm.random_variable def beta(): return Beta(2.0, 2.0) @bm.random_variable def flip(n): return Bernoulli(beta() * 0.5) @bm.random_variable def normal(n): return Normal(flip(n), 1.0) class CoinFlipTest(unittest.TestCase): def test_gen_mini(self) -> None: self.maxDiff = None # In the MiniBMG graph, the fact that we've observed # the flip(0) input to Normal(flip(0), 1.0) should ensure # that it is emitted into the graph as Normal(0.0, 1.0) queries = [beta(), normal(0), normal(1)] observations = { flip(0): tensor(0.0), } observed = BMGInference()._to_mini(queries, observations, indent=2) expected = """ { "comment": "Mini BMG", "nodes": [ { "operator": "CONSTANT", "type": "REAL", "value": 2.0, "sequence": 0 }, { "operator": "DISTRIBUTION_BETA", "type": "DISTRIBUTION", "in_nodes": [ 0, 0 ], "sequence": 1 }, { "operator": "SAMPLE", "type": "REAL", "in_nodes": [ 1 ], "sequence": 2 }, { "operator": "CONSTANT", "type": "REAL", "value": 0.5, "sequence": 3 }, { "operator": "MULTIPLY", "type": "REAL", "in_nodes": [ 2, 3 ], "sequence": 4 }, { "operator": "DISTRIBUTION_BERNOULLI", "type": "DISTRIBUTION", "in_nodes": [ 4 ], "sequence": 5 }, { "operator": "CONSTANT", "type": "REAL", "value": 0.0, "sequence": 6 }, { "operator": "OBSERVE", "type": "NONE", "in_nodes": [ 5, 6 ], "sequence": 7 }, { "operator": "QUERY", "type": "NONE", "query_index": 0, "in_nodes": [ 2 ], "sequence": 8 }, { "operator": "CONSTANT", "type": "REAL", "value": 1.0, "sequence": 9 }, { "operator": "DISTRIBUTION_NORMAL", "type": "DISTRIBUTION", "in_nodes": [ 6, 9 ], "sequence": 10 }, { "operator": "SAMPLE", "type": "REAL", "in_nodes": [ 10 ], "sequence": 11 }, { "operator": "QUERY", "type": "NONE", "query_index": 1, "in_nodes": [ 11 ], "sequence": 12 }, { "operator": "SAMPLE", "type": "REAL", "in_nodes": [ 5 ], "sequence": 13 }, { "operator": "DISTRIBUTION_NORMAL", "type": "DISTRIBUTION", "in_nodes": [ 13, 9 ], "sequence": 14 }, { "operator": "SAMPLE", "type": "REAL", "in_nodes": [ 14 ], "sequence": 15 }, { "operator": "QUERY", "type": "NONE", "query_index": 2, "in_nodes": [ 15 ], "sequence": 16 } ] } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/gen_mini_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.model.rv_identifier import RVIdentifier def _rv_id() -> RVIdentifier: return RVIdentifier(lambda a, b: a, (1, 1)) def construct_model_graph(is_nested_cons: bool = True): bmg = BMGraphBuilder() zero = bmg.add_pos_real(0.0) one = bmg.add_pos_real(1.0) two = bmg.add_pos_real(2.0) three = bmg.add_pos_real(3.0) normal_one = bmg.add_normal(three, three) normal_two = bmg.add_normal(one, two) sample_normal_one = bmg.add_sample(normal_one) sample_normal_two = bmg.add_sample(normal_two) half = bmg.add_probability(0.5) bernoulli = bmg.add_bernoulli(half) bern_sample = bmg.add_sample(bernoulli) norm_if = bmg.add_if_then_else(bern_sample, sample_normal_one, sample_normal_two) if is_nested_cons: bern_if = bmg.add_if_then_else(bern_sample, norm_if, zero) else: bern_if = bmg.add_if_then_else(bern_sample, zero, norm_if) scale_two = bmg.add_multiplication(bern_if, two) bmg.add_query(scale_two, _rv_id()) return bmg class FixIfTest(unittest.TestCase): def test_nested_if_cons_fix(self) -> None: # This test case checks the nested if fixer for the cons case # IF(COND, IF(COND, CONS2, ALT2), ALT1) --> IF(COND, CONS2, ALT1) self.maxDiff = None bmg = construct_model_graph(is_nested_cons=True) observed_before = to_dot(bmg, after_transform=False, label_edges=True) expected_before = """ digraph "graph" { N00[label=0.5]; N01[label=Bernoulli]; N02[label=Sample]; N03[label=3.0]; N04[label=Normal]; N05[label=Sample]; N06[label=1.0]; N07[label=2.0]; N08[label=Normal]; N09[label=Sample]; N10[label=if]; N11[label=0.0]; N12[label=if]; N13[label="*"]; N14[label=Query]; N00 -> N01[label=probability]; N01 -> N02[label=operand]; N02 -> N10[label=condition]; N02 -> N12[label=condition]; N03 -> N04[label=mu]; N03 -> N04[label=sigma]; N04 -> N05[label=operand]; N05 -> N10[label=consequence]; N06 -> N08[label=mu]; N07 -> N08[label=sigma]; N07 -> N13[label=right]; N08 -> N09[label=operand]; N09 -> N10[label=alternative]; N10 -> N12[label=consequence]; N11 -> N12[label=alternative]; N12 -> N13[label=left]; N13 -> N14[label=operator]; } """ self.assertEqual(observed_before.strip(), expected_before.strip()) observed = to_dot(bmg, after_transform=True, label_edges=True) expected = """ digraph "graph" { N00[label=0.5]; N01[label=Bernoulli]; N02[label=Sample]; N03[label=3.0]; N04[label=3.0]; N05[label=Normal]; N06[label=Sample]; N07[label=1.0]; N08[label=2.0]; N09[label=Normal]; N10[label=Sample]; N11[label=0.0]; N12[label=if]; N13[label=2.0]; N14[label="*"]; N15[label=Query]; N00 -> N01[label=probability]; N01 -> N02[label=operand]; N02 -> N12[label=condition]; N03 -> N05[label=mu]; N04 -> N05[label=sigma]; N05 -> N06[label=operand]; N06 -> N12[label=consequence]; N07 -> N09[label=mu]; N08 -> N09[label=sigma]; N09 -> N10[label=operand]; N11 -> N12[label=alternative]; N12 -> N14[label=left]; N13 -> N14[label=right]; N14 -> N15[label=operator]; } """ self.assertEqual(observed.strip(), expected.strip()) def test_nested_if_alt_fix(self) -> None: # This test case checks the nested if fixer for the alt case # IF(COND, CONS_1, IF(COND, CONS2, ALT2)) --> IF(COND, CONS1, ALT2) self.maxDiff = None bmg = construct_model_graph(is_nested_cons=False) observed_before = to_dot(bmg, after_transform=False, label_edges=True) expected_before = """ digraph "graph" { N00[label=0.5]; N01[label=Bernoulli]; N02[label=Sample]; N03[label=0.0]; N04[label=3.0]; N05[label=Normal]; N06[label=Sample]; N07[label=1.0]; N08[label=2.0]; N09[label=Normal]; N10[label=Sample]; N11[label=if]; N12[label=if]; N13[label="*"]; N14[label=Query]; N00 -> N01[label=probability]; N01 -> N02[label=operand]; N02 -> N11[label=condition]; N02 -> N12[label=condition]; N03 -> N12[label=consequence]; N04 -> N05[label=mu]; N04 -> N05[label=sigma]; N05 -> N06[label=operand]; N06 -> N11[label=consequence]; N07 -> N09[label=mu]; N08 -> N09[label=sigma]; N08 -> N13[label=right]; N09 -> N10[label=operand]; N10 -> N11[label=alternative]; N11 -> N12[label=alternative]; N12 -> N13[label=left]; N13 -> N14[label=operator]; } """ self.assertEqual(observed_before.strip(), expected_before.strip()) observed = to_dot(bmg, after_transform=True, label_edges=True) expected = """ digraph "graph" { N00[label=0.5]; N01[label=Bernoulli]; N02[label=Sample]; N03[label=3.0]; N04[label=3.0]; N05[label=Normal]; N06[label=Sample]; N07[label=1.0]; N08[label=2.0]; N09[label=Normal]; N10[label=Sample]; N11[label=0.0]; N12[label=if]; N13[label=2.0]; N14[label="*"]; N15[label=Query]; N00 -> N01[label=probability]; N01 -> N02[label=operand]; N02 -> N12[label=condition]; N03 -> N05[label=mu]; N04 -> N05[label=sigma]; N05 -> N06[label=operand]; N07 -> N09[label=mu]; N08 -> N09[label=sigma]; N09 -> N10[label=operand]; N10 -> N12[label=alternative]; N11 -> N12[label=consequence]; N12 -> N14[label=left]; N13 -> N14[label=right]; N14 -> N15[label=operator]; } """ self.assertEqual(observed.strip(), expected.strip())
beanmachine-main
tests/ppl/compiler/fix_if_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """End-to-end test for tutorial on Neal's Funnel""" # This file is a manual replica of the Bento tutorial with the same name # This is a block for Beanstalk OSS readiness # TODO: Check imports for conistency import logging import math import unittest import beanmachine.ppl as bm import torch # from torch import manual_seed, tensor import torch.distributions as dist # from torch.distributions import Bernoulli, Normal, Uniform from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor # This makes the results deterministic and reproducible. logging.getLogger("beanmachine").setLevel(50) torch.manual_seed(11) # Model def normal_log_prob(mu, sigma, x): z = (x - mu) / sigma return (-1.0 / 2.0) * math.log(2.0 * math.pi) - (z**2.0 / 2.0) @bm.random_variable def z(): """ An uninformative (flat) prior for z. """ # TODO(tingley): Replace with Flat once it's part of the framework. return dist.Normal(0, 10000) @bm.random_variable def x(): """ An uninformative (flat) prior for x. """ # TODO(tingley): Replace with Flat once it's part of the framework. return dist.Normal(0, 10000) @bm.random_variable def neals_funnel_coin_flip(): """ Flip a "coin", which is heads with probability equal to the probability of drawing z and x from the true Neal's funnel posterior. """ return dist.Bernoulli( ( normal_log_prob(0.0, 3.0, z()) + normal_log_prob(0.0, (z() / 2.0).exp(), x()) ).exp() ) # Inference parameters num_samples = 1 ###000 num_chains = 4 observations = {neals_funnel_coin_flip(): tensor(1.0)} queries = [z(), x()] class tutorialNealsFunnelTest(unittest.TestCase): def test_tutorial_Neals_Funnel(self) -> None: """Check BM and BMG inference both terminate""" self.maxDiff = None # Inference with BM # Note: No explicit seed here (in original tutorial model). Should we add one? nmc = bm.SingleSiteNewtonianMonteCarlo() _ = nmc.infer( queries=queries, observations=observations, num_samples=num_samples, num_chains=num_chains, ) hmc = bm.SingleSiteHamiltonianMonteCarlo( trajectory_length=0.1, initial_step_size=0.01 ) _ = hmc.infer( queries=queries, observations=observations, num_samples=num_samples, num_chains=num_chains, ) ghmc = bm.CompositionalInference( { (z, x): bm.SingleSiteHamiltonianMonteCarlo( trajectory_length=0.1, initial_step_size=0.01 ), } ) ghmc.infer( queries=queries, observations=observations, num_samples=num_samples, num_chains=num_chains, ) bmg = BMGInference() _ = bmg.infer( queries=queries, observations=observations, num_samples=num_samples, num_chains=1, # TODO[Walid]: 1 should be num_chains ) self.assertTrue(True, msg="We just want to check this point is reached") def test_tutorial_Neals_Funnel_to_dot_cpp_python( self, ) -> None: self.maxDiff = None observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label=0.0]; N01[label=10000.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=-0.9189385332046727]; N06[label=0.3333333333333333]; N07[label="*"]; N08[label=2.0]; N09[label="**"]; N10[label=0.5]; N11[label="*"]; N12[label="-"]; N13[label="*"]; N14[label=Exp]; N15[label=-1.0]; N16[label="**"]; N17[label=ToReal]; N18[label="*"]; N19[label="**"]; N20[label="*"]; N21[label="-"]; N22[label="+"]; N23[label=Exp]; N24[label=ToProb]; N25[label=Bernoulli]; N26[label=Sample]; N27[label="Observation True"]; N28[label=Query]; N29[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N02 -> N04; N03 -> N07; N03 -> N13; N03 -> N28; N04 -> N18; N04 -> N29; N05 -> N22; N05 -> N22; N06 -> N07; N07 -> N09; N08 -> N09; N08 -> N19; N09 -> N11; N10 -> N11; N10 -> N13; N10 -> N20; N11 -> N12; N12 -> N22; N13 -> N14; N14 -> N16; N15 -> N16; N16 -> N17; N17 -> N18; N18 -> N19; N19 -> N20; N20 -> N21; N21 -> N22; N22 -> N23; N23 -> N24; N24 -> N25; N25 -> N26; N26 -> N27; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_cpp(queries, observations) expected = """ graph::Graph g; uint n0 = g.add_constant_real(0.0); uint n1 = g.add_constant_pos_real(10000.0); uint n2 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n0, n1})); uint n3 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n2})); uint n4 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n2})); uint n5 = g.add_constant_real(-0.9189385332046727); uint n6 = g.add_constant_real(0.3333333333333333); uint n7 = g.add_operator( graph::OperatorType::MULTIPLY, std::vector<uint>({n3, n6})); uint n8 = g.add_constant_pos_real(2.0); uint n9 = g.add_operator( graph::OperatorType::POW, std::vector<uint>({n7, n8})); uint n10 = g.add_constant_real(0.5); uint n11 = g.add_operator( graph::OperatorType::MULTIPLY, std::vector<uint>({n9, n10})); uint n12 = g.add_operator( graph::OperatorType::NEGATE, std::vector<uint>({n11})); uint n13 = g.add_operator( graph::OperatorType::MULTIPLY, std::vector<uint>({n3, n10})); uint n14 = g.add_operator( graph::OperatorType::EXP, std::vector<uint>({n13})); uint n15 = g.add_constant_real(-1.0); uint n16 = g.add_operator( graph::OperatorType::POW, std::vector<uint>({n14, n15})); uint n17 = g.add_operator( graph::OperatorType::TO_REAL, std::vector<uint>({n16})); uint n18 = g.add_operator( graph::OperatorType::MULTIPLY, std::vector<uint>({n4, n17})); uint n19 = g.add_operator( graph::OperatorType::POW, std::vector<uint>({n18, n8})); uint n20 = g.add_operator( graph::OperatorType::MULTIPLY, std::vector<uint>({n19, n10})); uint n21 = g.add_operator( graph::OperatorType::NEGATE, std::vector<uint>({n20})); uint n22 = g.add_operator( graph::OperatorType::ADD, std::vector<uint>({n5, n12, n5, n21})); uint n23 = g.add_operator( graph::OperatorType::EXP, std::vector<uint>({n22})); uint n24 = g.add_operator( graph::OperatorType::TO_PROBABILITY, std::vector<uint>({n23})); uint n25 = g.add_distribution( graph::DistributionType::BERNOULLI, graph::AtomicType::BOOLEAN, std::vector<uint>({n24})); uint n26 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n25})); g.observe(n26, true); uint q0 = g.query(n3); uint q1 = g.query(n4); """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_python(queries, observations) expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_real(0.0) n1 = g.add_constant_pos_real(10000.0) n2 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n0, n1], ) n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2]) n4 = g.add_operator(graph.OperatorType.SAMPLE, [n2]) n5 = g.add_constant_real(-0.9189385332046727) n6 = g.add_constant_real(0.3333333333333333) n7 = g.add_operator(graph.OperatorType.MULTIPLY, [n3, n6]) n8 = g.add_constant_pos_real(2.0) n9 = g.add_operator(graph.OperatorType.POW, [n7, n8]) n10 = g.add_constant_real(0.5) n11 = g.add_operator(graph.OperatorType.MULTIPLY, [n9, n10]) n12 = g.add_operator(graph.OperatorType.NEGATE, [n11]) n13 = g.add_operator(graph.OperatorType.MULTIPLY, [n3, n10]) n14 = g.add_operator(graph.OperatorType.EXP, [n13]) n15 = g.add_constant_real(-1.0) n16 = g.add_operator(graph.OperatorType.POW, [n14, n15]) n17 = g.add_operator(graph.OperatorType.TO_REAL, [n16]) n18 = g.add_operator(graph.OperatorType.MULTIPLY, [n4, n17]) n19 = g.add_operator(graph.OperatorType.POW, [n18, n8]) n20 = g.add_operator(graph.OperatorType.MULTIPLY, [n19, n10]) n21 = g.add_operator(graph.OperatorType.NEGATE, [n20]) n22 = g.add_operator( graph.OperatorType.ADD, [n5, n12, n5, n21], ) n23 = g.add_operator(graph.OperatorType.EXP, [n22]) n24 = g.add_operator(graph.OperatorType.TO_PROBABILITY, [n23]) n25 = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [n24], ) n26 = g.add_operator(graph.OperatorType.SAMPLE, [n25]) g.observe(n26, True) q0 = g.query(n3) q1 = g.query(n4) """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/tutorial_Neals_Funnel_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """End-to-end test of realistic linear regression model""" # This is copied from bento workbook N140350, simplified, and # modified to use BMG inference. import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference.bmg_inference import BMGInference from torch.distributions import Normal, Uniform @bm.random_variable def theta_0(): return Normal(0.0, 1.0) @bm.random_variable def theta_1(): return Normal(0.0, 1.0) @bm.random_variable def error(): return Uniform(0.0, 1.0) @bm.random_variable def x(i): return Normal(0.0, 1.0) @bm.random_variable def y(i): return Normal(theta_0() + theta_1() * x(i), error()) class LinearRegressionTest(unittest.TestCase): def test_linear_regression_inference(self) -> None: self.maxDiff = None # We start by generating some test data; we can use the inference engine # as a random number generator if we have no observations. # # Generate an intercept, slope, and n points such that: # # y(i) = theta_0() + theta_1() * x(i) + some normal error n = 100 x_rvs = [x(i) for i in range(n)] y_rvs = [y(i) for i in range(n)] test_samples = BMGInference().infer( [theta_0(), theta_1()] + x_rvs + y_rvs, {}, 1 ) true_intercept = test_samples[theta_0()][0].item() true_slope = test_samples[theta_1()][0].item() points = [(test_samples[x(i)][0], test_samples[y(i)][0]) for i in range(n)] # We are only pseudo-random here so we should always get the same result. expected_true_intercept = -0.05 expected_true_slope = -0.44 self.assertAlmostEqual(true_intercept, expected_true_intercept, delta=0.1) self.assertAlmostEqual(true_slope, expected_true_slope, delta=0.5) # If we then run inference when observing the set of (x, y) points we generated, # what slope and intercept do we infer? It should be close to the actual values. observed_xs = {x(i): points[i][0] for i in range(n)} observed_ys = {y(i): points[i][1] for i in range(n)} observations = {**observed_xs, **observed_ys} queries = [theta_0(), theta_1()] num_samples = 1000 samples = BMGInference().infer(queries, observations, num_samples) inferred_intercept = samples[theta_0()].mean() inferred_slope = samples[theta_1()].mean() expected_inferred_int = -0.05 expected_inferred_slope = -0.33 self.assertAlmostEqual(inferred_intercept, expected_inferred_int, delta=0.2) self.assertAlmostEqual(inferred_slope, expected_inferred_slope, delta=0.5)
beanmachine-main
tests/ppl/compiler/linear_regression_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Compilation test of Todd's Linear Regression Outliers Marginalized model""" import unittest import beanmachine.ppl as bm from beanmachine.ppl.distributions.unit import Unit from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import logaddexp, ones, tensor from torch.distributions import Bernoulli, Beta, Gamma, Normal _x_obs = tensor([0, 3, 9]) _y_obs = tensor([33, 68, 34]) _err_obs = tensor([3.6, 3.9, 2.6]) @bm.random_variable def beta_0(): return Normal(0, 10) @bm.random_variable def beta_1(): return Normal(0, 10) @bm.random_variable def sigma_out(): return Gamma(1, 1) @bm.random_variable def theta(): return Beta(2, 5) @bm.functional def f(): mu = beta_0() + beta_1() * _x_obs ns = Normal(mu, sigma_out()) ne = Normal(mu, _err_obs) log_likelihood_outlier = theta().log() + ns.log_prob(_y_obs) log_likelihood = (1 - theta()).log() + ne.log_prob(_y_obs) return logaddexp(log_likelihood_outlier, log_likelihood) @bm.random_variable def y(): return Unit(f()) # Same model, but with the "Bernoulli trick" instead of a Unit: @bm.random_variable def d(): return Bernoulli(f().exp()) # Same model, but using a logits Bernoulli @bm.random_variable def d2(): log_prob = f() logit = log_prob - (1 - log_prob.exp()).log() return Bernoulli(logits=logit) class LROMMTest(unittest.TestCase): def test_lromm_unit_to_dot(self) -> None: self.maxDiff = None queries = [beta_0(), beta_1(), sigma_out(), theta()] observations = {y(): _y_obs} with self.assertRaises(ValueError) as ex: BMGInference().to_dot(queries, observations) expected = """ Function Unit is not supported by Bean Machine Graph. """ observed = str(ex.exception) self.assertEqual(observed.strip(), expected.strip()) def test_lromm_bern_to_dot(self) -> None: self.maxDiff = None queries = [beta_0(), beta_1(), sigma_out(), theta()] observations = {d(): ones(len(_y_obs))} observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label=2.0]; N01[label=5.0]; N02[label=Beta]; N03[label=Sample]; N04[label=0.0]; N05[label=10.0]; N06[label=Normal]; N07[label=Sample]; N08[label=Sample]; N09[label=1.0]; N10[label=Gamma]; N11[label=Sample]; N12[label=3]; N13[label=1]; N14[label=Log]; N15[label=ToReal]; N16[label=FillMatrix]; N17[label=FillMatrix]; N18[label="[0,3,9]"]; N19[label=MatrixScale]; N20[label=MatrixAdd]; N21[label=0]; N22[label=index]; N23[label=Normal]; N24[label=33.0]; N25[label=LogProb]; N26[label=index]; N27[label=Normal]; N28[label=68.0]; N29[label=LogProb]; N30[label=2]; N31[label=index]; N32[label=Normal]; N33[label=34.0]; N34[label=LogProb]; N35[label=ToMatrix]; N36[label=MatrixAdd]; N37[label=index]; N38[label=complement]; N39[label=Log]; N40[label=ToReal]; N41[label=FillMatrix]; N42[label=3.5999999046325684]; N43[label=Normal]; N44[label=LogProb]; N45[label=3.9000000953674316]; N46[label=Normal]; N47[label=LogProb]; N48[label=2.5999999046325684]; N49[label=Normal]; N50[label=LogProb]; N51[label=ToMatrix]; N52[label=MatrixAdd]; N53[label=index]; N54[label=LogSumExp]; N55[label=index]; N56[label=index]; N57[label=LogSumExp]; N58[label=index]; N59[label=index]; N60[label=LogSumExp]; N61[label=ToMatrix]; N62[label=MatrixExp]; N63[label=index]; N64[label=ToProb]; N65[label=Bernoulli]; N66[label=Sample]; N67[label=index]; N68[label=ToProb]; N69[label=Bernoulli]; N70[label=Sample]; N71[label=index]; N72[label=ToProb]; N73[label=Bernoulli]; N74[label=Sample]; N75[label="Observation True"]; N76[label="Observation True"]; N77[label="Observation True"]; N78[label=Query]; N79[label=Query]; N80[label=Query]; N81[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N03 -> N14; N03 -> N38; N03 -> N81; N04 -> N06; N05 -> N06; N06 -> N07; N06 -> N08; N07 -> N17; N07 -> N78; N08 -> N19; N08 -> N79; N09 -> N10; N09 -> N10; N10 -> N11; N11 -> N23; N11 -> N27; N11 -> N32; N11 -> N80; N12 -> N16; N12 -> N17; N12 -> N35; N12 -> N41; N12 -> N51; N12 -> N61; N13 -> N16; N13 -> N17; N13 -> N26; N13 -> N35; N13 -> N41; N13 -> N51; N13 -> N55; N13 -> N56; N13 -> N61; N13 -> N67; N14 -> N15; N15 -> N16; N16 -> N36; N17 -> N20; N18 -> N19; N19 -> N20; N20 -> N22; N20 -> N26; N20 -> N31; N21 -> N22; N21 -> N37; N21 -> N53; N21 -> N63; N22 -> N23; N22 -> N43; N23 -> N25; N24 -> N25; N24 -> N44; N25 -> N35; N26 -> N27; N26 -> N46; N27 -> N29; N28 -> N29; N28 -> N47; N29 -> N35; N30 -> N31; N30 -> N58; N30 -> N59; N30 -> N71; N31 -> N32; N31 -> N49; N32 -> N34; N33 -> N34; N33 -> N50; N34 -> N35; N35 -> N36; N36 -> N37; N36 -> N55; N36 -> N58; N37 -> N54; N38 -> N39; N39 -> N40; N40 -> N41; N41 -> N52; N42 -> N43; N43 -> N44; N44 -> N51; N45 -> N46; N46 -> N47; N47 -> N51; N48 -> N49; N49 -> N50; N50 -> N51; N51 -> N52; N52 -> N53; N52 -> N56; N52 -> N59; N53 -> N54; N54 -> N61; N55 -> N57; N56 -> N57; N57 -> N61; N58 -> N60; N59 -> N60; N60 -> N61; N61 -> N62; N62 -> N63; N62 -> N67; N62 -> N71; N63 -> N64; N64 -> N65; N65 -> N66; N66 -> N75; N67 -> N68; N68 -> N69; N69 -> N70; N70 -> N76; N71 -> N72; N72 -> N73; N73 -> N74; N74 -> N77; } """ self.assertEqual(expected.strip(), observed.strip()) def test_lromm_logits_to_bmg_dot(self) -> None: self.maxDiff = None queries = [beta_0(), beta_1(), sigma_out(), theta()] observations = {d2(): ones(len(_y_obs))} # Go all the way to BMG. # (This regression-tests the bug described t131976521.) g, _ = BMGInference().to_graph(queries, observations) observed = g.to_dot() expected = """ digraph "graph" { N0[label="2"]; N1[label="5"]; N2[label="Beta"]; N3[label="~"]; N4[label="0"]; N5[label="10"]; N6[label="Normal"]; N7[label="~"]; N8[label="~"]; N9[label="1"]; N10[label="Gamma"]; N11[label="~"]; N12[label="3"]; N13[label="1"]; N14[label="Log"]; N15[label="ToReal"]; N16[label="FillMatrix"]; N17[label="FillMatrix"]; N18[label="matrix"]; N19[label="MatrixScale"]; N20[label="MatrixAdd"]; N21[label="0"]; N22[label="Index"]; N23[label="Normal"]; N24[label="33"]; N25[label="LogProb"]; N26[label="Index"]; N27[label="Normal"]; N28[label="68"]; N29[label="LogProb"]; N30[label="2"]; N31[label="Index"]; N32[label="Normal"]; N33[label="34"]; N34[label="LogProb"]; N35[label="ToMatrix"]; N36[label="MatrixAdd"]; N37[label="Index"]; N38[label="Complement"]; N39[label="Log"]; N40[label="ToReal"]; N41[label="FillMatrix"]; N42[label="3.6"]; N43[label="Normal"]; N44[label="LogProb"]; N45[label="3.9"]; N46[label="Normal"]; N47[label="LogProb"]; N48[label="2.6"]; N49[label="Normal"]; N50[label="LogProb"]; N51[label="ToMatrix"]; N52[label="MatrixAdd"]; N53[label="Index"]; N54[label="LogSumExp"]; N55[label="Index"]; N56[label="Index"]; N57[label="LogSumExp"]; N58[label="Index"]; N59[label="Index"]; N60[label="LogSumExp"]; N61[label="ToMatrix"]; N62[label="1"]; N63[label="FillMatrix"]; N64[label="MatrixExp"]; N65[label="MatrixNegate"]; N66[label="ToReal"]; N67[label="MatrixAdd"]; N68[label="ToPosReal"]; N69[label="MatrixLog"]; N70[label="MatrixNegate"]; N71[label="MatrixAdd"]; N72[label="Index"]; N73[label="BernoulliLogit"]; N74[label="~"]; N75[label="Index"]; N76[label="BernoulliLogit"]; N77[label="~"]; N78[label="Index"]; N79[label="BernoulliLogit"]; N80[label="~"]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N14; N3 -> N38; N4 -> N6; N5 -> N6; N6 -> N7; N6 -> N8; N7 -> N17; N8 -> N19; N9 -> N10; N9 -> N10; N10 -> N11; N11 -> N23; N11 -> N27; N11 -> N32; N12 -> N16; N12 -> N17; N12 -> N35; N12 -> N41; N12 -> N51; N12 -> N61; N12 -> N63; N13 -> N16; N13 -> N17; N13 -> N26; N13 -> N35; N13 -> N41; N13 -> N51; N13 -> N55; N13 -> N56; N13 -> N61; N13 -> N63; N13 -> N75; N14 -> N15; N15 -> N16; N16 -> N36; N17 -> N20; N18 -> N19; N19 -> N20; N20 -> N22; N20 -> N26; N20 -> N31; N21 -> N22; N21 -> N37; N21 -> N53; N21 -> N72; N22 -> N23; N22 -> N43; N23 -> N25; N24 -> N25; N24 -> N44; N25 -> N35; N26 -> N27; N26 -> N46; N27 -> N29; N28 -> N29; N28 -> N47; N29 -> N35; N30 -> N31; N30 -> N58; N30 -> N59; N30 -> N78; N31 -> N32; N31 -> N49; N32 -> N34; N33 -> N34; N33 -> N50; N34 -> N35; N35 -> N36; N36 -> N37; N36 -> N55; N36 -> N58; N37 -> N54; N38 -> N39; N39 -> N40; N40 -> N41; N41 -> N52; N42 -> N43; N43 -> N44; N44 -> N51; N45 -> N46; N46 -> N47; N47 -> N51; N48 -> N49; N49 -> N50; N50 -> N51; N51 -> N52; N52 -> N53; N52 -> N56; N52 -> N59; N53 -> N54; N54 -> N61; N55 -> N57; N56 -> N57; N57 -> N61; N58 -> N60; N59 -> N60; N60 -> N61; N61 -> N64; N61 -> N71; N62 -> N63; N63 -> N67; N64 -> N65; N65 -> N66; N66 -> N67; N67 -> N68; N68 -> N69; N69 -> N70; N70 -> N71; N71 -> N72; N71 -> N75; N71 -> N78; N72 -> N73; N73 -> N74; N75 -> N76; N76 -> N77; N78 -> N79; N79 -> N80; O0[label="Observation"]; N74 -> O0; O1[label="Observation"]; N77 -> O1; O2[label="Observation"]; N80 -> O2; Q0[label="Query"]; N7 -> Q0; Q1[label="Query"]; N8 -> Q1; Q2[label="Query"]; N11 -> Q2; Q3[label="Query"]; N3 -> Q3; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/lromm_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import platform import unittest import beanmachine.ppl as bm from beanmachine.ppl.inference import BMGInference from torch import exp, log from torch.distributions import Normal @bm.random_variable def norm(x): return Normal(0.0, 1.0) @bm.functional def sum_1(counter): sum = 0.0 for i in range(counter): sum = sum + exp(norm(i)) return sum @bm.functional def sum_2(): return log(sum_1(100)) def get_report(skip_optimizations): observations = {} queries = [sum_2()] number_samples = 1000 _, perf_report = BMGInference()._infer( queries, observations, number_samples, skip_optimizations=skip_optimizations ) return perf_report class LogSumExpPerformanceTest(unittest.TestCase): def test_perf_num_nodes_edges(self) -> None: """ Test to check if LogSumExp Transformation reduces the number of nodes and number of edges using the performance report returned by BMGInference. """ if platform.system() == "Windows": self.skipTest("Disabling *_perf_test.py until flakiness is resolved") self.maxDiff = None skip_optimizations = { "beta_bernoulli_conjugate_fixer", "beta_binomial_conjugate_fixer", "normal_normal_conjugate_fixer", } report_w_optimization = get_report(skip_optimizations) self.assertEqual(report_w_optimization.node_count, 104) self.assertEqual(report_w_optimization.edge_count, 202) skip_optimizations = { "logsumexp_fixer", "beta_bernoulli_conjugate_fixer", "beta_binomial_conjugate_fixer", "normal_normal_conjugate_fixer", } report_wo_optimization = get_report(skip_optimizations) self.assertEqual(report_wo_optimization.node_count, 205) self.assertEqual(report_wo_optimization.edge_count, 303)
beanmachine-main
tests/ppl/compiler/fix_logsumexp_perf_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm import torch from beanmachine.ppl.inference import BMGInference from torch.distributions import Normal @bm.random_variable def scalar(): return Normal(0.0, 1.0) matrix = torch.tensor([20, 40]) @bm.functional def scaled(): return scalar() * matrix @bm.functional def scaled_sym(): return matrix * scalar() @bm.functional def scaled2(): return scalar() * torch.tensor([scalar(), scalar()]) @bm.functional def scaled2_sym(): return (torch.tensor([scalar(), scalar()])) * scalar() @bm.functional def multiple_scalars(): return scalar() * scalar() * matrix * scalar() * scalar() class FixMatrixScaleTest(unittest.TestCase): def test_fix_matrix_scale_1(self) -> None: self.maxDiff = None observations = {} queries = [scaled()] num_samples = 1000 num_chains = 1 # Sanity check to make sure the model is valid nmc = bm.SingleSiteNewtonianMonteCarlo() _ = nmc.infer( queries=queries, observations=observations, num_samples=num_samples, num_chains=num_chains, ) observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before optimization expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label="[20,40]"]; N5[label="*"]; N6[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N5; N4 -> N5; N5 -> N6; } """ self.assertEqual(expected.strip(), observed.strip()) # After optimization: observed = BMGInference().to_dot(queries, observations, after_transform=True) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label="[20,40]"]; N5[label=MatrixScale]; N6[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N5; N4 -> N5; N5 -> N6; }""" self.assertEqual(expected.strip(), observed.strip()) # The model runs on Bean Machine Graph _ = BMGInference().infer(queries, observations, num_samples=num_samples) def test_fix_matrix_scale_1_sym(self) -> None: self.maxDiff = None observations = {} queries = [scaled_sym()] num_samples = 1000 num_chains = 1 # Sanity check to make sure the model is valid nmc = bm.SingleSiteNewtonianMonteCarlo() _ = nmc.infer( queries=queries, observations=observations, num_samples=num_samples, num_chains=num_chains, ) observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before optimization expected = """ digraph "graph" { N0[label="[20,40]"]; N1[label=0.0]; N2[label=1.0]; N3[label=Normal]; N4[label=Sample]; N5[label="*"]; N6[label=Query]; N0 -> N5; N1 -> N3; N2 -> N3; N3 -> N4; N4 -> N5; N5 -> N6; } """ self.assertEqual(expected.strip(), observed.strip()) # After optimization: observed = BMGInference().to_dot(queries, observations, after_transform=True) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label="[20,40]"]; N5[label=MatrixScale]; N6[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N5; N4 -> N5; N5 -> N6; }""" self.assertEqual(expected.strip(), observed.strip()) # The model runs on Bean Machine Graph _ = BMGInference().infer(queries, observations, num_samples=num_samples) def test_fix_matrix_scale_2(self) -> None: self.maxDiff = None observations = {} queries = [scaled2()] num_samples = 1000 num_chains = 1 # Sanity check to make sure the model is valid nmc = bm.SingleSiteNewtonianMonteCarlo() _ = nmc.infer( queries=queries, observations=observations, num_samples=num_samples, num_chains=num_chains, ) observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before optimization expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=Tensor]; N5[label="*"]; N6[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; N3 -> N4; N3 -> N5; N4 -> N5; N5 -> N6; } """ self.assertEqual(expected.strip(), observed.strip()) # After optimization: observed = BMGInference().to_dot(queries, observations, after_transform=True) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=2]; N5[label=1]; N6[label=ToMatrix]; N7[label=MatrixScale]; N8[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N6; N3 -> N6; N3 -> N7; N4 -> N6; N5 -> N6; N6 -> N7; N7 -> N8; } """ self.assertEqual(expected.strip(), observed.strip()) # The model runs on Bean Machine Graph _ = BMGInference().infer(queries, observations, num_samples=num_samples) def test_fix_matrix_scale_2_sym(self) -> None: self.maxDiff = None observations = {} queries = [scaled2_sym()] num_samples = 1000 num_chains = 1 # Sanity check to make sure the model is valid nmc = bm.SingleSiteNewtonianMonteCarlo() _ = nmc.infer( queries=queries, observations=observations, num_samples=num_samples, num_chains=num_chains, ) observed = BMGInference().to_dot(queries, observations, after_transform=False) # The model before optimization expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=Tensor]; N5[label="*"]; N6[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; N3 -> N4; N3 -> N5; N4 -> N5; N5 -> N6; } """ self.assertEqual(expected.strip(), observed.strip()) # After optimization: observed = BMGInference().to_dot(queries, observations, after_transform=True) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=2]; N5[label=1]; N6[label=ToMatrix]; N7[label=MatrixScale]; N8[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N6; N3 -> N6; N3 -> N7; N4 -> N6; N5 -> N6; N6 -> N7; N7 -> N8; } """ self.assertEqual(expected.strip(), observed.strip()) # The model runs on Bean Machine Graph _ = BMGInference().infer(queries, observations, num_samples=num_samples) def test_fix_matrix_scale_3(self) -> None: # TODO: The matrix scale optimizer correctly removes the extra matrix scale # but the multiary multiplication optimizer does not optimize to a single # multiplication node. That optimizer does not optimize nodes where the # outgoing edge count is more than one, but in this case the outgoing # edges are to orphaned nodes, illustrating a flaw in this design. # We might consider always doing the optimization even if there are multiple # outgoing edges -- that risks making a suboptimal graph but that scenario # is likely rare. Or we could write an orphan-trimming pass. self.maxDiff = None observations = {} queries = [multiple_scalars()] observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label="*"]; N5[label="*"]; N6[label="*"]; N7[label="[20,40]"]; N8[label=MatrixScale]; N9[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; N3 -> N4; N3 -> N5; N3 -> N6; N4 -> N5; N5 -> N6; N6 -> N8; N7 -> N8; N8 -> N9; } """ self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/fix_matrix_scale_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm import scipy from beanmachine.ppl.inference import BMGInference from torch.distributions import Normal @bm.random_variable def norm(x): return Normal(0.0, 1.0) @bm.functional def sum_1(): return norm(0) + norm(1) + norm(2) @bm.functional def sum_2(): return norm(3) + norm(4) + norm(5) @bm.functional def sum_3(): return sum_1() + 5.0 @bm.functional def sum_4(): return sum_1() + sum_2() class DisableTransformationsTest(unittest.TestCase): def test_multiary_ops_opt_to_dot(self) -> None: self.maxDiff = None observations = {} queries = [sum_3(), sum_4()] skip_optimizations = {"multiary_addition_fixer"} observed = BMGInference().to_dot( queries, observations, skip_optimizations=skip_optimizations ) # Expected model when skipping multiary addition optimization expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=Sample]; N06[label="+"]; N07[label="+"]; N08[label=5.0]; N09[label="+"]; N10[label=Query]; N11[label=Sample]; N12[label=Sample]; N13[label=Sample]; N14[label="+"]; N15[label="+"]; N16[label="+"]; N17[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N02 -> N04; N02 -> N05; N02 -> N11; N02 -> N12; N02 -> N13; N03 -> N06; N04 -> N06; N05 -> N07; N06 -> N07; N07 -> N09; N07 -> N16; N08 -> N09; N09 -> N10; N11 -> N14; N12 -> N14; N13 -> N15; N14 -> N15; N15 -> N16; N16 -> N17; } """ self.assertEqual(expected.strip(), observed.strip()) # Expected graph without skipping multiary addition optimization: observed = BMGInference().to_dot(queries, observations) expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=Sample]; N06[label="+"]; N07[label=5.0]; N08[label="+"]; N09[label=Query]; N10[label=Sample]; N11[label=Sample]; N12[label=Sample]; N13[label="+"]; N14[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N02 -> N04; N02 -> N05; N02 -> N10; N02 -> N11; N02 -> N12; N03 -> N06; N04 -> N06; N05 -> N06; N06 -> N08; N06 -> N13; N07 -> N08; N08 -> N09; N10 -> N13; N11 -> N13; N12 -> N13; N13 -> N14; } """ self.assertEqual(expected.strip(), observed.strip()) def test_multiary_ops_opt_inference(self) -> None: observations = {} queries = [sum_3(), sum_4()] num_samples = 1000 skip_optimizations = {"multiary_addition_fixer"} posterior_wo_opt = BMGInference().infer( queries, observations, num_samples, 1, skip_optimizations=skip_optimizations ) sum_3_samples_wo_opt = posterior_wo_opt[sum_3()][0] sum_4_samples_wo_opt = posterior_wo_opt[sum_4()][0] posterior_w_opt = BMGInference().infer(queries, observations, num_samples) sum_3_samples_w_opt = posterior_w_opt[sum_3()][0] sum_4_samples_w_opt = posterior_w_opt[sum_4()][0] self.assertGreaterEqual( scipy.stats.ks_2samp(sum_3_samples_wo_opt, sum_3_samples_w_opt).pvalue, 0.05 ) self.assertGreaterEqual( scipy.stats.ks_2samp(sum_4_samples_wo_opt, sum_4_samples_w_opt).pvalue, 0.05 )
beanmachine-main
tests/ppl/compiler/disable_transformations_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Compare original and conjugate prior transformed Beta-Binomial model""" import random import unittest import beanmachine.ppl as bm import scipy import torch from beanmachine.ppl.examples.conjugate_models.beta_binomial import BetaBinomialModel from beanmachine.ppl.inference.bmg_inference import BMGInference from torch import tensor from torch.distributions import Beta class BetaBinomialTransformedModel(BetaBinomialModel): """Closed-form Posterior due to conjugacy""" @bm.random_variable def theta_transformed(self): # Analytical posterior Beta(alpha + sum x_i, beta + sum N - sum x_i) return Beta(self.alpha_ + 3.0, self.beta_ + (self.n_ - 3.0)) class BetaBinomialConjugateModelTest(unittest.TestCase): def test_beta_binomial_conjugate_graph(self) -> None: original_model = BetaBinomialModel(2.0, 2.0, 4.0) queries = [original_model.theta()] observations = {original_model.x(): tensor(3.0)} skip_optimizations = set() bmg = BMGInference() original_graph = bmg.to_dot( queries, observations, skip_optimizations=skip_optimizations ) transformed_model = BetaBinomialTransformedModel(2.0, 2.0, 4.0) queries_transformed = [transformed_model.theta_transformed()] observations_transformed = {} transformed_graph = bmg.to_dot(queries_transformed, observations_transformed) self.assertEqual(original_graph, transformed_graph) def test_beta_binomial_conjugate(self) -> None: """ KS test to check if theta samples from BetaBinomialModel and BetaBinomialTransformedModel is within a certain bound. We initialize the seed to ensure the test is deterministic. """ seed = 0 torch.manual_seed(seed) random.seed(seed) original_model = BetaBinomialModel(2.0, 2.0, 4.0) queries = [original_model.theta()] observations = {original_model.x(): tensor(3.0)} num_samples = 1000 bmg = BMGInference() posterior_original_model = bmg.infer(queries, observations, num_samples) theta_samples_original = posterior_original_model[original_model.theta()][0] transformed_model = BetaBinomialTransformedModel(2.0, 2.0, 4.0) queries_transformed = [transformed_model.theta_transformed()] observations_transformed = {} posterior_transformed_model = bmg.infer( queries_transformed, observations_transformed, num_samples ) theta_samples_transformed = posterior_transformed_model[ transformed_model.theta_transformed() ][0] self.assertEqual( type(theta_samples_original), type(theta_samples_transformed), "Sample type of original and transformed model should be the same.", ) self.assertEqual( len(theta_samples_original), len(theta_samples_transformed), "Sample size of original and transformed model should be the same.", ) self.assertGreaterEqual( scipy.stats.ks_2samp( theta_samples_original, theta_samples_transformed ).pvalue, 0.05, )
beanmachine-main
tests/ppl/compiler/fix_beta_binomial_basic_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Tests for bm_to_bmg.py""" import unittest import beanmachine.ppl as bm from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph from beanmachine.ppl.compiler.runtime import BMGRuntime from torch import tensor from torch.distributions import ( Bernoulli, Beta, Binomial, Chi2, Gamma, HalfCauchy, Normal, StudentT, Uniform, ) def tidy(s: str) -> str: return "\n".join(c.strip() for c in s.strip().split("\n")).strip() # These are cases where we just have either a straightforward sample from # a distribution parameterized with constants, or a distribution parameterized # with a sample from another distribution. # # * No arithmetic # * No interesting type conversions # * No use of a sample as an index. # @bm.random_variable def flip_straight_constant(): return Bernoulli(tensor(0.5)) @bm.random_variable def flip_logit_constant(): logits = tensor(-2.0) return Bernoulli(logits=logits) @bm.random_variable def standard_normal(): return Normal(0.0, 1.0) @bm.random_variable def flip_logit_normal(): logits = standard_normal() return Bernoulli(logits=logits) @bm.random_variable def beta_constant(): return Beta(1.0, 1.0) @bm.random_variable def hc(i): return HalfCauchy(1.0) @bm.random_variable def beta_hc(): return Beta(hc(1), hc(2)) @bm.random_variable def student_t(): return StudentT(hc(1), standard_normal(), hc(2)) @bm.random_variable def bin_constant(): return Binomial(3, 0.5) @bm.random_variable def gamma(): return Gamma(1.0, 2.0) @bm.random_variable def flat(): return Uniform(0.0, 1.0) @bm.random_variable def chi2(): return Chi2(8.0) expected_bmg_1 = """ 0: CONSTANT(probability 0.5) (out nodes: 1, 22) 1: BERNOULLI(0) (out nodes: 2) 2: SAMPLE(1) (out nodes: ) queried 3: CONSTANT(probability 0.119203) (out nodes: 4) 4: BERNOULLI(3) (out nodes: 5) 5: SAMPLE(4) (out nodes: ) queried 6: CONSTANT(real 0) (out nodes: 8) 7: CONSTANT(positive real 1) (out nodes: 8, 12, 12, 14, 25) 8: NORMAL(6, 7) (out nodes: 9) 9: SAMPLE(8) (out nodes: 10, 19) queried 10: BERNOULLI_LOGIT(9) (out nodes: 11) 11: SAMPLE(10) (out nodes: ) queried 12: BETA(7, 7) (out nodes: 13) 13: SAMPLE(12) (out nodes: ) queried 14: HALF_CAUCHY(7) (out nodes: 15, 16) 15: SAMPLE(14) (out nodes: 17, 19) queried 16: SAMPLE(14) (out nodes: 17, 19) queried 17: BETA(15, 16) (out nodes: 18) 18: SAMPLE(17) (out nodes: ) queried 19: STUDENT_T(15, 9, 16) (out nodes: 20) 20: SAMPLE(19) (out nodes: ) queried 21: CONSTANT(natural 3) (out nodes: 22) 22: BINOMIAL(21, 0) (out nodes: 23) 23: SAMPLE(22) (out nodes: ) queried 24: CONSTANT(positive real 2) (out nodes: 25) 25: GAMMA(7, 24) (out nodes: 26) 26: SAMPLE(25) (out nodes: ) queried 27: FLAT() (out nodes: 28) 28: SAMPLE(27) (out nodes: ) queried 29: CONSTANT(positive real 4) (out nodes: 31) 30: CONSTANT(positive real 0.5) (out nodes: 31) 31: GAMMA(29, 30) (out nodes: 32) 32: SAMPLE(31) (out nodes: ) queried """ # These are cases where we have a type conversion on a sample. @bm.random_variable def normal_from_bools(): # Converts Boolean to real, positive real # This is of course dubious as we would not typically # expect the standard deviation to be zero or one, but # it illustrates that the type conversion works. # TODO: Consider adding a warning for conversion from # TODO: bool to positive real. return Normal(flip_straight_constant(), flip_straight_constant()) @bm.random_variable def binomial_from_bools(): # Converts Boolean to natural and probability return Binomial(flip_straight_constant(), flip_straight_constant()) expected_bmg_2 = """ 0: CONSTANT(probability 0.5) (out nodes: 1) 1: BERNOULLI(0) (out nodes: 2) 2: SAMPLE(1) (out nodes: 3, 4, 9, 12) 3: TO_REAL(2) (out nodes: 5) 4: TO_POS_REAL(2) (out nodes: 5) 5: NORMAL(3, 4) (out nodes: 6) 6: SAMPLE(5) (out nodes: ) queried 7: CONSTANT(natural 1) (out nodes: 9) 8: CONSTANT(natural 0) (out nodes: 9) 9: IF_THEN_ELSE(2, 7, 8) (out nodes: 13) 10: CONSTANT(probability 1) (out nodes: 12) 11: CONSTANT(probability 1e-10) (out nodes: 12) 12: IF_THEN_ELSE(2, 10, 11) (out nodes: 13) 13: BINOMIAL(9, 12) (out nodes: 14) 14: SAMPLE(13) (out nodes: ) queried """ # Here we multiply a bool by a natural, and then use that as a natural. # This cannot be turned into a BMG that uses multiplication because # there is no multiplication defined on naturals or bools; the best # we could do as a multiplication is to turn both into a positive real # and multiply those. But we *can* turn this into an if-then-else # that takes a bool and returns either the given natural or zero, # so that's what we'll do. @bm.random_variable def bool_times_natural(): return Binomial(bin_constant() * flip_straight_constant(), 0.5) expected_bmg_3 = """ 0: CONSTANT(natural 3) (out nodes: 2) 1: CONSTANT(probability 0.5) (out nodes: 2, 4, 8) 2: BINOMIAL(0, 1) (out nodes: 3) 3: SAMPLE(2) (out nodes: 7) 4: BERNOULLI(1) (out nodes: 5) 5: SAMPLE(4) (out nodes: 7) 6: CONSTANT(natural 0) (out nodes: 7) 7: IF_THEN_ELSE(5, 3, 6) (out nodes: 8) 8: BINOMIAL(7, 1) (out nodes: 9) 9: SAMPLE(8) (out nodes: ) queried """ # Tests for math functions @bm.random_variable def math1(): # log(R+) -> R # exp(R+) -> R+ return Normal(hc(0).log(), hc(1).exp()) @bm.random_variable def math2(): # R+ ** R+ -> R+ return HalfCauchy(hc(2) ** hc(3)) @bm.random_variable def math3(): # PHI return Bernoulli(Normal(0.0, 1.0).cdf(hc(4))) @bm.random_variable def math4(): # PHI, alternative syntax # TODO: Add a test where the value passed to cdf is a named argument. return Bernoulli(Normal.cdf(Normal(0.0, 1.0), hc(4))) expected_bmg_4 = """ 0: CONSTANT(positive real 1) (out nodes: 1) 1: HALF_CAUCHY(0) (out nodes: 2, 3, 8, 9, 13) 2: SAMPLE(1) (out nodes: 4) 3: SAMPLE(1) (out nodes: 5) 4: LOG(2) (out nodes: 6) 5: EXP(3) (out nodes: 6) 6: NORMAL(4, 5) (out nodes: 7) 7: SAMPLE(6) (out nodes: ) queried 8: SAMPLE(1) (out nodes: 10) 9: SAMPLE(1) (out nodes: 10) 10: POW(8, 9) (out nodes: 11) 11: HALF_CAUCHY(10) (out nodes: 12) 12: SAMPLE(11) (out nodes: ) queried 13: SAMPLE(1) (out nodes: 14) 14: TO_REAL(13) (out nodes: 15) 15: PHI(14) (out nodes: 16) 16: BERNOULLI(15) (out nodes: 17) 17: SAMPLE(16) (out nodes: ) queried """ # Demonstrate that we generate 1-p as a complement @bm.random_variable def flip_complement(): return Bernoulli(1.0 - beta_constant()) expected_bmg_5 = """ 0: CONSTANT(positive real 1) (out nodes: 1, 1) 1: BETA(0, 0) (out nodes: 2) 2: SAMPLE(1) (out nodes: 3) 3: COMPLEMENT(2) (out nodes: 4) 4: BERNOULLI(3) (out nodes: 5) 5: SAMPLE(4) (out nodes: ) queried """ # Demonstrate that we generate -log(prob) as a positive real. @bm.random_variable def beta_neg_log(): return Beta(-beta_constant().log(), 1.0) expected_bmg_6 = """ 0: CONSTANT(positive real 1) (out nodes: 1, 1, 5) 1: BETA(0, 0) (out nodes: 2) 2: SAMPLE(1) (out nodes: 3) 3: LOG(2) (out nodes: 4) 4: NEGATE(3) (out nodes: 5) 5: BETA(4, 0) (out nodes: 6) 6: SAMPLE(5) (out nodes: ) queried """ # Demonstrate that identity additions and multiplications # are removed from the graph. Here we are computing # 0 + 0 * hc(0) + 1 * hc(1) + 2 * hc(2) # but as you can see, in the final program we generate # the code as though we had written hc(1) + 2 * hc(2). # # TODO: However, note that we still do emit a sample # for hc(0) into the graph, even though it is unused. # We might consider trimming sample operations which # are ancestors of no observation or query. @bm.random_variable def beta_eliminate_identities(): s = 0.0 for i in [0, 1, 2]: s = s + i * hc(i) return Beta(s, 4.0) expected_bmg_7 = """ digraph "graph" { N0[label="1"]; N1[label="HalfCauchy"]; N2[label="~"]; N3[label="~"]; N4[label="~"]; N5[label="2"]; N6[label="*"]; N7[label="+"]; N8[label="4"]; N9[label="Beta"]; N10[label="~"]; N0 -> N1; N1 -> N2; N1 -> N3; N1 -> N4; N3 -> N7; N4 -> N6; N5 -> N6; N6 -> N7; N7 -> N9; N8 -> N9; N9 -> N10; Q0[label="Query"]; N10 -> Q0; } """ class GraphAccumulationTests(unittest.TestCase): def test_accumulate_simple_distributions(self) -> None: self.maxDiff = None queries = [ flip_straight_constant(), flip_logit_constant(), standard_normal(), flip_logit_normal(), beta_constant(), hc(1), hc(2), beta_hc(), student_t(), bin_constant(), gamma(), flat(), chi2(), ] bmg = BMGRuntime().accumulate_graph(queries, {}) observed = to_bmg_graph(bmg).graph.to_string() self.assertEqual(tidy(observed), tidy(expected_bmg_1)) def test_accumulate_bool_conversions(self) -> None: self.maxDiff = None queries = [normal_from_bools(), binomial_from_bools()] bmg = BMGRuntime().accumulate_graph(queries, {}) observed = to_bmg_graph(bmg).graph.to_string() self.assertEqual(tidy(observed), tidy(expected_bmg_2)) def test_accumulate_bool_nat_mult(self) -> None: self.maxDiff = None queries = [bool_times_natural()] bmg = BMGRuntime().accumulate_graph(queries, {}) observed = to_bmg_graph(bmg).graph.to_string() self.assertEqual(tidy(observed), tidy(expected_bmg_3)) def test_accumulate_math(self) -> None: self.maxDiff = None queries = [math1(), math2(), math3()] bmg = BMGRuntime().accumulate_graph(queries, {}) observed = to_bmg_graph(bmg).graph.to_string() self.assertEqual(tidy(observed), tidy(expected_bmg_4)) # Try with a different version of CDF syntax. queries = [math1(), math2(), math4()] bmg = BMGRuntime().accumulate_graph(queries, {}) observed = to_bmg_graph(bmg).graph.to_string() self.assertEqual(tidy(observed), tidy(expected_bmg_4)) def test_accumulate_complement(self) -> None: self.maxDiff = None queries = [flip_complement()] bmg = BMGRuntime().accumulate_graph(queries, {}) observed = to_bmg_graph(bmg).graph.to_string() self.assertEqual(tidy(observed), tidy(expected_bmg_5)) def test_accumulate_neg_log(self) -> None: self.maxDiff = None queries = [beta_neg_log()] bmg = BMGRuntime().accumulate_graph(queries, {}) observed = to_bmg_graph(bmg).graph.to_string() self.assertEqual(tidy(observed), tidy(expected_bmg_6)) def test_accumulate_eliminate_identities(self) -> None: self.maxDiff = None # TODO: We end up with an extraneous zero addend in the # sum; eliminate that. queries = [beta_eliminate_identities()] bmg = BMGRuntime().accumulate_graph(queries, {}) observed = to_bmg_graph(bmg).graph.to_dot() self.assertEqual(expected_bmg_7.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/graph_accumulation_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import beanmachine.ppl as bm from beanmachine.ppl.compiler.gen_bmg_cpp import to_bmg_cpp from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph from beanmachine.ppl.compiler.gen_bmg_python import to_bmg_python from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.compiler.runtime import BMGRuntime from torch import tensor from torch.distributions import Bernoulli, Normal @bm.random_variable def norm(): return Normal(tensor(0.0), tensor(1.0)) @bm.random_variable def flip(): return Bernoulli(0.5) @bm.functional def f1by2(): # A 1x2 tensor in Python becomes a 2x1 matrix in BMG t = tensor([norm().exp(), norm()]) # This should become a LOGSUMEXP BMG node with no TO_MATRIX return t.logsumexp(dim=0) @bm.functional def f2by1(): # A 2x1 tensor in Python becomes a 1x2 matrix in BMG t = tensor([[norm().exp()], [norm()]]) # This should be an error; BMG requires that the matrix have a single column. return t.logsumexp(dim=0) @bm.functional def f2by3(): # A 2x3 tensor in Python becomes a 3x2 matrix in BMG t = tensor([[norm().exp(), 10, 20], [norm(), 30, 40]]) # Randomly choose one of the two columns and LSE it. # This should become an LOGSUMEXP_VECTOR node. return t[flip()].logsumexp(dim=0) class LSEVectorTest(unittest.TestCase): def test_lse1by2(self) -> None: self.maxDiff = None bmg = BMGRuntime().accumulate_graph([f1by2()], {}) observed = to_dot(bmg, after_transform=True, label_edges=False) expected = """ digraph "graph" { N0[label=0.0]; N1[label=1.0]; N2[label=Normal]; N3[label=Sample]; N4[label=Exp]; N5[label=ToReal]; N6[label=LogSumExp]; N7[label=Query]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; N3 -> N6; N4 -> N5; N5 -> N6; N6 -> N7; } """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_python(bmg).code expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_real(0.0) n1 = g.add_constant_pos_real(1.0) n2 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [n0, n1], ) n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2]) n4 = g.add_operator(graph.OperatorType.EXP, [n3]) n5 = g.add_operator(graph.OperatorType.TO_REAL, [n4]) n6 = g.add_operator(graph.OperatorType.LOGSUMEXP, [n5, n3]) q0 = g.query(n6) """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_graph(bmg).graph.to_dot() expected = """ digraph "graph" { N0[label="0"]; N1[label="1"]; N2[label="Normal"]; N3[label="~"]; N4[label="exp"]; N5[label="ToReal"]; N6[label="LogSumExp"]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N4; N3 -> N6; N4 -> N5; N5 -> N6; Q0[label="Query"]; N6 -> Q0; } """ self.assertEqual(expected.strip(), observed.strip()) def test_lse2by3(self) -> None: self.maxDiff = None bmg = BMGRuntime().accumulate_graph([f2by3()], {}) observed = to_dot(bmg, after_transform=True, label_edges=False) expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=0.5]; N05[label=Bernoulli]; N06[label=Sample]; N07[label=3]; N08[label=2]; N09[label=Exp]; N10[label=ToReal]; N11[label=10.0]; N12[label=20.0]; N13[label=30.0]; N14[label=40.0]; N15[label=ToMatrix]; N16[label=1]; N17[label=0]; N18[label=if]; N19[label=ColumnIndex]; N20[label=LogSumExp]; N21[label=Query]; N00 -> N02; N01 -> N02; N02 -> N03; N03 -> N09; N03 -> N15; N04 -> N05; N05 -> N06; N06 -> N18; N07 -> N15; N08 -> N15; N09 -> N10; N10 -> N15; N11 -> N15; N12 -> N15; N13 -> N15; N14 -> N15; N15 -> N19; N16 -> N18; N17 -> N18; N18 -> N19; N19 -> N20; N20 -> N21; } """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_cpp(bmg).code expected = """ graph::Graph g; uint n0 = g.add_constant_real(0.0); uint n1 = g.add_constant_pos_real(1.0); uint n2 = g.add_distribution( graph::DistributionType::NORMAL, graph::AtomicType::REAL, std::vector<uint>({n0, n1})); uint n3 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n2})); uint n4 = g.add_constant_probability(0.5); uint n5 = g.add_distribution( graph::DistributionType::BERNOULLI, graph::AtomicType::BOOLEAN, std::vector<uint>({n4})); uint n6 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n5})); uint n7 = g.add_constant_natural(3); uint n8 = g.add_constant_natural(2); uint n9 = g.add_operator( graph::OperatorType::EXP, std::vector<uint>({n3})); uint n10 = g.add_operator( graph::OperatorType::TO_REAL, std::vector<uint>({n9})); uint n11 = g.add_constant_real(10.0); uint n12 = g.add_constant_real(20.0); uint n13 = g.add_constant_real(30.0); uint n14 = g.add_constant_real(40.0); uint n15 = g.add_operator( graph::OperatorType::TO_MATRIX, std::vector<uint>({n7, n8, n10, n11, n12, n3, n13, n14})); uint n16 = g.add_constant_natural(1); uint n17 = g.add_constant_natural(0); uint n18 = g.add_operator( graph::OperatorType::IF_THEN_ELSE, std::vector<uint>({n6, n16, n17})); uint n19 = g.add_operator( graph::OperatorType::COLUMN_INDEX, std::vector<uint>({n15, n18})); uint n20 = g.add_operator( graph::OperatorType::LOGSUMEXP_VECTOR, std::vector<uint>({n19})); uint q0 = g.query(n20); """ self.assertEqual(expected.strip(), observed.strip()) observed = to_bmg_graph(bmg).graph.to_dot() expected = """ digraph "graph" { N0[label="0"]; N1[label="1"]; N2[label="Normal"]; N3[label="~"]; N4[label="0.5"]; N5[label="Bernoulli"]; N6[label="~"]; N7[label="3"]; N8[label="2"]; N9[label="exp"]; N10[label="ToReal"]; N11[label="10"]; N12[label="20"]; N13[label="30"]; N14[label="40"]; N15[label="ToMatrix"]; N16[label="1"]; N17[label="0"]; N18[label="IfThenElse"]; N19[label="ColumnIndex"]; N20[label="LogSumExp"]; N0 -> N2; N1 -> N2; N2 -> N3; N3 -> N9; N3 -> N15; N4 -> N5; N5 -> N6; N6 -> N18; N7 -> N15; N8 -> N15; N9 -> N10; N10 -> N15; N11 -> N15; N12 -> N15; N13 -> N15; N14 -> N15; N15 -> N19; N16 -> N18; N17 -> N18; N18 -> N19; N19 -> N20; Q0[label="Query"]; N20 -> Q0; } """ self.assertEqual(expected.strip(), observed.strip()) def test_lse2by1(self) -> None: self.maxDiff = None bmg = BMGRuntime().accumulate_graph([f2by1()], {}) expected = """ The model uses a logsumexp operation unsupported by Bean Machine Graph. The unsupported node was created in function call f2by1(). """ with self.assertRaises(ValueError) as ex: to_dot(bmg, after_transform=True) observed = str(ex.exception) self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/lse_vector_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from typing import Any import beanmachine.ppl as bm from beanmachine.ppl.compiler.runtime import BMGRuntime from beanmachine.ppl.compiler.support import ComputeSupport, Infinite, TooBig from torch import Tensor, tensor from torch.distributions import Bernoulli, Categorical, Normal def tidy(s: str) -> str: return "\n".join(c.strip() for c in s.strip().split("\n")).strip() def tensor_equality(x: Tensor, y: Tensor) -> bool: # Tensor equality is weird. Suppose x and y are both # tensor([1.0, 2.0]). Then x.eq(y) is tensor([True, True]), # and x.eq(y).all() is tensor(True). return bool(x.eq(y).all()) @bm.random_variable def flip1(n): return Bernoulli(0.5) @bm.random_variable def flip2(n): return Bernoulli(tensor([[0.5, 0.5]])) @bm.functional def to_tensor(): return tensor([2.5, flip1(0), flip1(1), flip1(2)]) @bm.random_variable def normal(): return Normal(0.0, 1.0) @bm.functional def sum1(): return flip1(0) + 1.0 @bm.functional def prod1(): return sum1() * sum1() @bm.functional def pow1(): return prod1() ** prod1() @bm.functional def ge1(): return pow1() >= prod1() @bm.functional def and1(): return ge1() & ge1() @bm.functional def negexp1(): return -prod1().exp() @bm.random_variable def cat3(): return Categorical(tensor([0.5, 0.25, 0.25])) @bm.random_variable def cat2_3(): return Categorical(tensor([[0.5, 0.25, 0.25], [0.25, 0.25, 0.5]])) @bm.random_variable def cat8_3(): return Categorical( tensor( [ [0.5, 0.25, 0.25], [0.25, 0.25, 0.5], [0.5, 0.25, 0.25], [0.25, 0.25, 0.5], [0.5, 0.25, 0.25], [0.25, 0.25, 0.5], [0.5, 0.25, 0.25], [0.25, 0.25, 0.5], [0.5, 0.25, 0.25], [0.25, 0.25, 0.5], ] ) ) @bm.random_variable def normal_or_bern(n): if n: return Normal(0.0, 1.0) return Bernoulli(0.5) @bm.random_variable def cat_or_bern(n): if n: return Categorical(tensor([0.5, 0.25, 0.25, 0.25])) return Bernoulli(0.5) @bm.functional def switch_inf(): return normal_or_bern(flip1(0)) @bm.functional def switch_4(): return cat_or_bern(flip1(0)) class NodeSupportTest(unittest.TestCase): def assertEqual(self, x: Any, y: Any) -> None: if isinstance(x, Tensor) and isinstance(y, Tensor): self.assertTrue(tensor_equality(x, y)) else: super().assertEqual(x, y) def test_node_supports(self) -> None: self.maxDiff = None rt = BMGRuntime() rt.accumulate_graph([and1(), negexp1()], {}) cs = ComputeSupport() expected_flip1 = """ tensor(0.) tensor(1.)""" observed_flip1 = str(cs[rt._rv_to_node(flip1(0))]) self.assertEqual(expected_flip1.strip(), observed_flip1.strip()) expected_sum1 = """ tensor(1.) tensor(2.)""" observed_sum1 = str(cs[rt._rv_to_node(sum1())]) self.assertEqual(expected_sum1.strip(), observed_sum1.strip()) expected_prod1 = """ tensor(1.) tensor(2.) tensor(4.)""" observed_prod1 = str(cs[rt._rv_to_node(prod1())]) self.assertEqual(expected_prod1.strip(), observed_prod1.strip()) expected_pow1 = """ tensor(1.) tensor(16.) tensor(2.) tensor(256.) tensor(4.) """ observed_pow1 = str(cs[rt._rv_to_node(pow1())]) self.assertEqual(expected_pow1.strip(), observed_pow1.strip()) expected_ge1 = """ tensor(False) tensor(True) """ observed_ge1 = str(cs[rt._rv_to_node(ge1())]) self.assertEqual(expected_ge1.strip(), observed_ge1.strip()) expected_and1 = expected_ge1 observed_and1 = str(cs[rt._rv_to_node(and1())]) self.assertEqual(expected_and1.strip(), observed_and1.strip()) # Some versions of torch display -exp(4) as -54.5981, and some display it # as -54.5982. (The actual value is -54.5981500331..., which is not an excuse # for some versions getting it wrong.) To avoid this test randomly failing # depending on which version of torch we're using, we'll truncate to integers. expected_exp1 = "['-2', '-54', '-7']" results = [str(int(t)) for t in cs[rt._rv_to_node(negexp1())]] results.sort() self.assertEqual(expected_exp1.strip(), str(results).strip()) def test_bernoulli_support(self) -> None: self.maxDiff = None rt = BMGRuntime() rt.accumulate_graph([flip2(0)], {}) sample = rt._rv_to_node(flip2(0)) s = ComputeSupport() observed = str(s[sample]) expected = """ tensor([[0., 0.]]) tensor([[0., 1.]]) tensor([[1., 0.]]) tensor([[1., 1.]])""" self.assertEqual(expected.strip(), observed.strip()) def test_categorical_support(self) -> None: self.maxDiff = None rt = BMGRuntime() rt.accumulate_graph([cat3(), cat2_3(), cat8_3()], {}) s = ComputeSupport() c3 = rt._rv_to_node(cat3()) observed_c3 = str(s[c3]) expected_c3 = """ tensor(0) tensor(1) tensor(2) """ self.assertEqual(expected_c3.strip(), observed_c3.strip()) c23 = rt._rv_to_node(cat2_3()) observed_c23 = str(s[c23]) expected_c23 = """ tensor([0, 0]) tensor([0, 1]) tensor([0, 2]) tensor([1, 0]) tensor([1, 1]) tensor([1, 2]) tensor([2, 0]) tensor([2, 1]) tensor([2, 2]) """ self.assertEqual(expected_c23.strip(), observed_c23.strip()) c83 = rt._rv_to_node(cat8_3()) observed_c23 = s[c83] self.assertTrue(observed_c23 is TooBig) def test_stochastic_tensor_support(self) -> None: self.maxDiff = None rt = BMGRuntime() rt.accumulate_graph([to_tensor()], {}) tm = rt._rv_to_node(to_tensor()) s = ComputeSupport() observed = str(s[tm]) expected = """ tensor([2.5000, 0.0000, 0.0000, 0.0000]) tensor([2.5000, 0.0000, 0.0000, 1.0000]) tensor([2.5000, 0.0000, 1.0000, 0.0000]) tensor([2.5000, 0.0000, 1.0000, 1.0000]) tensor([2.5000, 1.0000, 0.0000, 0.0000]) tensor([2.5000, 1.0000, 0.0000, 1.0000]) tensor([2.5000, 1.0000, 1.0000, 0.0000]) tensor([2.5000, 1.0000, 1.0000, 1.0000]) """ self.assertEqual(expected.strip(), observed.strip()) def test_infinite_support(self) -> None: self.maxDiff = None rt = BMGRuntime() rt.accumulate_graph([normal()], {}) sample = rt._rv_to_node(normal()) s = ComputeSupport() observed = s[sample] self.assertEqual(Infinite, observed) def test_switch_support(self) -> None: # This is also tested in stochastic_control_flow_test.py. self.maxDiff = None rt = BMGRuntime() rt.accumulate_graph([switch_inf(), switch_4()], {}) s = ComputeSupport() switch_inf_sample = rt._rv_to_node(switch_inf()) observed_inf = s[switch_inf_sample] self.assertEqual(Infinite, observed_inf) switch_4_sample = rt._rv_to_node(switch_4()) observed_4 = str(s[switch_4_sample]) # Notice an oddity here: in torch, Bernoulli produces 0. and 1. -- floats -- # but Categorical produces 0, 1, 2, 3 -- integers. When taking the union we # detect that tensor(0) and tensor(1) are equal to tensor(0.) and tensor(1.); # they are deduplicated. # TODO: Can this cause any problems? Do we need to canonicalize Bernoulli output # to integers? expected_4 = """ tensor(0.) tensor(1.) tensor(2) tensor(3) """ self.assertEqual(expected_4.strip(), observed_4.strip())
beanmachine-main
tests/ppl/compiler/support_test.py
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Dirichlet compiler tests import unittest import beanmachine.ppl as bm from beanmachine.graph import ( AtomicType, DistributionType, Graph, InferenceType, OperatorType, ValueType, VariableType, ) from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder from beanmachine.ppl.compiler.gen_bmg_cpp import to_bmg_cpp from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph from beanmachine.ppl.compiler.gen_bmg_python import to_bmg_python from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.compiler.runtime import BMGRuntime from beanmachine.ppl.inference import BMGInference, SingleSiteNewtonianMonteCarlo from beanmachine.ppl.model.rv_identifier import RVIdentifier from torch import Size, tensor from torch.distributions import Bernoulli, Dirichlet def tidy(s: str) -> str: return "\n".join(c.strip() for c in s.strip().split("\n")).strip() def _rv_id() -> RVIdentifier: return RVIdentifier(lambda a, b: a, (1, 1)) # Support for Dirichlet distributions has recently been added to BMG; # this is the first time that the compiler will have to deal with # tensor-valued quantities directly so we anticipate having a number # of problems to solve in type analysis and code generation that have # been put off until now. # # We'll start by just taking the BMG code for a spin directly and see # what gives errors and what gives results. dirichlet = DistributionType.DIRICHLET simplex = VariableType.COL_SIMPLEX_MATRIX broadcast = VariableType.BROADCAST_MATRIX real = AtomicType.REAL prob = AtomicType.PROBABILITY sample = OperatorType.SAMPLE s3x1 = ValueType(simplex, prob, 3, 1) r3x1 = ValueType(broadcast, real, 3, 1) nmc = InferenceType.NMC rejection = InferenceType.REJECTION # Here are some simple models we'll use to test the compiler. # TODO: Test Dirichlets with non-constant inputs. @bm.random_variable def d0(): return Dirichlet(tensor([])) # Torch rejects this one. # @bm.random_variable # def d1a(): # return Dirichlet(tensor(0.5)) @bm.random_variable def d1b(): return Dirichlet(tensor([1.0])) @bm.random_variable def d1c(): return Dirichlet(tensor([[1.5]])) @bm.random_variable def d1d(): return Dirichlet(tensor([[[2.0]]])) # Torch rejects this one # @bm.random_variable # def d1e(): # return Dirichlet(tensor([[[-2.0]]])) @bm.random_variable def d2a(): return Dirichlet(tensor([2.5, 3.0])) @bm.random_variable def d2b(): return Dirichlet(tensor([[3.5, 4.0]])) @bm.random_variable def d2c(): return Dirichlet(tensor([[[4.5, 5.0]]])) @bm.random_variable def d23(): return Dirichlet(tensor([[5.5, 6.0, 6.5], [7.0, 7.5, 8.0]])) @bm.random_variable def d3(): return Dirichlet(tensor([1.0, 1.0, 1.0])) @bm.functional def d3_index_0(): return d3()[0] @bm.random_variable def flip(): return Bernoulli(0.5) @bm.functional def d2a_index_flip(): return d2a()[flip()] class DirichletTest(unittest.TestCase): def test_dirichlet_negative(self) -> None: self.maxDiff = None g = Graph() m1 = tensor([1.5, 1.0, 2.0]) cm1 = g.add_constant_pos_matrix(m1) m2 = tensor([[1.5, 1.0], [2.0, 1.5]]) cm2 = g.add_constant_pos_matrix(m2) two = g.add_constant(2) # Input must be a positive real matrix with one column. with self.assertRaises(ValueError): g.add_distribution(dirichlet, s3x1, [two]) with self.assertRaises(ValueError): g.add_distribution(dirichlet, s3x1, [cm2]) # Must be only one input with self.assertRaises(ValueError): g.add_distribution(dirichlet, s3x1, [cm1, two]) # Output type must be simplex with self.assertRaises(ValueError): g.add_distribution(dirichlet, r3x1, [cm1]) def test_dirichlet_sample(self) -> None: self.maxDiff = None g = Graph() m1 = tensor([1.5, 1.0, 2.0]) cm1 = g.add_constant_pos_matrix(m1) d = g.add_distribution(dirichlet, s3x1, [cm1]) ds = g.add_operator(sample, [d]) g.query(ds) samples = g.infer(1, rejection) # samples has form [[array([[a1],[a2],[a3]])]] result = tensor(samples[0][0]).reshape([3]) # We get a three-element simplex, so it should sum to 1.0. self.assertAlmostEqual(1.0, float(sum(result))) def test_constant_pos_real_matrix(self) -> None: # To make a BMG graph with a Dirichlet distribution the first thing # we'll need to do is make a positive real matrix as its input. # Demonstrate that we can add such a matrix to a graph builder, # do a type analysis, and generate C++ and Python code that builds # the graph. Finally, actually build the graph. self.maxDiff = None bmg = BMGraphBuilder() c1 = bmg.add_pos_real_matrix(tensor(1.0)) c2 = bmg.add_pos_real_matrix(tensor([1.0, 1.5])) c3 = bmg.add_pos_real_matrix(tensor([[1.0, 1.5], [2.0, 2.5]])) c4 = bmg.add_pos_real_matrix(tensor([1.0, 1.5])) # These should be deduplicated self.assertTrue(c4 is c2) # Verify that we can add these nodes to the graph, do a type analysis, # and survive the problem-fixing pass without generating an exception. bmg.add_query(c1, _rv_id()) bmg.add_query(c2, _rv_id()) bmg.add_query(c3, _rv_id()) expected = """ digraph "graph" { N0[label="1.0:R+"]; N1[label="Query:R+"]; N2[label="[1.0,1.5]:MR+[2,1]"]; N3[label="Query:MR+[2,1]"]; N4[label="[[1.0,1.5],\\\\n[2.0,2.5]]:MR+[2,2]"]; N5[label="Query:MR+[2,2]"]; N0 -> N1; N2 -> N3; N4 -> N5; }""" observed = to_dot( bmg, node_types=True, label_edges=False, after_transform=True, ) self.assertEqual(expected.strip(), observed.strip()) # We should be able to generate correct C++ and Python code to build # a graph that contains only positive constant matrices. Note that the # queries are not emitted into the graph because BMG does not allow # a query on a constant. # # NB: m2 is transposed from the source! expected = """ graph::Graph g; Eigen::MatrixXd m0(1, 1); m0 << 1.0; uint n0 = g.add_constant_pos_matrix(m0); uint q0 = g.query(n0); Eigen::MatrixXd m1(2, 1); m1 << 1.0, 1.5; uint n1 = g.add_constant_pos_matrix(m1); uint q1 = g.query(n1); Eigen::MatrixXd m2(2, 2); m2 << 1.0, 2.0, 1.5, 2.5; uint n2 = g.add_constant_pos_matrix(m2); uint q2 = g.query(n2); """ observed = to_bmg_cpp(bmg).code self.assertEqual(expected.strip(), observed.strip()) # Notice that constant matrices are always expressed as a # 2-d matrix, and we transpose them so that they are column-major. expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_pos_matrix(tensor([[1.0]])) q0 = g.query(n0) n1 = g.add_constant_pos_matrix(tensor([[1.0],[1.5]])) q1 = g.query(n1) n2 = g.add_constant_pos_matrix(tensor([[1.0,2.0],[1.5,2.5]])) q2 = g.query(n2) """ observed = to_bmg_python(bmg).code self.assertEqual(expected.strip(), observed.strip()) # Let's actually get the graph. # Note that what was a row vector in the original code is now a column vector. expected = """ 0: CONSTANT(matrix<positive real> 1) (out nodes: ) queried 1: CONSTANT(matrix<positive real> 1 1.5) (out nodes: ) queried 2: CONSTANT(matrix<positive real> 1 2 1.5 2.5) (out nodes: ) queried """ observed = to_bmg_graph(bmg).graph.to_string() self.assertEqual(tidy(expected), tidy(observed)) def test_dirichlet_type_analysis(self) -> None: self.maxDiff = None queries = [d0(), d1b(), d1c(), d1d(), d2a(), d2b(), d2c(), d23()] bmg = BMGRuntime().accumulate_graph(queries, {}) observed = to_dot( bmg, node_types=True, edge_requirements=True, after_transform=False, label_edges=False, ) expected = """ digraph "graph" { N00[label="[]:T"]; N01[label="Dirichlet:S[1,1]"]; N02[label="Sample:S[1,1]"]; N03[label="Query:S[1,1]"]; N04[label="[1.0]:OH"]; N05[label="Dirichlet:S[1,1]"]; N06[label="Sample:S[1,1]"]; N07[label="Query:S[1,1]"]; N08[label="[[1.5]]:R+"]; N09[label="Dirichlet:S[1,1]"]; N10[label="Sample:S[1,1]"]; N11[label="Query:S[1,1]"]; N12[label="[[[2.0]]]:N"]; N13[label="Dirichlet:S[1,1]"]; N14[label="Sample:S[1,1]"]; N15[label="Query:S[1,1]"]; N16[label="[2.5,3.0]:MR+[2,1]"]; N17[label="Dirichlet:S[2,1]"]; N18[label="Sample:S[2,1]"]; N19[label="Query:S[2,1]"]; N20[label="[[3.5,4.0]]:MR+[2,1]"]; N21[label="Dirichlet:S[2,1]"]; N22[label="Sample:S[2,1]"]; N23[label="Query:S[2,1]"]; N24[label="[[[4.5,5.0]]]:T"]; N25[label="Dirichlet:S[1,1]"]; N26[label="Sample:S[1,1]"]; N27[label="Query:S[1,1]"]; N28[label="[[5.5,6.0,6.5],\\\\n[7.0,7.5,8.0]]:MR+[3,2]"]; N29[label="Dirichlet:S[3,1]"]; N30[label="Sample:S[3,1]"]; N31[label="Query:S[3,1]"]; N00 -> N01[label="R+"]; N01 -> N02[label="S[1,1]"]; N02 -> N03[label=any]; N04 -> N05[label="R+"]; N05 -> N06[label="S[1,1]"]; N06 -> N07[label=any]; N08 -> N09[label="R+"]; N09 -> N10[label="S[1,1]"]; N10 -> N11[label=any]; N12 -> N13[label="R+"]; N13 -> N14[label="S[1,1]"]; N14 -> N15[label=any]; N16 -> N17[label="MR+[2,1]"]; N17 -> N18[label="S[2,1]"]; N18 -> N19[label=any]; N20 -> N21[label="MR+[2,1]"]; N21 -> N22[label="S[2,1]"]; N22 -> N23[label=any]; N24 -> N25[label="R+"]; N25 -> N26[label="S[1,1]"]; N26 -> N27[label=any]; N28 -> N29[label="MR+[3,1]"]; N29 -> N30[label="S[3,1]"]; N30 -> N31[label=any]; } """ self.assertEqual(expected.strip(), observed.strip()) def test_dirichlet_errors(self) -> None: self.maxDiff = None # If the constant tensor given is not supported at all by BMG because of # its dimensionality then that is the error we will report. If the tensor # is supported by BMG but not valid for a Dirichlet then that's what we say. # TODO: Error message is misleading in that it says that the requirement # is a 3x1 positive real matrix, when the real requirement is that it be # ANY 1-d positive real matrix. expected = ( "The concentration of a Dirichlet is required to be" + " a 3 x 1 positive real matrix but is" + " a 3 x 2 positive real matrix.\n" + "The Dirichlet was created in function call d23()." ) with self.assertRaises(ValueError) as ex: BMGInference().infer([d23()], {}, 1) self.assertEqual(expected.strip(), str(ex.exception).strip()) def test_dirichlet_fix_problems(self) -> None: # Can we take an input that is a valid tensor and deduce that we must # replace it with a positive real constant matrix node? self.maxDiff = None queries = [d2a()] observations = {d2a(): tensor([0.5, 0.5])} bmg = BMGRuntime().accumulate_graph(queries, observations) observed = to_dot( bmg, node_types=True, edge_requirements=True, after_transform=True, label_edges=False, ) expected = """ digraph "graph" { N0[label="[2.5,3.0]:MR+[2,1]"]; N1[label="Dirichlet:S[2,1]"]; N2[label="Sample:S[2,1]"]; N3[label="Observation tensor([0.5000, 0.5000]):S[2,1]"]; N4[label="Query:S[2,1]"]; N0 -> N1[label="MR+[2,1]"]; N1 -> N2[label="S[2,1]"]; N2 -> N3[label=any]; N2 -> N4[label=any]; } """ self.assertEqual(expected.strip(), observed.strip()) # This is the tricky case: the degenerate case where we have only # one value, and we need to make sure that we generate a matrix # constant rather than a regular positive real constant: queries = [d1b()] bmg = BMGRuntime().accumulate_graph(queries, {}) observed = to_dot( bmg, node_types=True, edge_requirements=True, after_transform=True, label_edges=False, ) # This is subtle, but notice that we have a constant matrix here rather # than a constant; the value is [1.0], not 1.0. expected = """ digraph "graph" { N0[label="[1.0]:R+"]; N1[label="Dirichlet:S[1,1]"]; N2[label="Sample:S[1,1]"]; N3[label="Query:S[1,1]"]; N0 -> N1[label="R+"]; N1 -> N2[label="S[1,1]"]; N2 -> N3[label=any]; }""" self.assertEqual(expected.strip(), observed.strip()) def test_dirichlet_bmg_inference(self) -> None: # Get Dirichlet samples; verify that the sample set # is in rows, not columns. self.maxDiff = None # 2-element Dirichlet queries = [d2a()] observations = {} num_samples = 10 results = BMGInference().infer( queries, observations, num_samples, 1, inference_type=nmc ) samples = results[d2a()] self.assertEqual(Size([1, num_samples, 2]), samples.size()) # Make sure we get the same thing when we use Bean Machine proper: results = SingleSiteNewtonianMonteCarlo().infer( queries, observations, num_samples, 1 ) samples = results[d2a()] self.assertEqual(Size([1, num_samples, 2]), samples.size()) # 3-element Dirichlet queries = [d3()] observations = {} num_samples = 20 results = BMGInference().infer( queries, observations, num_samples, 1, inference_type=rejection ) samples = results[d3()] self.assertEqual(Size([1, num_samples, 3]), samples.size()) # If we observe a Dirichlet sample to be a value, we'd better get # that value when we query. queries = [d3()] observations = {d3(): tensor([0.5, 0.25, 0.25])} num_samples = 1 results = BMGInference().infer( queries, observations, num_samples, 1, inference_type=nmc ) samples = results[d3()] expected = "tensor([[[0.5000, 0.2500, 0.2500]]], dtype=torch.float64)" self.assertEqual(expected, str(samples)) # Make sure we get the same thing when we use Bean Machine proper: results = SingleSiteNewtonianMonteCarlo().infer( queries, observations, num_samples, 1 ) samples = results[d3()] expected = "tensor([[[0.5000, 0.2500, 0.2500]]])" self.assertEqual(expected, str(samples)) def test_dirichlet_to_python(self) -> None: self.maxDiff = None queries = [d2a()] observations = {d2a(): tensor([0.5, 0.5])} observed = BMGInference().to_python(queries, observations) expected = """ from beanmachine import graph from torch import tensor g = graph.Graph() n0 = g.add_constant_pos_matrix(tensor([[2.5],[3.0]])) n1 = g.add_distribution( graph.DistributionType.DIRICHLET, graph.ValueType( graph.VariableType.COL_SIMPLEX_MATRIX, graph.AtomicType.PROBABILITY, 2, 1, ), [n0], ) n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1]) g.observe(n2, tensor([0.5000, 0.5000])) q0 = g.query(n2)""" self.assertEqual(expected.strip(), observed.strip()) def test_dirichlet_to_cpp(self) -> None: self.maxDiff = None queries = [d2a()] observations = {d2a(): tensor([0.5, 0.5])} observed = BMGInference().to_cpp(queries, observations) expected = """ graph::Graph g; Eigen::MatrixXd m0(2, 1); m0 << 2.5, 3.0; uint n0 = g.add_constant_pos_matrix(m0); uint n1 = g.add_distribution( graph::DistributionType::DIRICHLET, graph::ValueType( graph::VariableType::COL_SIMPLEX_MATRIX, graph::AtomicType::PROBABILITY, 2, 1 ), std::vector<uint>({n0})); uint n2 = g.add_operator( graph::OperatorType::SAMPLE, std::vector<uint>({n1})); Eigen::MatrixXd o0(2, 1); o0 << 0.5, 0.5; g.observe(n2, o0); uint q0 = g.query(n2);""" self.assertEqual(expected.strip(), observed.strip()) def test_dirichlet_observation_errors(self) -> None: self.maxDiff = None queries = [d2a()] # Wrong size, wrong sum observations = {d2a(): tensor(2.0)} with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, observations, 1) expected = ( "A Dirichlet distribution is observed to have value 2.0 " + "but only produces samples of type 2 x 1 simplex matrix." ) self.assertEqual(expected, str(ex.exception)) # Wrong size, right sum observations = {d2a(): tensor([0.25, 0.25, 0.25, 0.25])} with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, observations, 1) expected = ( "A Dirichlet distribution is observed to have value " + "tensor([0.2500, 0.2500, 0.2500, 0.2500]) " + "but only produces samples of type 2 x 1 simplex matrix." ) self.assertEqual(expected, str(ex.exception)) # Right size, wrong sum observations = {d2a(): tensor([0.25, 0.25])} with self.assertRaises(ValueError) as ex: BMGInference().infer(queries, observations, 1) expected = ( "A Dirichlet distribution is observed to have value " + "tensor([0.2500, 0.2500]) " + "but only produces samples of type 2 x 1 simplex matrix." ) self.assertEqual(expected, str(ex.exception)) def test_dirichlet_index(self) -> None: self.maxDiff = None observed = BMGInference().to_dot([d3_index_0()], {}) expected = """ digraph "graph" { N0[label="[1.0,1.0,1.0]"]; N1[label=Dirichlet]; N2[label=Sample]; N3[label=0]; N4[label=index]; N5[label=Query]; N0 -> N1; N1 -> N2; N2 -> N4; N3 -> N4; N4 -> N5; } """ self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([d2a_index_flip()], {}) expected = """ digraph "graph" { N00[label="[2.5,3.0]"]; N01[label=Dirichlet]; N02[label=Sample]; N03[label=0.5]; N04[label=Bernoulli]; N05[label=Sample]; N06[label=1]; N07[label=0]; N08[label=if]; N09[label=index]; N10[label=Query]; N00 -> N01; N01 -> N02; N02 -> N09; N03 -> N04; N04 -> N05; N05 -> N08; N06 -> N08; N07 -> N08; N08 -> N09; N09 -> N10; } """ self.assertEqual(expected.strip(), observed.strip()) queries = [d2a(), d2a_index_flip()] observations = {flip(): tensor(1.0)} results = BMGInference().infer(queries, observations, 1) d2a_sample = results[d2a()][0, 0] index_sample = results[d2a_index_flip()][0] # The sample and the indexed sample must be the same value self.assertEqual(d2a_sample[1], index_sample)
beanmachine-main
tests/ppl/compiler/dirichlet_test.py
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Dirichlet compiler tests import unittest import beanmachine.ppl as bm import torch from beanmachine.ppl.inference import BMGInference from torch import tensor from torch.distributions import Bernoulli, Normal @bm.random_variable def norm(x): return Normal(0.0, 1.0) @bm.random_variable def bern(): return Bernoulli(0.5) @bm.functional def cholesky1(): n0 = norm(0) * norm(0) n1 = norm(1) * norm(1) t = tensor([[n0, 0.0], [0.0, n1]]) return torch.linalg.cholesky(t) @bm.functional def cholesky2(): n0 = norm(0) * norm(0) n1 = norm(1) * norm(1) t = tensor([[n0, 0.0], [0.0, n1]]) return torch.Tensor.cholesky(t) @bm.functional def cholesky3(): n0 = norm(0) * norm(0) n1 = norm(1) * norm(1) t = tensor([[n0, 0.0], [0.0, n1]]) return t.cholesky() @bm.functional def cholesky4(): # Matrix of bools should convert to reals t = tensor([[bern(), 0], [0, 1]]) return t.cholesky() @bm.functional def cholesky5(): n0 = norm(0) * norm(0) n1 = norm(1) * norm(1) t = tensor([[n0, 0.0], [0.0, n1]]) L, _ = torch.linalg.cholesky_ex(t) return L # TODO: Test with a non-square matrix, should give an error. class CholeskyTest(unittest.TestCase): def test_cholesky(self) -> None: self.maxDiff = None expected = """ digraph "graph" { N00[label=0.0]; N01[label=1.0]; N02[label=Normal]; N03[label=Sample]; N04[label=Sample]; N05[label=2]; N06[label="*"]; N07[label="*"]; N08[label=ToMatrix]; N09[label=Cholesky]; N10[label=Query]; N00 -> N02; N00 -> N08; N00 -> N08; N01 -> N02; N02 -> N03; N02 -> N04; N03 -> N06; N03 -> N06; N04 -> N07; N04 -> N07; N05 -> N08; N05 -> N08; N06 -> N08; N07 -> N08; N08 -> N09; N09 -> N10; } """ observed = BMGInference().to_dot([cholesky1()], {}) self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([cholesky2()], {}) self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([cholesky3()], {}) self.assertEqual(expected.strip(), observed.strip()) observed = BMGInference().to_dot([cholesky5()], {}) self.assertEqual(expected.strip(), observed.strip()) expected = """ digraph "graph" { N0[label=0.5]; N1[label=Bernoulli]; N2[label=Sample]; N3[label=2]; N4[label=False]; N5[label=True]; N6[label=ToMatrix]; N7[label=ToRealMatrix]; N8[label=Cholesky]; N9[label=Query]; N0 -> N1; N1 -> N2; N2 -> N6; N3 -> N6; N3 -> N6; N4 -> N6; N4 -> N6; N5 -> N6; N6 -> N7; N7 -> N8; N8 -> N9; } """ observed = BMGInference().to_dot([cholesky4()], {}) self.assertEqual(expected.strip(), observed.strip())
beanmachine-main
tests/ppl/compiler/cholesky_test.py
beanmachine-main
tests/ppl/compiler/testlib/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict import beanmachine.ppl as bm import torch.distributions as dist from beanmachine.ppl.model.rv_identifier import RVIdentifier from torch import Tensor class BetaBernoulliBasicModel(object): def __init__(self, alpha: Tensor, beta: Tensor): self.alpha_ = alpha self.beta_ = beta @bm.random_variable def theta(self): return dist.Beta(self.alpha_, self.beta_) @bm.random_variable def y(self, i): return dist.Bernoulli(self.theta()) def gen_obs(self, num_obs: int) -> Dict[RVIdentifier, Tensor]: true_theta = 0.75 obs = {} for i in range(0, num_obs): obs[self.y(i)] = dist.Bernoulli(true_theta).sample() return obs class BetaBernoulliOpsModel(BetaBernoulliBasicModel): @bm.functional def sum_y(self): sum = 0.0 for i in range(0, 5): sum = sum + self.y(i) return sum class BetaBernoulliScaleHyperParameters(BetaBernoulliBasicModel): def scale_alpha(self): factor = 2.0 for i in range(0, 3): factor = factor * i return factor @bm.random_variable def theta(self): return dist.Beta(self.alpha_ + self.scale_alpha(), self.beta_ + 2.0)
beanmachine-main
tests/ppl/compiler/testlib/conjugate_models.py