Dataset columns: python_code (string, 0–4.04M chars), repo_name (string, 7–58 chars), file_path (string, 5–147 chars)
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""Parameterized test to compare samples from original and conjugate prior transformed models"""

import random

import pytest
import scipy
import torch
from beanmachine.ppl.inference.bmg_inference import BMGInference

from .conjugate_models import (
    BetaBernoulliBasicModel,
    BetaBernoulliOpsModel,
    BetaBernoulliScaleHyperParameters,
)

_alpha = 2.0
_beta = 2.0

test_models = [
    (BetaBernoulliBasicModel, "beta_bernoulli_conjugate_fixer"),
    (BetaBernoulliOpsModel, "beta_bernoulli_conjugate_fixer"),
    (BetaBernoulliScaleHyperParameters, "beta_bernoulli_conjugate_fixer"),
]


@pytest.mark.parametrize("model, opt", test_models)
def test_samples_with_ks(model, opt):
    seed = 0
    torch.manual_seed(seed)
    random.seed(seed)
    num_samples = 3000
    num_obs = 4

    bmg = BMGInference()
    model = model(_alpha, _beta)
    observations = model.gen_obs(num_obs)
    queries = [model.theta()]

    # Generate samples from model when opt is disabled
    skip_optimizations = {opt}
    posterior_original = bmg.infer(queries, observations, num_samples)
    graph_original = bmg.to_dot(
        queries, observations, skip_optimizations=skip_optimizations
    )
    theta_samples_original = posterior_original[model.theta()][0]

    # Generate samples from model when opt is enabled
    skip_optimizations = set()
    posterior_transformed = bmg.infer(
        queries, observations, num_samples, 1, skip_optimizations=skip_optimizations
    )
    graph_transformed = bmg.to_dot(
        queries, observations, skip_optimizations=skip_optimizations
    )
    theta_samples_transformed = posterior_transformed[model.theta()][0]

    assert (
        graph_original.strip() != graph_transformed.strip()
    ), "Original and transformed graph should not be identical."
    assert type(theta_samples_original) == type(
        theta_samples_transformed
    ), "Sample type of original and transformed model should be the same."
    assert len(theta_samples_original) == len(
        theta_samples_transformed
    ), "Sample size of original and transformed model should be the same."
    assert (
        scipy.stats.ks_2samp(theta_samples_original, theta_samples_transformed).pvalue
        >= 0.05
    )
beanmachine-main
tests/ppl/compiler/testlib/fix_beta_conjugacy_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import unittest import beanmachine.graph as bmg import numpy as np def tidy(s: str) -> str: return "\n".join(c.strip() for c in s.strip().split("\n")).strip() class TestOperators(unittest.TestCase): def test_oper_args(self) -> None: """ We will test test number of arguments for each operator 0, 1, 2, 3 etc. """ self.maxDiff = None g = bmg.Graph() c1 = g.add_constant_real(2.5) c2 = g.add_constant_real(-1.5) c3 = g.add_constant_probability(0.5) c4 = g.add_constant_probability(0.6) c5 = g.add_constant_probability(0.7) c6 = g.add_constant_natural(23) c7 = g.add_constant_bool(False) c8 = g.add_constant_neg_real(-1.25) c9 = g.add_constant_pos_real(1.25) # add const matrices, operators on matrix to be added g.add_constant_bool_matrix(np.array([[True, False], [False, True]])) g.add_constant_real_matrix(np.array([[-0.1, 0.0], [2.0, -1.0]])) g.add_constant_natural_matrix(np.array([[1, 2], [0, 999]])) g.add_constant_pos_matrix(np.array([[0.1, 0.0], [2.0, 1.0]])) g.add_constant_neg_matrix(np.array(([-0.3, -0.4]))) g.add_constant_probability_matrix(np.array([0.1, 0.9])) g.add_constant_col_simplex_matrix(np.array([[0.1, 1.0], [0.9, 0.0]])) with self.assertRaises(ValueError): g.add_constant_neg_matrix(np.array([[0.1, 0.0], [2.0, -1.0]])) with self.assertRaises(ValueError): g.add_constant_pos_matrix(np.array([[0.1, 0.0], [2.0, -1.0]])) with self.assertRaises(ValueError): g.add_constant_col_simplex_matrix(np.array([[0.1, 0.0], [2.0, 1.0]])) with self.assertRaises(ValueError): g.add_constant_probability_matrix(np.array([1.1, 0.9])) # test TO_REAL with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.TO_REAL, []) g.add_operator(bmg.OperatorType.TO_REAL, [c4]) g.add_operator(bmg.OperatorType.TO_REAL, [c6]) g.add_operator(bmg.OperatorType.TO_REAL, [c8]) with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.TO_REAL, [c4, c5]) # test EXP # Exp needs exactly one operand with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.EXP, []) with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.EXP, [c2, c8]) # That operand must be real, negative real or positive real: with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.EXP, [c3]) # prob throws with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.EXP, [c6]) # natural throws with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.EXP, [c7]) # bool throws g.add_operator(bmg.OperatorType.EXP, [c2]) # real OK g.add_operator(bmg.OperatorType.EXP, [c8]) # neg_real OK g.add_operator(bmg.OperatorType.EXP, [c9]) # pos_real OK # test LOG # Log needs exactly one operand: with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.LOG, []) with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.LOG, [c1, c2]) # That operand must be positive real or probability: with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.LOG, [c2]) with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.LOG, [c6]) with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.LOG, [c7]) with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.LOG, [c8]) g.add_operator(bmg.OperatorType.LOG, [c3]) # test NEGATE # Negate needs exactly one operand with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.NEGATE, []) g.add_operator(bmg.OperatorType.NEGATE, 
[c2]) with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.NEGATE, [c1, c2]) # Negate can take a real, negative real or positive real. with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.NEGATE, [c3]) with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.NEGATE, [c6]) with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.NEGATE, [c7]) g.add_operator(bmg.OperatorType.NEGATE, [c1]) g.add_operator(bmg.OperatorType.NEGATE, [c8]) # test COMPLEMENT with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.COMPLEMENT, []) g.add_operator(bmg.OperatorType.COMPLEMENT, [c4]) with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.COMPLEMENT, [c4, c4]) g.add_operator(bmg.OperatorType.COMPLEMENT, [c7]) # test MULTIPLY with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.MULTIPLY, []) with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.MULTIPLY, [c3]) g.add_operator(bmg.OperatorType.MULTIPLY, [c4, c5]) g.add_operator(bmg.OperatorType.MULTIPLY, [c3, c4, c5]) # test ADD # Add requires two or more operands with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.ADD, []) with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.ADD, [c1]) # All operands must be (1) the same type, and (2) # real, neg real or pos real. with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.ADD, [c1, c8]) with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.ADD, [c3, c3]) g.add_operator(bmg.OperatorType.ADD, [c1, c2]) g.add_operator(bmg.OperatorType.ADD, [c1, c2, c1]) g.add_operator(bmg.OperatorType.ADD, [c8, c8, c8]) # test POW with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.POW, []) with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.POW, [c1]) with self.assertRaises(ValueError): g.add_operator(bmg.OperatorType.POW, [c1, c1, c1]) g.add_operator(bmg.OperatorType.POW, [c1, c2]) observed = g.to_string() expected = """ 0: CONSTANT(real 2.5) (out nodes: 24, 30, 31, 31, 33) 1: CONSTANT(real -1.5) (out nodes: 19, 23, 30, 31, 33) 2: CONSTANT(probability 0.5) (out nodes: 22, 29) 3: CONSTANT(probability 0.6) (out nodes: 16, 26, 28, 29) 4: CONSTANT(probability 0.7) (out nodes: 28, 29) 5: CONSTANT(natural 23) (out nodes: 17) 6: CONSTANT(boolean 0) (out nodes: 27) 7: CONSTANT(negative real -1.25) (out nodes: 18, 20, 25, 32, 32, 32) 8: CONSTANT(positive real 1.25) (out nodes: 21) 9: CONSTANT(matrix<boolean> 1 0 0 1) (out nodes: ) 10: CONSTANT(matrix<real> -0.1 0 2 -1) (out nodes: ) 11: CONSTANT(matrix<natural> 1 2 0 999) (out nodes: ) 12: CONSTANT(matrix<positive real> 0.1 0 2 1) (out nodes: ) 13: CONSTANT(matrix<negative real> -0.3 -0.4) (out nodes: ) 14: CONSTANT(matrix<probability> 0.1 0.9) (out nodes: ) 15: CONSTANT(col_simplex_matrix<probability> 0.1 1 0.9 0) (out nodes: ) 16: TO_REAL(3) (out nodes: ) 17: TO_REAL(5) (out nodes: ) 18: TO_REAL(7) (out nodes: ) 19: EXP(1) (out nodes: ) 20: EXP(7) (out nodes: ) 21: EXP(8) (out nodes: ) 22: LOG(2) (out nodes: ) 23: NEGATE(1) (out nodes: ) 24: NEGATE(0) (out nodes: ) 25: NEGATE(7) (out nodes: ) 26: COMPLEMENT(3) (out nodes: ) 27: COMPLEMENT(6) (out nodes: ) 28: MULTIPLY(3, 4) (out nodes: ) 29: MULTIPLY(2, 3, 4) (out nodes: ) 30: ADD(0, 1) (out nodes: ) 31: ADD(0, 1, 0) (out nodes: ) 32: ADD(7, 7, 7) (out nodes: ) 33: POW(0, 1) (out nodes: ) """ self.assertEqual(tidy(expected), tidy(observed)) def test_arithmetic(self) -> None: g = bmg.Graph() c1 = g.add_constant_natural(3) o0 = 
g.add_operator(bmg.OperatorType.TO_REAL, [c1]) o1 = g.add_operator(bmg.OperatorType.NEGATE, [o0]) o2 = g.add_operator(bmg.OperatorType.EXP, [o1]) # positive real o2_real = g.add_operator(bmg.OperatorType.TO_REAL, [o2]) o3 = g.add_operator(bmg.OperatorType.MULTIPLY, [o2_real, o0]) o4 = g.add_operator(bmg.OperatorType.EXPM1, [o0]) o5 = g.add_operator(bmg.OperatorType.ADD, [o0, o3, o4]) o6 = g.add_operator(bmg.OperatorType.POW, [o5, o0]) # real # Verify that EXPM1 on a negative real is legal. o7 = g.add_operator(bmg.OperatorType.NEGATE, [o2]) o8 = g.add_operator(bmg.OperatorType.EXPM1, [o7]) g.query(o6) g.query(o8) samples = g.infer(2) # both samples should have exactly the same value since we are doing # deterministic operators only self.assertEqual(type(samples[0][0]), float) self.assertEqual(samples[0][0], samples[1][0]) # the result should be identical to doing this math directly const1 = 3.0 r6 = (const1 + math.exp(-const1) * const1 + math.expm1(const1)) ** const1 self.assertAlmostEqual(samples[0][0], r6, 3) r8 = math.expm1(-math.exp(-const1)) self.assertAlmostEqual(samples[0][1], r8, 3) def test_probability(self) -> None: g = bmg.Graph() c1 = g.add_constant_probability(0.8) c2 = g.add_constant_probability(0.7) o1 = g.add_operator(bmg.OperatorType.COMPLEMENT, [c1]) o2 = g.add_operator(bmg.OperatorType.MULTIPLY, [o1, c2]) g.query(o2) samples = g.infer(2) self.assertTrue(type(samples[0][0]), float) self.assertAlmostEqual(samples[0][0], 0.14, 3) def test_to_probability(self) -> None: # We have some situations where we know that a real or positive # real quantity is a probability but we cannot prove it. For # example, 0.4 * beta_sample + 0.5 is definitely between 0.0 and # 1.0, but we assume that the sum of two probabilities is a # positive real. # # The to_probability operator takes a real or positive real and # constrains it to the range (0.0, 1.0) g = bmg.Graph() c0 = g.add_constant_real(0.25) c1 = g.add_constant_real(0.5) c2 = g.add_constant_real(0.75) o0 = g.add_operator(bmg.OperatorType.ADD, [c0, c1]) o1 = g.add_operator(bmg.OperatorType.TO_PROBABILITY, [o0]) o2 = g.add_operator(bmg.OperatorType.ADD, [c1, c2]) o3 = g.add_operator(bmg.OperatorType.TO_PROBABILITY, [o2]) g.query(o0) g.query(o1) g.query(o2) g.query(o3) samples = g.infer(1) self.assertAlmostEqual(samples[0][0], 0.75, 3) self.assertAlmostEqual(samples[0][1], 0.75, 3) self.assertAlmostEqual(samples[0][2], 1.25, 3) self.assertAlmostEqual(samples[0][3], 1.0, 3) def test_to_neg_real(self) -> None: # We have some situations where we know that a real quantity # is negative but we cannot prove it. For example, # log(0.4 * beta() + 0.5) is definitely negative but we # assume that the sum of two probabilities is a positive real, # and so the log is a real, not a negative real. # # The to_neg_real operator takes a real and constrains it to # be negative. 
g = bmg.Graph() two = g.add_constant_pos_real(2.0) beta = g.add_distribution( bmg.DistributionType.BETA, bmg.AtomicType.PROBABILITY, [two, two] ) s = g.add_operator(bmg.OperatorType.SAMPLE, [beta]) c4 = g.add_constant_probability(0.4) c5 = g.add_constant_pos_real(0.5) mult = g.add_operator(bmg.OperatorType.MULTIPLY, [c4, s]) tr = g.add_operator(bmg.OperatorType.TO_POS_REAL, [mult]) add = g.add_operator(bmg.OperatorType.ADD, [tr, c5]) # Positive real lg = g.add_operator(bmg.OperatorType.LOG, [add]) # Real tnr = g.add_operator(bmg.OperatorType.TO_NEG_REAL, [lg]) lme = g.add_operator(bmg.OperatorType.LOG1MEXP, [tnr]) ex = g.add_operator(bmg.OperatorType.EXP, [lme]) g.query(add) g.query(lg) g.query(tnr) g.query(ex) samples = g.infer(1, bmg.InferenceType.NMC)[0] add_sample = samples[0] lg_sample = samples[1] tnr_sample = samples[2] ex_sample = samples[3] self.assertTrue(0.5 <= add_sample <= 0.9) self.assertTrue(lg_sample <= 0.0) self.assertEqual(lg_sample, tnr_sample) self.assertAlmostEqual(ex_sample, 1.0 - add_sample, 3) def test_sample(self) -> None: # negative test we can't exponentiate the sample from a Bernoulli g = bmg.Graph() c1 = g.add_constant_probability(0.6) d1 = g.add_distribution( bmg.DistributionType.BERNOULLI, bmg.AtomicType.BOOLEAN, [c1] ) s1 = g.add_operator(bmg.OperatorType.SAMPLE, [d1]) with self.assertRaises(ValueError) as cm: o1 = g.add_operator(bmg.OperatorType.EXP, [s1]) self.assertTrue( "operator EXP requires a neg_real, real or pos_real parent" in str(cm.exception) ) # the proper way to do it is to convert to floating point first g = bmg.Graph() c1 = g.add_constant_probability(0.6) d1 = g.add_distribution( bmg.DistributionType.BERNOULLI, bmg.AtomicType.BOOLEAN, [c1] ) s1 = g.add_operator(bmg.OperatorType.SAMPLE, [d1]) o1 = g.add_operator(bmg.OperatorType.TO_REAL, [s1]) # o2 and o3 both compute the same value o2 = g.add_operator(bmg.OperatorType.EXP, [o1]) o3 = g.add_operator(bmg.OperatorType.EXP, [o1]) # direcly negating o3 results in a NEG_REAL value g.add_operator(bmg.OperatorType.NEGATE, [o3]) # converting o3 to REAL then applying negate results in REAL value o3_real = g.add_operator(bmg.OperatorType.TO_REAL, [o3]) o4 = g.add_operator(bmg.OperatorType.NEGATE, [o3_real]) o2_real = g.add_operator(bmg.OperatorType.TO_REAL, [o2]) o5 = g.add_operator(bmg.OperatorType.ADD, [o2_real, o4]) # o5 should be 0 in all possible worlds g.query(o5) samples = g.infer(10) self.assertEqual(type(samples[0][0]), float) self.assertEqual( [s[0] for s in samples], [0.0] * 10, "all samples should be zero" )
beanmachine-main
tests/graph/operator_test.py
beanmachine-main
tests/graph/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import unittest import numpy as np from beanmachine import graph class TestNMC(unittest.TestCase): # see https://www.jstatsoft.org/article/view/v012i03/v12i03.pdf def test_eight_schools(self): # For each school, the average treatment effect and the standard deviation DATA = [ (28.39, 14.9), (7.94, 10.2), (-2.75, 16.3), (6.82, 11.0), (-0.64, 9.4), (0.63, 11.4), (18.01, 10.4), (12.16, 17.6), ] # the expected mean and standard deviation of each random variable EXPECTED = [ (11.1, 9.1), (7.6, 6.6), (5.7, 8.4), (7.1, 7.0), (5.1, 6.8), (5.7, 7.3), (10.4, 7.3), (8.3, 8.4), (7.6, 5.9), # overall mean (6.7, 5.6), # overall std ] g = graph.Graph() zero = g.add_constant_real(0.0) thousand = g.add_constant_pos_real(1000.0) # overall_mean ~ Normal(0, 1000) overall_mean_dist = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [zero, thousand] ) overall_mean = g.add_operator(graph.OperatorType.SAMPLE, [overall_mean_dist]) # overall_std ~ HalfCauchy(1000) # [note: the original paper had overall_std ~ Uniform(0, 1000)] overall_std_dist = g.add_distribution( graph.DistributionType.HALF_CAUCHY, graph.AtomicType.POS_REAL, [thousand] ) overall_std = g.add_operator(graph.OperatorType.SAMPLE, [overall_std_dist]) # for each school we will add two random variables, # but first we need to define a distribution school_effect_dist = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [overall_mean, overall_std], ) for treatment_mean_value, treatment_std_value in DATA: # school_effect ~ Normal(overall_mean, overall_std) school_effect = g.add_operator( graph.OperatorType.SAMPLE, [school_effect_dist] ) g.query(school_effect) # treatment_mean ~ Normal(school_effect, treatment_std) treatment_std = g.add_constant_pos_real(treatment_std_value) treatment_mean_dist = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [school_effect, treatment_std], ) treatment_mean = g.add_operator( graph.OperatorType.SAMPLE, [treatment_mean_dist] ) g.observe(treatment_mean, treatment_mean_value) g.query(overall_mean) g.query(overall_std) means = g.infer_mean(3000, graph.InferenceType.NMC) for idx, (mean, std) in enumerate(EXPECTED): self.assertTrue( abs(means[idx] - mean) < std * 0.5, f"index {idx} expected {mean} +- {std*0.5} actual {means[idx]}", ) # see https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Bivariate_case # we are assuming zero mean here for simplicity def test_bivariate_gaussian(self): g = graph.Graph() flat = g.add_distribution( graph.DistributionType.FLAT, graph.AtomicType.REAL, [] ) x = g.add_operator(graph.OperatorType.SAMPLE, [flat]) y = g.add_operator(graph.OperatorType.SAMPLE, [flat]) x_sq = g.add_operator(graph.OperatorType.MULTIPLY, [x, x]) y_sq = g.add_operator(graph.OperatorType.MULTIPLY, [y, y]) x_y = g.add_operator(graph.OperatorType.MULTIPLY, [x, y]) SIGMA_X = 5.0 SIGMA_Y = 2.0 RHO = 0.7 x_sq_term = g.add_constant_real(-0.5 / (1 - RHO**2) / SIGMA_X**2) g.add_factor(graph.FactorType.EXP_PRODUCT, [x_sq, x_sq_term]) y_sq_term = g.add_constant_real(-0.5 / (1 - RHO**2) / SIGMA_Y**2) g.add_factor(graph.FactorType.EXP_PRODUCT, [y_sq, y_sq_term]) x_y_term = g.add_constant_real(RHO / (1 - RHO**2) / SIGMA_X / SIGMA_Y) g.add_factor(graph.FactorType.EXP_PRODUCT, [x_y, x_y_term]) g.query(x) g.query(x_sq) g.query(y) g.query(y_sq) g.query(x_y) # note: there 
are no observations, so this next line should have no effect g.remove_observations() means = g.infer_mean(10000, graph.InferenceType.NMC) print("means", means) # only printed on error self.assertTrue(abs(means[0] - 0.0) < 0.2, "mean of x should be 0") self.assertTrue( abs(means[1] - SIGMA_X**2) < 0.5, f"mean of x^2 should be {SIGMA_X**2}" ) self.assertTrue(abs(means[2] - 0.0) < 0.2, "mean of y should be 0") self.assertTrue( abs(means[3] - SIGMA_Y**2) < 0.2, f"mean of y^2 should be {SIGMA_Y**2}" ) post_cov = means[4] / math.sqrt(means[1]) / math.sqrt(means[3]) self.assertTrue( abs(post_cov - RHO) < 0.2, f"covariance should be {RHO} is {post_cov}" ) def test_probit_regression(self): """ x ~ Normal(0, 1) y ~ Bernoulli(Phi(x)) P(Phi(x) | y = true) ~ Beta(2, 1) P(Phi(x) | y = false) ~ Beta(1, 2) """ g = graph.Graph() zero = g.add_constant_real(0.0) one = g.add_constant_pos_real(1.0) prior = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [zero, one] ) x = g.add_operator(graph.OperatorType.SAMPLE, [prior]) phi_x = g.add_operator(graph.OperatorType.PHI, [x]) likelihood = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [phi_x] ) y = g.add_operator(graph.OperatorType.SAMPLE, [likelihood]) g.observe(y, True) phi_x_sq = g.add_operator(graph.OperatorType.MULTIPLY, [phi_x, phi_x]) g.query(phi_x) g.query(phi_x_sq) means = g.infer_mean(10000, graph.InferenceType.NMC) post_var = means[1] - means[0] ** 2 self.assertAlmostEqual( means[0], 2 / (2 + 1), msg=f"posterior mean {means[0]} is not accurate", delta=0.01, ) self.assertAlmostEqual( post_var, 2 * 1 / (2 + 1) ** 2 / (2 + 1 + 1), 2, f"posterior variance {post_var} is not accurate", ) # now test P(Phi(x) | y = false) ~ Beta(1, 2) g.remove_observations() g.observe(y, False) means = g.infer_mean(10000, graph.InferenceType.NMC) post_var = means[1] - means[0] ** 2 self.assertAlmostEqual( means[0], 1 / (1 + 2), 2, f"posterior mean {means[0]} is not accurate" ) self.assertAlmostEqual( post_var, 1 * 2 / (1 + 2) ** 2 / (1 + 2 + 1), 2, f"posterior variance {post_var} is not accurate", ) def test_clara_gp(self): """ CLARA-GP model f() ~ GP(0, squared_exp_covar) for each labeler l: spec_l ~ Beta(SPEC_ALPHA, SPEC_BETA) sens_l ~ Beta(SENS_ALPHA, SENS_BETA) for each item i violating_i ~ Bernoulli(Phi(f(i))) for each labeler l if violating_i prob_i_l = sens_l else prob_i_l = 1 - spec_l label_i_l ~ Bernoulli(prob_i_l) """ ALPHA = 1.0 RHO = 0.1 SENS_ALPHA = 9.0 SENS_BETA = 1.0 SPEC_ALPHA = 9.5 SPEC_BETA = 0.5 NUM_LABELERS = 2 SCORES = np.array([0.1, 0.2, 0.3]) ITEM_LABELS = [[False, False], [False, True], [True, True]] # see https://mc-stan.org/docs/2_19/functions-reference/covariance.html for # a reference on this covariance function covar = ALPHA**2 * np.exp( -((np.expand_dims(SCORES, 1) - SCORES) ** 2) / 2 / RHO**2 ) tau = np.linalg.inv(covar) # the precision matrix g = graph.Graph() # first we will create f ~ GP flat = g.add_distribution( graph.DistributionType.FLAT, graph.AtomicType.REAL, [] ) f = [g.add_operator(graph.OperatorType.SAMPLE, [flat]) for _ in SCORES] for i in range(len(SCORES)): tau_i_i = g.add_constant_real(-0.5 * tau[i, i]) g.add_factor(graph.FactorType.EXP_PRODUCT, [tau_i_i, f[i], f[i]]) for j in range(i + 1, len(SCORES)): tau_i_j = g.add_constant_real(-1.0 * tau[i, j]) g.add_factor(graph.FactorType.EXP_PRODUCT, [tau_i_j, f[i], f[j]]) # for each labeler l: # spec_l ~ Beta(SPEC_ALPHA, SPEC_BETA) # sens_l ~ Beta(SENS_ALPHA, SENS_BETA) spec_alpha = g.add_constant_pos_real(SPEC_ALPHA) spec_beta = 
g.add_constant_pos_real(SPEC_BETA) spec_prior = g.add_distribution( graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [spec_alpha, spec_beta], ) sens_alpha = g.add_constant_pos_real(SENS_ALPHA) sens_beta = g.add_constant_pos_real(SENS_BETA) sens_prior = g.add_distribution( graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [sens_alpha, sens_beta], ) spec, comp_spec, sens = [], [], [] for labeler in range(NUM_LABELERS): spec.append(g.add_operator(graph.OperatorType.SAMPLE, [spec_prior])) comp_spec.append( g.add_operator(graph.OperatorType.COMPLEMENT, [spec[labeler]]) ) sens.append(g.add_operator(graph.OperatorType.SAMPLE, [sens_prior])) # for each item i for i, labels in enumerate(ITEM_LABELS): # violating_i ~ Bernoulli(Phi(f(i))) dist_i = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [g.add_operator(graph.OperatorType.PHI, [f[i]])], ) violating_i = g.add_operator(graph.OperatorType.SAMPLE, [dist_i]) # for each labeler l for lidx, label_val in enumerate(labels): # if violating_i # prob_i_l = sens_l # else # prob_i_l = 1 - spec_l prob_i_l = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [violating_i, sens[lidx], comp_spec[lidx]], ) # label_i_l ~ Bernoulli(prob_i_l) dist_i_l = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [prob_i_l], ) label_i_l = g.add_operator(graph.OperatorType.SAMPLE, [dist_i_l]) g.observe(label_i_l, label_val) g.query(violating_i) means = g.infer_mean(1000, graph.InferenceType.NMC) self.assertLess(means[0], means[1]) self.assertLess(means[1], means[2]) def test_uncoupled_bools(self): """ X_1 ~ Bernoulli(0.5) X_2 ~ Bernoulli(0.5) P(X_1 == X_2) = 0.5 """ g = graph.Graph() half = g.add_constant_probability(0.5) bernoulli = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [half] ) X_1 = g.add_operator(graph.OperatorType.SAMPLE, [bernoulli]) X_2 = g.add_operator(graph.OperatorType.SAMPLE, [bernoulli]) g.query(X_1) g.query(X_2) prob_equal = ( sum(x == y for (x, y) in g.infer(100000, graph.InferenceType.NMC)) / 100000 ) self.assertAlmostEqual(prob_equal, 0.5, delta=0.01) def test_coupled_bools(self): """ X_1 ~ Bernoulli(0.5) X_2 ~ Bernoulli(0.5) sigma_1 = 1 if X_1 else -1 sigma_2 = 1 if X_2 else -1 target += sigma_1 * sigma_2 P(X_1 == X_2) = e / (e + e^-1) """ g = graph.Graph() half = g.add_constant_probability(0.5) bernoulli = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [half] ) X_1 = g.add_operator(graph.OperatorType.SAMPLE, [bernoulli]) X_2 = g.add_operator(graph.OperatorType.SAMPLE, [bernoulli]) plus_one = g.add_constant_real(1.0) minus_one = g.add_constant_real(-1.0) sigma_1 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [X_1, plus_one, minus_one] ) sigma_2 = g.add_operator( graph.OperatorType.IF_THEN_ELSE, [X_2, plus_one, minus_one] ) g.add_factor(graph.FactorType.EXP_PRODUCT, [sigma_1, sigma_2]) g.query(X_1) g.query(X_2) prob_equal = ( sum(x == y for (x, y) in g.infer(100000, graph.InferenceType.NMC)) / 100000 ) self.assertAlmostEqual(prob_equal, 0.88, delta=0.01) @classmethod def create_GPfactor(cls, bmg, alpha, rho, scores, mu=0.0): # see https://mc-stan.org/docs/2_19/functions-reference/covariance.html for # a reference on this covariance function covar = alpha**2 * np.exp( -((np.expand_dims(scores, 1) - scores) ** 2) / 2 / rho**2 ) tau = np.linalg.inv(covar) # the precision matrix neg_mu = bmg.add_constant_real(-mu) # f ~ GP flat = bmg.add_distribution( graph.DistributionType.FLAT, graph.AtomicType.REAL, [] ) f 
= [bmg.add_operator(graph.OperatorType.SAMPLE, [flat]) for _ in scores] if mu == 0.0: f_centered = f else: f_centered = [ bmg.add_operator(graph.OperatorType.ADD, [fi, neg_mu]) for fi in f ] for i in range(len(scores)): tau_i_i = bmg.add_constant_real(-0.5 * tau[i, i]) bmg.add_factor( graph.FactorType.EXP_PRODUCT, [tau_i_i, f_centered[i], f_centered[i]] ) for j in range(i + 1, len(scores)): tau_i_j = bmg.add_constant_real(-1.0 * tau[i, j]) bmg.add_factor( graph.FactorType.EXP_PRODUCT, [tau_i_j, f_centered[i], f_centered[j]], ) return f @classmethod def sum_negate_nodes(cls, bmg, in_nodes): result = bmg.add_operator( graph.OperatorType.NEGATE, [ bmg.add_operator( graph.OperatorType.TO_REAL, [bmg.add_operator(graph.OperatorType.ADD, in_nodes)], ) ], ) return result def test_clara_gp_logit(self): """ CLARA-GP model with prev, sens, spec in logit space f_prev() ~ GP(0, squared_exp_covar) f_sens() ~ GP(logit(0.9), squared_exp_covar) f_spec() ~ GP(logit(0.95), squared_exp_covar) for each item i log_prev_i = -log1pexp(-f_prev(i)) # log(prev_i) log_comp_prev_i = -log1pexp(f_prev(i)) # log(1 - prev_i) # assume all labeller share the same sens and spec # so sens and spec only depends on score, indexed by i log_spec_i = -log1pexp(-f_spec(i)) log_com_spec_i = -log1pexp(f_spec(i)) log_sens_i = -log1pexp(-f_sens(i)) log_comp_sens_i = -log1pexp(f_sens(i)) loglik1, loglik2 = log_prev_i, log_comp_prev_i for each label loglik1 += label_i_l ? log_sens_i : log_comp_sens_i loglik2 += label_i_l ? log_comp_spec_i : log_spec_i add factor: logsumexp(loglik1, loglk2) """ ALPHA = 1.0 RHO = 0.1 SPEC_MU = 2.9 # logit(0.95) SENS_MU = 2.2 # logit(0.9) # NUM_LABELERS = 2 SCORES = np.array([0.1, 0.2, 0.3]) ITEM_LABELS = [[False, False], [False, True], [True, True]] # create f ~ GP g = graph.Graph() f_prev = self.create_GPfactor(g, ALPHA, RHO, SCORES) f_spec = self.create_GPfactor(g, ALPHA, RHO, SCORES, SPEC_MU) f_sens = self.create_GPfactor(g, ALPHA, RHO, SCORES, SENS_MU) # for each factor: # -log(p) = lop1pexp(-f) # -log(1-p) = log1pexp(f) # note: the followings log_* are negative log probabilities, # negate right before LOGSUMEXP # for each item i for i, labels in enumerate(ITEM_LABELS): # in this test case, we assume labelers share the same spec and sens log_spec = g.add_operator( graph.OperatorType.LOG1PEXP, [g.add_operator(graph.OperatorType.NEGATE, [f_spec[i]])], ) log_comp_spec = g.add_operator(graph.OperatorType.LOG1PEXP, [f_spec[i]]) log_sens = g.add_operator( graph.OperatorType.LOG1PEXP, [g.add_operator(graph.OperatorType.NEGATE, [f_sens[i]])], ) log_comp_sens = g.add_operator(graph.OperatorType.LOG1PEXP, [f_sens[i]]) log_prev = g.add_operator( graph.OperatorType.LOG1PEXP, [g.add_operator(graph.OperatorType.NEGATE, [f_prev[i]])], ) log_comp_prev = g.add_operator(graph.OperatorType.LOG1PEXP, [f_prev[i]]) loglik1, loglik2 = [log_prev], [log_comp_prev] # for each labeler l for label_val in labels: if label_val: loglik1.append(log_sens) loglik2.append(log_comp_spec) else: loglik1.append(log_comp_sens) loglik2.append(log_spec) loglik1 = self.sum_negate_nodes(g, loglik1) loglik2 = self.sum_negate_nodes(g, loglik2) g.add_factor( graph.FactorType.EXP_PRODUCT, [g.add_operator(graph.OperatorType.LOGSUMEXP, [loglik1, loglik2])], ) g.query(f_prev[i]) means = g.infer_mean(1000, graph.InferenceType.NMC) self.assertLess(means[0], means[1]) self.assertLess(means[1], means[2])
beanmachine-main
tests/graph/nmc_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import numpy as np from beanmachine import graph class TestBayesNet(unittest.TestCase): def test_simple_dep(self): g = graph.Graph() c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2])) d1 = g.add_distribution( graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1] ) g.add_operator(graph.OperatorType.SAMPLE, [d1]) def test_tabular(self): g = graph.Graph() c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2])) # negative test with self.assertRaises(ValueError) as cm: g.add_distribution( graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [] ) self.assertTrue("must be COL_SIMPLEX" in str(cm.exception)) g = graph.Graph() c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2])) var1 = g.add_operator( graph.OperatorType.SAMPLE, [ g.add_distribution( graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1] ) ], ) var2 = g.add_operator( graph.OperatorType.SAMPLE, [ g.add_distribution( graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1] ) ], ) # since the following has two parents it must have a tabular dist with # 3 dimensions in the tensor with self.assertRaises(ValueError) as cm: g.add_operator( graph.OperatorType.SAMPLE, [ g.add_distribution( graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1, var1, var2], ) ], ) self.assertTrue("expected 4 dims got 1" in str(cm.exception)) c2 = g.add_constant_col_simplex_matrix(np.array([[0.6, 0.99], [0.4, 0.01]])) g.add_distribution( graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c2, g.add_constant_bool(True)], ) with self.assertRaises(ValueError) as cm: g.add_distribution( graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c2, g.add_constant_natural(1)], ) self.assertTrue("only supports boolean parents" in str(cm.exception)) c3 = g.add_constant_real_matrix(np.array([1.1, -0.1])) with self.assertRaises(ValueError) as cm: g.add_distribution( graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c3] ) self.assertTrue("must be COL_SIMPLEX" in str(cm.exception)) c4 = g.add_constant_col_simplex_matrix(np.array([0.6, 0.3, 0.1])) with self.assertRaises(ValueError) as cm: g.add_distribution( graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c4] ) self.assertTrue("must have two rows" in str(cm.exception)) def test_bernoulli(self): g = graph.Graph() c1 = g.add_constant_probability(1.0) c2 = g.add_constant_probability(0.8) # negative tests on number of parents # 0 parents not allowed with self.assertRaises(ValueError) as cm: g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [] ) self.assertTrue( "Bernoulli distribution must have exactly one parent" in str(cm.exception) ) # 2 parents not allowed with self.assertRaises(ValueError) as cm: g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c1, c2] ) self.assertTrue( "Bernoulli distribution must have exactly one parent" in str(cm.exception) ) # 1 parent is OK d1 = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c1] ) # negative test on type of parent c3 = g.add_constant_natural(1) with self.assertRaises(ValueError) as cm: g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c3] ) self.assertTrue("must be a probability" in str(cm.exception)) # negative test on value of parent with self.assertRaises(ValueError) as cm: 
g.add_constant_probability(1.1) self.assertTrue("must be between 0 and 1" in str(cm.exception)) v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1]) g.query(v1) samples = g.infer(1) self.assertEqual(type(samples[0][0]), bool) self.assertTrue(samples[0][0]) means = g.infer_mean(1) self.assertEqual(len(means), 1, "exactly one node queried") def test_beta(self): g = graph.Graph() c1 = g.add_constant_pos_real(1.1) c2 = g.add_constant_pos_real(5.0) # negative tests on number of parents # 0 parents not allowed with self.assertRaises(ValueError) as cm: g.add_distribution( graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [] ) self.assertTrue( "Beta distribution must have exactly two parents" in str(cm.exception) ) # 1 parent not allowed with self.assertRaises(ValueError) as cm: g.add_distribution( graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [c1] ) self.assertTrue( "Beta distribution must have exactly two parents" in str(cm.exception) ) # negative test on type of parent c3 = g.add_constant_bool(True) with self.assertRaises(ValueError) as cm: g.add_distribution( graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [c3, c3] ) self.assertTrue("must be positive real-valued" in str(cm.exception)) # negative test on sample type with self.assertRaises(ValueError) as cm: g.add_distribution( graph.DistributionType.BETA, graph.AtomicType.REAL, [c1, c2] ) self.assertTrue("Beta produces probability samples" in str(cm.exception)) # 2 real-valued parents with probability sample type are OK d1 = g.add_distribution( graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [c1, c2] ) # now let's draw some samples from the Beta distribution v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1]) g.query(v1) samples = g.infer(1, graph.InferenceType.REJECTION) self.assertEqual(type(samples[0][0]), float) self.assertTrue(samples[0][0] > 0 and samples[0][0] < 1) means = g.infer_mean(10000, graph.InferenceType.REJECTION) self.assertAlmostEqual(means[0], 1.1 / (1.1 + 5.0), 2, "beta mean") def test_binomial(self): g = graph.Graph() c1 = g.add_constant_natural(10) c2 = g.add_constant_probability(0.55) d1 = g.add_distribution( graph.DistributionType.BINOMIAL, graph.AtomicType.NATURAL, [c1, c2] ) v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1]) g.query(v1) samples = g.infer(1, graph.InferenceType.REJECTION) self.assertEqual(type(samples[0][0]), int) self.assertTrue(samples[0][0] <= 10) means = g.infer_mean(10000, graph.InferenceType.REJECTION) self.assertTrue(means[0] > 5 and means[0] < 6) def test_categorical(self): g = graph.Graph() simplex = [0.5, 0.25, 0.125, 0.125] c1 = g.add_constant_col_simplex_matrix(np.array(simplex)) # Negative test: Number of parents must be exactly one: with self.assertRaises(ValueError) as cm: g.add_distribution( graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, [] ) self.assertTrue( "Categorical distribution must have exactly one parent" in str(cm.exception) ) with self.assertRaises(ValueError) as cm: g.add_distribution( graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, [c1, c1] ) self.assertEqual( "Categorical distribution must have exactly one parent", str(cm.exception) ) # Negative test: parent must be simplex: c3 = g.add_constant_natural(1) with self.assertRaises(ValueError) as cm: g.add_distribution( graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, [c3] ) self.assertEqual( "Categorical parent must be a one-column simplex", str(cm.exception) ) # Negative test: type must be natural with self.assertRaises(ValueError) as cm: 
g.add_distribution( graph.DistributionType.CATEGORICAL, graph.AtomicType.REAL, [c1] ) self.assertEqual( "Categorical produces natural valued samples", str(cm.exception) ) # Positive test: d1 = g.add_distribution( graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, [c1] ) v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1]) g.query(v1) num_samples = 10000 # TODO: We use rejection sampling in this test because at present NMC # does not support inference over naturals. If inference over discrete # variables is important for BMG, we should create a Uniform Proposer # similar to how it's done in Bean Machine proper. samples = g.infer( num_samples=num_samples, algorithm=graph.InferenceType.REJECTION, seed=123, n_chains=1, )[0] # The distribution of the samples should closely match the simplex used to # generate them. histogram = [0, 0, 0, 0] for sample in samples: histogram[sample[0]] += 1 self.assertAlmostEqual(simplex[0], histogram[0] / num_samples, delta=0.01) self.assertAlmostEqual(simplex[1], histogram[1] / num_samples, delta=0.01) self.assertAlmostEqual(simplex[2], histogram[2] / num_samples, delta=0.01) self.assertAlmostEqual(simplex[3], histogram[3] / num_samples, delta=0.01) def _create_graph(self): g = graph.Graph() c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2])) c2 = g.add_constant_col_simplex_matrix(np.array([[0.6, 0.99], [0.4, 0.01]])) c3 = g.add_constant_col_simplex_matrix( np.transpose(np.array([[1, 0], [0.2, 0.8], [0.1, 0.9], [0.01, 0.99]])) ) Rain = g.add_operator( graph.OperatorType.SAMPLE, [ g.add_distribution( graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1] ) ], ) Sprinkler = g.add_operator( graph.OperatorType.SAMPLE, [ g.add_distribution( graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c2, Rain] ) ], ) GrassWet = g.add_operator( graph.OperatorType.SAMPLE, [ g.add_distribution( graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c3, Sprinkler, Rain], ) ], ) return g, Rain, Sprinkler, GrassWet def test_query(self): g, Rain, Sprinkler, GrassWet = self._create_graph() g.query(Rain) g.query(Sprinkler) g.query(GrassWet) g.infer(1) p = g.add_constant_probability(0.8) b = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [p] ) # Querying a constant is weird but allowed g.query(p) # But querying a distribution directly rather than a sample is # illegal: with self.assertRaises(ValueError) as cm: g.query(b) self.assertEqual( f"Query of node_id {b} expected a node of type 1 or 3 but is 2", str(cm.exception), ) def test_to_dot(self): self.maxDiff = None g, Rain, Sprinkler, GrassWet = self._create_graph() g.query(Rain) g.query(Sprinkler) g.query(GrassWet) g.observe(GrassWet, True) observed = g.to_dot() expected = """ digraph "graph" { N0[label="simplex"]; N1[label="simplex"]; N2[label="simplex"]; N3[label="Tabular"]; N4[label="~"]; N5[label="Tabular"]; N6[label="~"]; N7[label="Tabular"]; N8[label="~"]; N0 -> N3; N1 -> N5; N2 -> N7; N3 -> N4; N4 -> N5; N4 -> N7; N5 -> N6; N6 -> N7; N7 -> N8; O0[label="Observation"]; N8 -> O0; Q0[label="Query"]; N4 -> Q0; Q1[label="Query"]; N6 -> Q1; Q2[label="Query"]; N8 -> Q2; }""" self.assertEqual(expected.strip(), observed.strip()) def test_observe(self): g, Rain, Sprinkler, GrassWet = self._create_graph() g.observe(GrassWet, True) with self.assertRaises(ValueError) as cm: g.observe(GrassWet, True) self.assertTrue("duplicate observe for node" in str(cm.exception)) g = graph.Graph() c1 = g.add_constant_probability(1.0) c2 = g.add_constant_probability(0.5) o1 = 
g.add_operator(graph.OperatorType.MULTIPLY, [c1, c2]) d1 = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [o1] ) o2 = g.add_operator(graph.OperatorType.SAMPLE, [d1]) with self.assertRaises(ValueError) as cm: g.observe(o1, True) self.assertTrue( "only SAMPLE and IID_SAMPLE nodes may be observed" in str(cm.exception) ) g.observe(o2, True) # ok to observe this node with self.assertRaises(ValueError) as cm: g.observe(o2, False) self.assertTrue("duplicate observe" in str(cm.exception)) g.remove_observations() g.observe(o2, False) def test_inference(self): g, Rain, Sprinkler, GrassWet = self._create_graph() g.observe(GrassWet, True) qr = g.query(Rain) g.query(GrassWet) # Querying the same node twice is idempotent. self.assertEqual(g.query(Rain), qr) samples = g.infer(1) self.assertTrue(len(samples) == 1) # since we have observed grass wet is true the query should be true self.assertEqual(type(samples[0][1]), bool) self.assertTrue(samples[0][1]) # test parallel inference samples_all = g.infer(num_samples=1, n_chains=2) self.assertTrue(len(samples_all) == 2) self.assertTrue(len(samples_all[0]) == 1) self.assertTrue(len(samples_all[1]) == 1) self.assertEqual(samples[0][0], samples_all[0][0][0]) self.assertEqual(samples[0][1], samples_all[0][0][1]) self.assertEqual(type(samples_all[1][0][0]), bool) self.assertEqual(type(samples_all[1][0][1]), bool) self.assertTrue(samples_all[1][0][1]) def test_infer_mean(self): g = graph.Graph() c1 = g.add_constant_probability(1.0) op1 = g.add_operator(graph.OperatorType.MULTIPLY, [c1, c1]) d1 = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [op1] ) op2 = g.add_operator(graph.OperatorType.SAMPLE, [d1]) g.query(op1) g.query(op2) means = g.infer_mean(100) self.assertAlmostEqual(means[0], 1.0) self.assertAlmostEqual(means[1], 1.0) # test parallel inference means_all = g.infer_mean(num_samples=100, n_chains=2) self.assertTrue(len(means_all) == 2) self.assertAlmostEqual(means_all[0][0], 1.0) self.assertAlmostEqual(means_all[0][1], 1.0) self.assertAlmostEqual(means_all[1][0], 1.0) self.assertAlmostEqual(means_all[1][1], 1.0) def test_neg_real(self): g = graph.Graph() with self.assertRaises(ValueError) as cm: g.add_constant_neg_real(1.25) self.assertTrue("neg_real must be <=0" in str(cm.exception)) neg1 = g.add_constant_neg_real(-1.25) expected = """ 0: CONSTANT(negative real -1.25) (out nodes: ) """ self.assertEqual(g.to_string().strip(), expected.strip()) add_negs = g.add_operator(graph.OperatorType.ADD, [neg1, neg1]) g.query(add_negs) means = g.infer_mean(10) self.assertAlmostEqual(means[0], -2.5) samples = g.infer(10) self.assertAlmostEqual(samples[0][0], -2.5) def test_get_log_prob(self): g, Rain, Sprinkler, GrassWet = self._create_graph() g.observe(GrassWet, True) g.query(Rain) g.query(GrassWet) conf = graph.InferConfig() conf.keep_log_prob = True g.infer( num_samples=10, algorithm=graph.InferenceType.GIBBS, seed=123, n_chains=2, infer_config=conf, ) log_probs = g.get_log_prob() self.assertEqual(len(log_probs), 2) self.assertEqual(len(log_probs[0]), 10) def test_graph_stats(self): g = graph.Graph() c1 = g.add_constant_natural(10) c2 = g.add_constant_probability(0.55) d1 = g.add_distribution( graph.DistributionType.BINOMIAL, graph.AtomicType.NATURAL, [c1, c2] ) g.add_operator(graph.OperatorType.SAMPLE, [d1]) stats = g.collect_statistics() self.maxDiff = None expected = """ Graph Statistics Report ####################### Number of nodes: 4 Number of edges: 3 Graph density: 0.25 Number of root nodes: 2 
Number of terminal nodes: 1 Maximum no. of incoming edges into a node: 2 Maximum no. of outgoing edges from a node: 1 Node statistics: ################ CONSTANT: 2 \tRoot nodes: 2 \tConstant node statistics: \t------------------------- \t\tPROBABILITY and SCALAR: 1 \t\tNATURAL and SCALAR: 1 \t\tDistribution of incoming edges: \t\t------------------------------- \t\tNodes with 0 edges: 2 \t\tDistribution of outgoing edges: \t\t------------------------------- \t\tNodes with 1 edges: 2 DISTRIBUTION: 1 \tNo root or terminal nodes \tDistribution node statistics: \t----------------------------- \t\tBINOMIAL: 1 \t\tDistribution of incoming edges: \t\t------------------------------- \t\tNodes with 2 edges: 1 \t\tDistribution of outgoing edges: \t\t------------------------------- \t\tNodes with 1 edges: 1 OPERATOR: 1 \tTerminal nodes: 1 \tOperator node statistics: \t------------------------- \t\tSAMPLE: 1 \t\tDistribution of incoming edges: \t\t------------------------------- \t\tNodes with 1 edges: 1 \t\tDistribution of outgoing edges: \t\t------------------------------- \t\tNodes with 0 edges: 1 Edge statistics: ################ \tDistribution of incoming edges: \t------------------------------- \tNodes with 0 edges: 2 \tNodes with 1 edges: 1 \tNodes with 2 edges: 1 \tDistribution of outgoing edges: \t------------------------------- \tNodes with 0 edges: 1 \tNodes with 1 edges: 3 """ self.assertEqual(stats.strip(), expected.strip()) class TestContinuousModels(unittest.TestCase): def test_product_distribution(self): g = graph.Graph() MEAN0 = -5.0 STD0 = 1.0 real0 = g.add_constant_real(MEAN0) pos0 = g.add_constant_pos_real(STD0) normal_dist0 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [real0, pos0] ) real1 = g.add_operator(graph.OperatorType.SAMPLE, [normal_dist0]) STD1 = 2.0 pos1 = g.add_constant_pos_real(STD1) normal_dist1 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [real1, pos1] ) MEAN2 = 5.0 STD2 = 2.0 real2 = g.add_constant_real(MEAN2) pos2 = g.add_constant_pos_real(STD2) normal_dist2 = g.add_distribution( graph.DistributionType.NORMAL, graph.AtomicType.REAL, [real2, pos2] ) product_dist1 = g.add_distribution( graph.DistributionType.PRODUCT, graph.AtomicType.REAL, [normal_dist1, normal_dist2], ) product_sample1 = g.add_operator(graph.OperatorType.SAMPLE, [product_dist1]) product_sample2 = g.add_operator(graph.OperatorType.SAMPLE, [product_dist1]) product_sample3 = g.add_operator(graph.OperatorType.SAMPLE, [product_dist1]) g.observe(product_sample1, -1.0) g.observe(product_sample2, 0.0) g.observe(product_sample3, 1.0) g.query(real1) default_config = graph.InferConfig() samples = g.infer( num_samples=10000, algorithm=graph.InferenceType.NMC, seed=5123401, n_chains=1, infer_config=default_config, ) chain = 0 variable = 0 values = [sample_tuple[variable] for sample_tuple in samples[chain]] mean = sum(values) / len(values) print(mean) expected = -2.848 # obtained from the same test ran in C++ self.assertAlmostEqual(mean, expected, delta=0.1)
beanmachine-main
tests/graph/graph_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import unittest import numpy as np from beanmachine import graph class TestCAVI(unittest.TestCase): def test_interface(self): g = graph.Graph() c1 = g.add_constant_probability(0.1) d1 = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c1] ) o1 = g.add_operator(graph.OperatorType.SAMPLE, [d1]) g.query(o1) mean_vars = g.variational(100, 10, elbo_samples=100) self.assertEqual(len(mean_vars), 1, "number of queries") self.assertEqual(len(mean_vars[0]), 1, "each parameter must be mean") elbo = g.get_elbo() self.assertEqual(len(elbo), 100, "one ELBO value per iteration") mean_vars = g.variational(100, 10) # elbo_samples=0 by default elbo = g.get_elbo() self.assertEqual(len(elbo), 0, "ELBO not computed unless requested") def build_graph1(self): """ o1 ~ Bernoulli( 0.1 ) o2 ~ Bernoulli( exp( - o1 ) ) infer P(o1 | o2 = True) now, P(o1 = T, o2 = T) = 0.1 * exp(-1) = 0.036787944117144235 and, P(o1 = F, o2 = T) = 0.9 * exp(0) = 0.9 => P(o1 = True | o2 = True) = 0.03927030055005057 also P(o2 = True) = 0.9367879441171443 >= ELBO """ g = graph.Graph() c1 = g.add_constant_probability(0.1) d1 = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c1] ) o1 = g.add_operator(graph.OperatorType.SAMPLE, [d1]) c2 = g.add_constant_col_simplex_matrix( np.array([[0.0, 1 - math.exp(-1)], [1.0, math.exp(-1)]]) ) d2 = g.add_distribution( graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c2, o1] ) o2 = g.add_operator(graph.OperatorType.SAMPLE, [d2]) g.observe(o2, True) g.query(o1) return g def test_cavi1(self): g = self.build_graph1() means = g.variational(1, 1, elbo_samples=100) self.assertAlmostEqual(0.039, means[0][0], 2, "posterior mean") elbo = g.get_elbo() self.assertGreater(math.log(0.94), elbo[0], "ELBO") def test_gibbs1(self): g = self.build_graph1() samples = g.infer(1000, graph.InferenceType.GIBBS) means = np.array(samples, dtype=float).mean(axis=0) self.assertGreater(means[0].item(), 0.03) self.assertLess(means[0].item(), 0.05) def build_graph2(self): """ This is a simplified noisy-or model X ~ Bernoulli(0.01) Y ~ Bernoulli(0.01) Z ~ Bernoulli(1 - exp( log(0.99) + log(0.01)*X + log(0.01)*Y )) Note: the last line is equivalent to: Z ~ BernoulliNoisyOr( - ( log(0.99) + log(0.01)*X + log(0.01)*Y ) ) OR Z ~ BernoulliNoisyOr( -log(0.99) + (-log(0.01))*X + (-log(0.01))*Y ) ) query (X, Y) observe Z = True X Y P(X, Y, Z=T) P(X, Y | Z=T) --------------------------------- F F 0.009801 0.3322 F T 0.009802 0.3322 T F 0.009802 0.3322 T T 0.0000999901 0.0034 P(Z=T) = 0.029505, thus ELBO <= log(.029505) = -3.5232 Let Q(X) = Q(Y) = Bernoulli(q); The KL-Divergence as a function of q is: kl = lambda q: (1-q)**2 * (2*log(1-q)-log(.3322)) + 2*q*(1-q)*(log(q)+log(1-q)-log(.3322)) + q**2 * (2*log(q)-log(.0034)) KL Divergence is minimized at q=0.245, and kl(.245) = .2635 And max ELBO = log P(Z=T) - kl(.245) = -3.7867 """ g = graph.Graph() c_prior = g.add_constant_probability(0.01) d_prior = g.add_distribution( graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c_prior] ) x = g.add_operator(graph.OperatorType.SAMPLE, [d_prior]) y = g.add_operator(graph.OperatorType.SAMPLE, [d_prior]) pos_x = g.add_operator(graph.OperatorType.TO_POS_REAL, [x]) pos_y = g.add_operator(graph.OperatorType.TO_POS_REAL, [y]) c_m_log_pt01 = g.add_constant_pos_real(-(math.log(0.01))) 
c_m_log_pt99 = g.add_constant_pos_real(-(math.log(0.99))) param = g.add_operator( graph.OperatorType.ADD, [ c_m_log_pt99, g.add_operator(graph.OperatorType.MULTIPLY, [c_m_log_pt01, pos_x]), g.add_operator(graph.OperatorType.MULTIPLY, [c_m_log_pt01, pos_y]), ], ) d_like = g.add_distribution( graph.DistributionType.BERNOULLI_NOISY_OR, graph.AtomicType.BOOLEAN, [param] ) z = g.add_operator(graph.OperatorType.SAMPLE, [d_like]) g.observe(z, True) g.query(x) g.query(y) return g def test_gibbs2(self): g = self.build_graph2() samples = np.array(g.infer(10000, graph.InferenceType.GIBBS), dtype=float) x_marginal = samples.mean(axis=0)[0] y_marginal = samples.mean(axis=0)[1] x_y_joint = (samples[:, 0] * samples[:, 1]).mean() self.assertAlmostEqual( x_marginal, y_marginal, 1, "posterior marginal of x and y are nearly equal" ) self.assertAlmostEqual(x_marginal, 0.33, 1, "posterior x is 0.33") self.assertLess(x_y_joint, 0.01, "joint posterior of x and y < 0.01") def test_cavi2(self): g = self.build_graph2() means = g.variational(100, 1000, elbo_samples=1000) self.assertAlmostEqual( means[0][0], means[1][0], 1, "X and Y have same variational posterior" ) self.assertAlmostEqual(means[0][0], 0.245, 1, "X posterior is ?") elbo = g.get_elbo() self.assertAlmostEqual(elbo[-1], -3.7867, 1, "ELBO converged")
beanmachine-main
tests/graph/cavi_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

__version__ = "0.2.0"
beanmachine-main
src/beanmachine/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
The file contains MiniBM, a minimal implementation of Bean Machine PPL with a
Metropolis Hastings implementation and a coin flipping model at the end. It is
standalone, in that MiniBM does not depend on the Bean Machine framework at all.
The only two dependencies for MiniBM are the PyTorch library and tqdm (for the
progress bar).
"""

from __future__ import annotations

import itertools
import random
from collections import defaultdict
from functools import wraps
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple

import torch
import torch.distributions as dist
from tqdm.auto import tqdm


class RVIdentifier(NamedTuple):
    """
    A struct whose attributes uniquely identify a random variable in Bean Machine.

    Args:
        wrapper: A reference to the decorated random variable function
        args: Arguments taken by the random variable
    """

    wrapper: Callable
    args: Tuple

    @property
    def function(self):
        """A pointer to the original function that returns the distribution object"""
        return self.wrapper.__wrapped__  # calls the original function


def random_variable(f: Callable[Any, dist.Distribution]):
    """A decorator that converts a Python function that returns a distribution into
    a function that evaluates to a Bean Machine random variable. In Bean Machine, a
    @random_variable function can be used in two ways:

    1. When being invoked outside of an inference scope, it returns an RVIdentifier
       without evaluating the original function.
    2. During inference, or when being invoked from another random variable, the
       function will update the graph (if needed) and return its value at the
       current state of inference.

    For example::

        @random_variable
        def foo():
            return dist.Normal(0., 1.0)

        print(foo())  # RVIdentifier(wrapper=foo, args=())

        @random_variable
        def bar():
            mean = foo()  # evaluates to the value of foo() during inference
            return dist.Normal(mean, 1.0)

    Args:
        f: A function that returns a PyTorch Distribution object
    """

    @wraps(f)
    def wrapper(*args):
        rvid = RVIdentifier(wrapper, args)
        # Bean Machine inference methods use the World class to store and control the
        # state of inference
        world = get_world_context()
        if world is None:
            # We're not in an active inference. Return an ID for the random variable
            return rvid
        else:
            # Update the graph and return current value of the random variable in world
            return world.update_graph(rvid)

    return wrapper


RVDict = Dict[RVIdentifier, torch.Tensor]  # alias for typing

WORLD_STACK: List[World] = []


def get_world_context() -> Optional[World]:
    """Returns the active World (if any) or None"""
    return WORLD_STACK[-1] if WORLD_STACK else None


class World:
    """
    A World is Bean Machine's internal representation of a state of the model. At the
    high level, it stores a value for each of the random variables. It can also be
    used as a context manager to control the behavior of random variables.

    For example::

        @random_variable
        def foo():
            return dist.Normal(0., 1.0)

        @random_variable
        def bar():
            return dist.Normal(foo(), 1.0)

        # initialize world and add bar() and its ancestors to it
        world = World.initialize_world([bar()])
        world[bar()]  # returns the value of bar() in world
        world[foo()]  # since foo() is bar()'s parent, it is also initialized in world

        # World is also used within inference as a context manager to control the
        # behavior of random variables
        with world:
            foo()  # returns the value of foo() in world, which equals world[foo()]
    """

    def __init__(self, observations: Optional[RVDict] = None):
        self.observations: RVDict = observations or {}
        self.variables: RVDict = {}

    def __getitem__(self, node: RVIdentifier) -> torch.Tensor:
        return self.variables[node]

    def __enter__(self) -> World:
        WORLD_STACK.append(self)
        return self

    def __exit__(self, *args) -> None:
        WORLD_STACK.pop()

    def update_graph(self, node: RVIdentifier) -> torch.Tensor:
        """Update the graph by adding node to self (if needed) and return the value
        of node in self."""
        if node not in self.variables:
            # parent nodes will be invoked when calling node.get_distribution
            distribution = self.get_distribution(node)
            if node in self.observations:
                self.variables[node] = self.observations[node]
            else:
                self.variables[node] = distribution.sample()
        return self.variables[node]

    def replace(self, values: RVDict) -> World:
        """Return a new world where the values of the random variables are replaced by
        the provided values"""
        new_world = World(self.observations)
        new_world.variables = {**self.variables, **values}
        return new_world

    def log_prob(self) -> torch.Tensor:
        """Return the joint log prob on all random variables in the world"""
        log_prob = torch.tensor(0.0)
        for node, value in self.variables.items():
            distribution = self.get_distribution(node)
            log_prob += distribution.log_prob(value).sum()
        return log_prob

    def get_distribution(self, node: RVIdentifier) -> dist.Distribution:
        """A utility method that activates the current world and invokes the function
        associated with node. Bean Machine requires random variable functions to
        return a distribution object, so this method will also return a distribution
        object."""
        with self:
            return node.function(*node.args)

    @staticmethod
    def initialize_world(
        queries: List[RVIdentifier], observations: Optional[RVDict] = None
    ) -> World:
        """Initializes and returns a new world. Starting from the queries and
        observations, the parent nodes will be added recursively to the world."""
        observations = observations or {}
        world = World(observations)
        for node in itertools.chain(queries, observations):
            world.update_graph(node)
        return world


class MetropolisHastings:
    """A naive implementation of the `Metropolis-Hastings algorithm
    <https://en.wikipedia.org/wiki/Metropolis%E2%80%93Hastings_algorithm>`_"""

    def infer(
        self,
        queries: List[RVIdentifier],
        observations: Optional[RVDict],
        num_samples: int,
    ) -> RVDict:
        world = World.initialize_world(queries, observations)
        samples = defaultdict(list)

        # the main inference loop
        for _ in tqdm(range(num_samples)):
            # random.shuffle needs a mutable sequence, so collect the latent nodes
            # into a list before shuffling
            latent_nodes = list(world.variables.keys() - world.observations.keys())
            random.shuffle(latent_nodes)

            # randomly select a node to be updated at a time
            for node in latent_nodes:
                proposer_distribution = world.get_distribution(node)
                new_value = proposer_distribution.sample()
                new_world = world.replace({node: new_value})
                backward_distribution = new_world.get_distribution(node)

                # log P(x, y)
                old_log_prob = world.log_prob()
                # log P(x', y)
                new_log_prob = new_world.log_prob()
                # log g(x'|x)
                forward_log_prob = proposer_distribution.log_prob(new_value).sum()
                # log g(x|x')
                backward_log_prob = backward_distribution.log_prob(world[node]).sum()

                accept_log_prob = (
                    new_log_prob + backward_log_prob - old_log_prob - forward_log_prob
                )
                if torch.bernoulli(accept_log_prob.exp().clamp(max=1)):
                    # accept the new state
                    world = new_world

            # collect the samples before moving to the next iteration
            for node in queries:
                samples[node].append(world[node])

        # stack the list of tensors into a single tensor
        samples = {node: torch.stack(samples[node]) for node in samples}
        return samples


def main():
    # coin flipping model adapted from our tutorial
    # (https://beanmachine.org/docs/overview/tutorials/Coin_flipping/CoinFlipping/)
    @random_variable
    def weight():
        return dist.Beta(2, 2)

    @random_variable
    def y():
        return dist.Bernoulli(weight()).expand((N,))

    # data generation
    true_weight = 0.75
    true_y = dist.Bernoulli(true_weight)
    N = 100
    y_obs = true_y.sample((N,))
    print("Head rate:", y_obs.mean())

    # running inference
    samples = MetropolisHastings().infer([weight()], {y(): y_obs}, num_samples=500)
    print("Estimated weight of the coin:", samples[weight()].mean())


if __name__ == "__main__":
    main()
beanmachine-main
src/beanmachine/minibm.py
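A minimal usage sketch of the `MetropolisHastings` sampler defined above, on a conjugate Normal-Normal model. This is not taken from the repository; it assumes `minibm.py` is importable as `beanmachine.minibm` (inferred from the file location), and the closed-form posterior mean is mentioned only for comparison.

# Hedged sketch: running minibm's Metropolis-Hastings on a Normal-Normal model.
import torch
import torch.distributions as dist
from beanmachine.ppl import random_variable
from beanmachine.minibm import MetropolisHastings  # assumed import path


@random_variable
def mu():
    # prior: mu ~ Normal(0, 1)
    return dist.Normal(0.0, 1.0)


@random_variable
def x():
    # likelihood: 10 i.i.d. observations with unit noise
    return dist.Normal(mu(), 1.0).expand((10,))


x_obs = dist.Normal(2.0, 1.0).sample((10,))
samples = MetropolisHastings().infer([mu()], {x(): x_obs}, num_samples=1000)
# for this conjugate model, the exact posterior mean of mu is 10 * x_obs.mean() / 11
print(samples[mu()].mean())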
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from torch.distributions import Distribution from . import experimental from .diagnostics import Diagnostics from .diagnostics.common_statistics import effective_sample_size, r_hat, split_r_hat from .diagnostics.tools import viz from .inference import ( CompositionalInference, empirical, GlobalHamiltonianMonteCarlo, GlobalNoUTurnSampler, seed, simulate, SingleSiteAncestralMetropolisHastings, SingleSiteHamiltonianMonteCarlo, SingleSiteNewtonianMonteCarlo, SingleSiteNoUTurnSampler, SingleSiteRandomWalk, SingleSiteUniformMetropolisHastings, ) from .model import ( functional, get_beanmachine_logger, param, random_variable, RVIdentifier, ) LOGGER = get_beanmachine_logger() # TODO(@neerajprad): Remove once T81756389 is fixed. Distribution.set_default_validate_args(False) __all__ = [ "CompositionalInference", "Diagnostics", "GlobalHamiltonianMonteCarlo", "GlobalNoUTurnSampler", "Predictive", "RVIdentifier", "SingleSiteAncestralMetropolisHastings", "SingleSiteHamiltonianMonteCarlo", "SingleSiteNewtonianMonteCarlo", "SingleSiteNoUTurnSampler", "SingleSiteRandomWalk", "SingleSiteUniformMetropolisHastings", "effective_sample_size", "empirical", "experimental", "functional", "seed", "param", "r_hat", "random_variable", "simulate", "split_r_hat", "viz", ]
beanmachine-main
src/beanmachine/ppl/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
beanmachine-main
src/beanmachine/ppl/experimental/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import warnings from enum import Enum from typing import Callable from beanmachine.ppl.inference.proposer.nnc import nnc_jit class TorchJITBackend(Enum): NONE = "none" NNC = "nnc" INDUCTOR = "inductor" # TODO (T135789755): update the API to select between backends when we move this # integration out of experimental. def get_backend( nnc_compile: bool, experimental_inductor_compile: bool ) -> TorchJITBackend: """A helper function to select between the Torch JIT backends based on the flags""" if experimental_inductor_compile: if nnc_compile: warnings.warn( "Overriding nnc_compile option with experimental_inductor_compile", stacklevel=3, ) warnings.warn( "The support of TorchInductor is experimental and the API is " "subject to change in the future releases of Bean Machine. For " "questions regarding TorchInductor, please see " "https://github.com/pytorch/torchdynamo.", stacklevel=3, ) return TorchJITBackend.INDUCTOR elif nnc_compile: return TorchJITBackend.NNC else: return TorchJITBackend.NONE def inductor_jit(f: Callable) -> Callable: """ A helper function that lazily imports the TorchInductor utils and the related libraries, then invoke functorch to JIT compile the provided function. """ # Lazily import related libraries so users don't have them (e.g. from using # an older version of PyTorch) won't run into ModuleNotFound error when # importing Bean Machine from functorch.compile import aot_function from torch._inductor.compile_fx import compile_fx_inner from torch._inductor.decomposition import select_decomp_table return aot_function(f, compile_fx_inner, decompositions=select_decomp_table()) def jit_compile(f: Callable, backend: TorchJITBackend) -> Callable: if backend is TorchJITBackend.NNC: return nnc_jit(f) elif backend is TorchJITBackend.INDUCTOR: return inductor_jit(f) else: # Fall back to use PyTorch return f
beanmachine-main
src/beanmachine/ppl/experimental/torch_jit_backend.py
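A rough sketch of exercising the backend-selection helpers above directly. The module path is inferred from the file location, and the NNC path is assumed to have functorch available; with both flags False, `jit_compile` simply returns the original function.

# Hedged sketch: selecting and applying a JIT backend to a toy function.
import torch

from beanmachine.ppl.experimental.torch_jit_backend import get_backend, jit_compile


def potential(x: torch.Tensor) -> torch.Tensor:
    # a toy function standing in for a log-density computation
    return (x * x).sum()


# nnc_compile=True, experimental_inductor_compile=False -> TorchJITBackend.NNC
backend = get_backend(nnc_compile=True, experimental_inductor_compile=False)
compiled_potential = jit_compile(potential, backend)
print(compiled_potential(torch.randn(3)))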
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import gpytorch as gpt import torch from botorch.models.gpytorch import GPyTorchModel from botorch.posteriors.gpytorch import GPyTorchPosterior class SimpleGP(gpt.models.ExactGP, GPyTorchModel): """ GPytorch model that supports Bean Machine sampling and broadcasting semantics. In train mode, BM priors may be specified over GP parameters. In eval mode, this objects acts as a Gpytorch model and generates predictions using Gpytorch's prediction strategies. For an example, see the [tutorial](link:TODO) """ def __init__(self, x_train, y_train, mean, kernel, likelihood, *args, **kwargs): super().__init__(x_train, y_train, likelihood) self.mean = mean self.kernel = kernel def forward(self, data, *args, **kwargs): """ Default forward definining a GP prior. Should be overridden by child class. """ mean = self.mean(data) cov = self.kernel(data) return gpt.distributions.MultivariateNormal(mean, cov) def bm_load_samples(self, rv_dict): """ Loads tensors from a dict keyed on module name and valued by tensor whose shape is (num_samples, sample_shape). See `~gpytorch.Module.initialize`. :param rv_dict: Python dict keyed on module name and valued by tensor whose shape is (num_samples, sample_shape) """ self.pyro_load_from_samples(rv_dict) class BoTorchGP(SimpleGP, GPyTorchModel): """ Experimental module that is compatible with BoTorch. samples = nuts.infer(queries, obs, num_samples).get_chain(0) gp.eval() gp.bm_load_samples({kernel.lengthscale=samples[lengthscale_prior()]}) from botorch.acquisition.objective import IdentityMCObjective acqf = get_acquisition_function("qEI", gp, IdentityMCObjective(), x_train) new_point = acqf(new_input).mean() """ def __init__(self, x_train, y_train, *args, **kwargs): super().__init__(x_train, y_train, *args, **kwargs) if y_train.dim() > 1: self._num_outputs = y_train.shape[-1] else: self._num_outputs = 1 def posterior(self, data, observation_noise=False, **kwargs): """ Returns the posterior conditioned on new data. Used in BoTorch. See `~botorch.models.model.Model.posterior`. :param data: a `torch.Tensor` containing test data of shape `(batch, data_dim)`. :returns: `~botorch.posteriors.gpytorch.GPytorchPosterior` MultivariateNormal distribution. """ self.eval() try: mvn = self(data, batch_shape=(data.shape[0],)) except AttributeError as e: raise AttributeError( "Running in eval mode but one of the parameters is still" "a BM random variable. Did you `bm_load_samples`? \n" + str(e) ) if observation_noise is not False: if torch.is_tensor(observation_noise): # TODO: Make sure observation noise is transformed correctly self._validate_tensor_args(X=data, Y=observation_noise) if observation_noise.shape[-1] == 1: observation_noise = observation_noise.squeeze(-1) mvn = self.likelihood(mvn, data, noise=observation_noise) else: mvn = self.likelihood(mvn, data) return GPyTorchPosterior(mvn=mvn)
beanmachine-main
src/beanmachine/ppl/experimental/gp/models.py
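As a rough illustration of the `SimpleGP` wrapper above (not the repository's tutorial), one might assemble it from standard GPyTorch components; the mean, kernel, and likelihood choices here are arbitrary assumptions.

# Hedged sketch: building a SimpleGP from standard GPyTorch pieces.
import gpytorch as gpt
import torch

from beanmachine.ppl.experimental.gp.models import SimpleGP

x_train = torch.linspace(0, 1, 10)
y_train = torch.sin(2 * torch.pi * x_train)

gp = SimpleGP(
    x_train,
    y_train,
    mean=gpt.means.ConstantMean(),
    kernel=gpt.kernels.ScaleKernel(gpt.kernels.RBFKernel()),
    likelihood=gpt.likelihoods.GaussianLikelihood(),
)

# In train mode, forward() defines the GP prior over the training inputs.
prior_mvn = gp(x_train)
print(prior_mvn.mean.shape)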
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict import beanmachine.ppl as bm import gpytorch from beanmachine.ppl import RVIdentifier from beanmachine.ppl.world import get_world_context def _trace_bm(module, name_to_rv=None, is_tracing=True, memo=None, prefix=""): "Adapted from https://github.com/cornellius-gp/gpytorch/blob/master/gpytorch/module.py#L470" if memo is None: memo = set() if name_to_rv is None: name_to_rv = {} if hasattr(module, "_priors"): for prior_name, (prior, closure, setting_closure) in module._priors.items(): if prior is not None and prior not in memo: if setting_closure is None: raise RuntimeError( "Cannot perform fully Bayesian inference without a setting_closure for each prior," f" but the following prior had none: {prior_name}, {prior}." ) memo.add(prior_name) prior = prior.expand(closure(module).shape) rv_name = prefix + ("." if prefix else "") + prior_name if is_tracing: # tracing pass, no enclosing World def f(): return prior f.__name__ = rv_name rv = bm.random_variable(f) name_to_rv[rv_name] = rv() else: # sampling pass, must be enclosed by World world = get_world_context() assert ( world is not None ), "Expected enclosing World context for bm.random_variable priors" value = world.update_graph(name_to_rv[rv_name]) setting_closure(module, value) for mname, module_ in module.named_children(): submodule_prefix = prefix + ("." if prefix else "") + mname _, child_name_to_rv = _trace_bm( module=module_, name_to_rv=name_to_rv, is_tracing=is_tracing, memo=memo, prefix=submodule_prefix, ) name_to_rv.update(child_name_to_rv) return module, name_to_rv def make_prior_random_variables( module: gpytorch.module.Module, ) -> Dict[str, RVIdentifier]: """ Recurses through `module` and its childrens' `._priors`, creating `bm.random_variable`s for each prior. Returns a map from prior names to `random_variable`s. """ return _trace_bm(module, name_to_rv=None, is_tracing=True)[1] def bm_sample_from_prior( model: gpytorch.module.Module, name_to_rv: Dict[str, RVIdentifier], ) -> gpytorch.module.Module: """ Samples from `model` with parameters drawn by invoking the `random_variable` to their prior in `name_to_rv`. """ return _trace_bm(model, name_to_rv, is_tracing=False)[0]
beanmachine-main
src/beanmachine/ppl/experimental/gp/__init__.py
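A small, hedged sketch of `make_prior_random_variables`: any GPyTorch module with registered priors should work, and the GammaPrior hyperparameters below are arbitrary.

# Hedged sketch: turning GPyTorch priors into Bean Machine random variables.
import gpytorch

from beanmachine.ppl.experimental.gp import make_prior_random_variables

kernel = gpytorch.kernels.RBFKernel(
    lengthscale_prior=gpytorch.priors.GammaPrior(3.0, 6.0)
)
name_to_rv = make_prior_random_variables(kernel)
# one RVIdentifier per registered prior, keyed by its (possibly nested) name
print(list(name_to_rv.keys()))  # e.g. ["lengthscale_prior"]

# During inference, bm_sample_from_prior(model, name_to_rv) is invoked inside a
# World context to write the sampled hyperparameters back into the module.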
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
beanmachine-main
src/beanmachine/ppl/experimental/causal_inference/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
beanmachine-main
src/beanmachine/ppl/experimental/causal_inference/models/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional, Union import torch from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import ( TreeStructureError, ) from beanmachine.ppl.experimental.causal_inference.models.bart.mutation import ( GrowMutation, PruneMutation, ) from beanmachine.ppl.experimental.causal_inference.models.bart.node import ( LeafNode, SplitNode, ) class Tree: """ Encapsulates a tree structure where each node is either a nonterminal `SplitNode` or a terminal `LeafNode`. This class consists of methods to track and modify overall tree structure. Args: nodes: List of nodes comprising the tree. """ def __init__(self, nodes: List[Union[LeafNode, SplitNode]]): self._nodes = nodes def num_nodes(self) -> int: """ Returns the total number of nodes in the tree. """ return len(self._nodes) def leaf_nodes(self) -> List[LeafNode]: """ Returns a list of all of the leaf nodes in the tree. """ return [node for node in self._nodes if isinstance(node, LeafNode)] def growable_leaf_nodes(self, X: torch.Tensor) -> List[LeafNode]: """ List of all leaf nodes in the tree which can be grown in a non-degenerate way i.e. such that not all values in the column of the covariate matrix are duplicates conditioned on the rules of that node. Args: X: Input / covariate matrix. """ return [node for node in self.leaf_nodes() if node.is_growable(X)] def num_growable_leaf_nodes(self, X: torch.Tensor) -> int: """ Returns the number of nodes which can be grown in the tree. """ return len(self.growable_leaf_nodes(X)) def split_nodes(self) -> List[SplitNode]: """ List of internal `SplitNode`s in the tree. """ return [node for node in self._nodes if isinstance(node, SplitNode)] def prunable_split_nodes(self) -> List[SplitNode]: """ List of decision nodes in the tree that are suitable for pruning i.e., `SplitNode`s` that have two terminal `LeafNode` children """ return [node for node in self.split_nodes() if node.is_prunable()] def num_prunable_split_nodes(self) -> int: """ Number of prunable split nodes in tree. """ return len(self.prunable_split_nodes()) def predict(self, X: torch.Tensor) -> torch.Tensor: """ Generate a set of predictions with the same dimensionality as the target array Note that the prediction is from one tree, so represents only (1 / number_of_trees) of the target. """ prediction = torch.zeros((len(X), 1), dtype=torch.float) for leaf in self.leaf_nodes(): prediction[leaf.composite_rules.condition_on_rules(X)] = leaf.predict() return prediction def mutate(self, mutation: Union[GrowMutation, PruneMutation]) -> None: """ Apply a change to the structure of the tree. Args: mutation: The mutation to apply to the tree. Only grow and prune mutations are accepted. 
""" if isinstance(mutation, PruneMutation): self._remove_node(mutation.old_node.left_child) self._remove_node(mutation.old_node.right_child) self._remove_node(mutation.old_node) self._add_node(mutation.new_node) elif isinstance(mutation, GrowMutation): self._remove_node(mutation.old_node) self._add_node(mutation.new_node) self._add_node(mutation.new_node.left_child) self._add_node(mutation.new_node.right_child) else: raise TreeStructureError("Only Grow and Prune mutations are valid.") for node in self._nodes: if node.right_child == mutation.old_node: node._right_child = mutation.new_node if node.left_child == mutation.old_node: node._left_child = mutation.new_node def _remove_node(self, node: Optional[Union[LeafNode, SplitNode]] = None) -> None: """ Remove a single node from the tree non-recursively. Only drops the node and not any children. """ if node is not None: self._nodes.remove(node) def _add_node(self, node: Optional[Union[LeafNode, SplitNode]] = None) -> None: """ Add a node to the tree non-recursively. Only adds the node and does not link it to any node. """ if node is not None: self._nodes.append(node)
beanmachine-main
src/beanmachine/ppl/experimental/causal_inference/models/bart/tree.py
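A hedged sketch of building and mutating a `Tree` by hand. The `LeafNode` constructor and `LeafNode.grow_node` are inferred from how they are used elsewhere in this package (e.g. `bart_model.py` and the grow-from-root proposer), so the exact signatures may differ.

# Hedged sketch: a single-leaf tree, a prediction, and a grow mutation.
import torch

from beanmachine.ppl.experimental.causal_inference.models.bart.mutation import (
    GrowMutation,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.node import LeafNode
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
    CompositeRules,
    Operator,
    SplitRule,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.tree import Tree

X = torch.randn(5, 2)
root = LeafNode(val=0.5, composite_rules=CompositeRules(all_dims=[0, 1]), depth=0)
tree = Tree(nodes=[root])
print(tree.predict(X))  # every row falls into the single leaf, so all 0.5

# grow the root along dimension 0 at threshold 0.0
split = LeafNode.grow_node(
    root,
    left_rule=SplitRule(grow_dim=0, grow_val=0.0, operator=Operator.le),
    right_rule=SplitRule(grow_dim=0, grow_val=0.0, operator=Operator.gt),
)
tree.mutate(GrowMutation(old_node=root, new_node=split))
print(tree.num_nodes())  # the split node plus its two new leaves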
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from abc import ABC from dataclasses import dataclass from typing import Union from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import ( GrowError, PruneError, ) from beanmachine.ppl.experimental.causal_inference.models.bart.node import ( LeafNode, SplitNode, ) @dataclass class Mutation(ABC): """ A data class for storing the nodes before and after a mutation to a tree. These mutations are applied to traverse the space of tree structures. The possible mutations considered here are: - **Grow**: Where a `LeafNode` of the tree is split based on a decision rule, turning it into an internal `SplitNode`. - **Prune**: Where an internal `SplitNode` with only terminal children is converted into a `LeafNode`. These steps constitute the Grow-Prune approach of Pratola [1] where the additional steps of BART (Change and Swap) are eliminated. Reference: [1] Pratola MT, Chipman H, Higdon D, McCulloch R, Rust W (2013). “Parallel Bayesian Additive Regression Trees.” Technical report, University of Chicago. https://arxiv.org/pdf/1309.1906.pdf Args: old_node: The node before mutation. new_node: The node after mutation. """ __slots__ = ["old_node", "new_node"] def __init__( self, old_node: Union[SplitNode, LeafNode], new_node: Union[SplitNode, LeafNode], ): self.old_node = old_node self.new_node = new_node @dataclass class PruneMutation(Mutation): """Encapsulates the prune action where an internal `SplitNode` with only terminal children is converted into a `LeafNode`. Args: old_node: The node before mutation. new_node: The node after mutation. """ def __init__(self, old_node: SplitNode, new_node: LeafNode): """ Raises: PruneError: if the prune mutation is invalid. """ if not isinstance(old_node, SplitNode) or not old_node.is_prunable(): raise PruneError("Pruning only valid on prunable SplitNodes") if not isinstance(new_node, LeafNode): raise PruneError("Pruning can only create a LeafNode") super().__init__(old_node, new_node) @dataclass class GrowMutation(Mutation): """Encapsulates the grow action where a `LeafNode` of the tree is split based on a decision rule, turning it into an internal `SplitNode`. Args: old_node: The node before mutation. new_node: The node after mutation. """ def __init__(self, old_node: LeafNode, new_node: SplitNode): """ Raises: GrowError: if the grow mutation is invalid. """ if not isinstance(old_node, LeafNode): raise GrowError("Can only grow LeafNodes") if not isinstance(new_node, SplitNode): raise GrowError("Growing a LeafNode turns it into a SplitNode") super().__init__(old_node, new_node)
beanmachine-main
src/beanmachine/ppl/experimental/causal_inference/models/bart/mutation.py
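The validation performed by these mutation classes can be seen in a short, hedged sketch like the following, reusing the node construction pattern from `bart_model.py`.

# Hedged sketch: PruneMutation rejects anything that is not a prunable SplitNode.
from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import (
    PruneError,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.mutation import (
    PruneMutation,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.node import LeafNode
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
    CompositeRules,
)

leaf = LeafNode(val=0.0, composite_rules=CompositeRules(all_dims=[0]), depth=0)
try:
    # a LeafNode is not a SplitNode, so this mutation is rejected
    PruneMutation(old_node=leaf, new_node=leaf)
except PruneError as e:
    print(e)  # "Pruning only valid on prunable SplitNodes"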
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from abc import ABCMeta, abstractmethod import torch from .tree import Tree class TreeProposer(metaclass=ABCMeta): @abstractmethod def propose(self, tree: Tree, X: torch.Tensor) -> Tree: raise NotImplementedError
beanmachine-main
src/beanmachine/ppl/experimental/causal_inference/models/bart/tree_proposer.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
beanmachine-main
src/beanmachine/ppl/experimental/causal_inference/models/bart/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from collections import Counter from math import log from typing import cast, List, NamedTuple, Optional, Tuple import torch from beanmachine.ppl.experimental.causal_inference.models.bart.node import LeafNode from beanmachine.ppl.experimental.causal_inference.models.bart.scalar_samplers import ( LeafMean, ) from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import ( Operator, SplitRule, ) from beanmachine.ppl.experimental.causal_inference.models.bart.tree import Tree from torch import multinomial class CutPoint(NamedTuple): dim: int cut_val: float class SortedInvariants(NamedTuple): O_: torch.Tensor uniq_vals: List[List[float]] val_counts: List[Counter] class GrowFromRootTreeProposer: """ Implements the "Grow-from-root" backfitting algorithm as described in [1]. Reference: [1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees" https://arxiv.org/abs/1810.02215 """ def __init__(self): self.num_cuts = None self.num_null_cuts = None def propose( self, X: torch.Tensor, partial_residual: torch.Tensor, m: int, w: torch.Tensor, sigma_val: float, leaf_sampler: LeafMean, alpha: float, beta: float, root_node: LeafNode, num_cuts: int, num_null_cuts: int, ) -> Tuple[Tree, torch.Tensor]: """ Propose a new tree and modified Dirichlet weights based on the grow-from-root algorithm [1]. Reference: [1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees" https://arxiv.org/abs/1810.02215 Args: X: Training data / covariate matrix of shape (num_observations, input_dimensions). partial_residual: Residual vector of shape (num_observations, 1). m: Number of input dimensions / variables to sample. This is usually a subset of the total number of input dimensions in the input data. w: Vector of weights or probabilities of picking an input dimension. sigma_val: Current value of noise staqndard deviation. leaf_sampler: A sampler to sample the posterior distribution of leaf means. alpha: Hyperparameter controlling the prior probability of a node being terminal as discussed in [1]. beta: Hyperparameter controlling the prior probability of a node being terminal as discussed in [1]. root_node: Root of the tree to grow. num_cuts: Number of cuts to make along each dimensions. num_null_cuts: Weighting given to the no-split cut along each dimension as discussed in [1]. 
""" if num_cuts <= 0: raise ValueError("num_cuts has to be nonnegative") self.num_cuts = num_cuts if num_null_cuts <= 0 or num_null_cuts >= num_cuts: raise ValueError( "num_null_cuts has to be greater than or equal to 1 and lesser than total number of cuts" ) self.num_null_cuts = num_null_cuts O_ = self._presort(X) uniq_vals, val_counts = self._get_uniq_elems(X, O_) root_invariants = SortedInvariants( O_=O_, uniq_vals=uniq_vals, val_counts=val_counts ) all_leaf_nodes = [] variable_counts = [0 for _ in range(X.shape[-1])] self._grow_from_root( current_node=root_node, X=X, partial_residual=partial_residual, invariants=root_invariants, m=m, w=w, sigma_val=sigma_val, leaf_sampler=leaf_sampler, alpha=alpha, beta=beta, all_leaf_nodes=all_leaf_nodes, variable_counts=variable_counts, ) out_tree = Tree(nodes=all_leaf_nodes) return out_tree, torch.Tensor(variable_counts) def _presort(self, X: torch.Tensor) -> torch.Tensor: """ Presort the input data to generate the O matrix as discussed in section 3.2 [1]. Reference: [1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees" https://arxiv.org/abs/1810.02215 Args: X: Training data / covariate matrix of shape (num_observations, input_dimensions). """ num_observations, num_dims = X.shape O_ = torch.sort(X, 0)[-1] return torch.transpose(O_, dim0=0, dim1=1) def _get_uniq_elems(self, X: torch.Tensor, O_: torch.Tensor) -> Tuple[list, list]: """ Get the unique values along every input dimension and the counts for each unique value. Args: X: Training data / covariate matrix of shape (num_observations, input_dimensions). O_: Index matrix of shape (input_dimensions, num_observations) contained the indexes of input data sorted along each dimension. """ num_dims, num_observations = O_.shape uniq_vals = [] val_counts = [] for inp_dim in range(num_dims): dim_uniq_vals = [] value_counter = Counter() for obs in range(num_observations): current_val = X[O_[inp_dim, obs], inp_dim].item() if obs == 0 or (current_val > X[O_[inp_dim, obs - 1], inp_dim]): dim_uniq_vals.append(current_val) value_counter[current_val] += 1 uniq_vals.append(dim_uniq_vals) val_counts.append(value_counter) return uniq_vals, val_counts def _grow_from_root( self, current_node: LeafNode, X: torch.Tensor, partial_residual: torch.Tensor, invariants: SortedInvariants, m: int, w: torch.Tensor, sigma_val: float, leaf_sampler: LeafMean, alpha: float, beta: float, all_leaf_nodes: List[LeafNode], variable_counts: List[int], ): """ Implement the recursive grow-from-root strategy proposed in [1]. Reference: [1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees" https://arxiv.org/abs/1810.02215 Args: current_node: The node being mutated. X: Training data / covariate matrix of shape (num_observations, input_dimensions). partial_residual: Residual vector of shape (num_observations, 1). invariants: The sorted index matrix and unique values and unique counts used to maintain sorted order. m: Number of input dimensions / variables to sample. This is usually a subset of the total number of input dimensions in the input data. w: Vector of weights or probabilities of picking an input dimension. sigma_val: Current value of noise staqndard deviation. leaf_sampler: A sampler to sample the posterior distribution of leaf means. alpha: Hyperparameter controlling the prior probability of a node being terminal as discussed in [1]. beta: Hyperparameter controlling the prior probability of a node being terminal as discussed in [1]. 
all_leaf_nodes: All the laf nodes of the grown tree. variable_counts: The number of time each input dimensions / variable has been split while growing this tree. """ dims_to_sample = self._sample_variables(m=m, w=w) cut_points = self._select_cutpoints( candidate_dims=dims_to_sample, uniq_vals=invariants.uniq_vals ) sampled_cut_point = self._sample_cut_point( candidate_cut_points=cut_points, invariants=invariants, partial_residual=partial_residual, sigma_val=sigma_val, leaf_sampler=leaf_sampler, current_node=current_node, alpha=alpha, beta=beta, ) if sampled_cut_point is None: current_node.val = leaf_sampler.sample_posterior( X=X, y=partial_residual, current_sigma_val=sigma_val, node=current_node ) all_leaf_nodes.append(current_node) return variable_counts[sampled_cut_point.dim] += 1 left_rule, right_rule = SplitRule( grow_dim=sampled_cut_point.dim, grow_val=sampled_cut_point.cut_val, operator=Operator.le, ), SplitRule( grow_dim=sampled_cut_point.dim, grow_val=sampled_cut_point.cut_val, operator=Operator.gt, ) new_node = LeafNode.grow_node( current_node, left_rule=left_rule, right_rule=right_rule ) left_invariants, right_invariants = self._sift( X=X, cut_point=sampled_cut_point, invariants=invariants ) self._grow_from_root( current_node=cast(LeafNode, new_node.left_child), X=X, partial_residual=partial_residual, invariants=left_invariants, m=m, w=w, sigma_val=sigma_val, leaf_sampler=leaf_sampler, alpha=alpha, beta=beta, all_leaf_nodes=all_leaf_nodes, variable_counts=variable_counts, ) self._grow_from_root( current_node=cast(LeafNode, new_node.right_child), X=X, partial_residual=partial_residual, invariants=right_invariants, m=m, w=w, sigma_val=sigma_val, leaf_sampler=leaf_sampler, alpha=alpha, beta=beta, all_leaf_nodes=all_leaf_nodes, variable_counts=variable_counts, ) def _sample_variables(self, m: int, w: torch.Tensor) -> List[int]: """ Sample a subset of input dimensions to split on as discussed in section 3.4 of [1]. Reference: [1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees" https://arxiv.org/abs/1810.02215. Note: The number of sampled variables are set to min(m, count_nonzero(w)). Args: m: number of dimensions to sample, corresponding to 'm' in [1]. w: Vector of weights of picking an input dimension. """ m = cast(int, min(m, torch.count_nonzero(w).item())) return [ _.item() for _ in multinomial(input=w, num_samples=m, replacement=False) ] def _select_cutpoints( self, candidate_dims: List[int], uniq_vals: List[List[float]], ) -> List[CutPoint]: """ Select cutpoints along every dimension. Args: candidate_dims: Dimensions that are being split along. uniq_vals: Unique values along every dimension. 
""" candidate_cuts = [] for inp_dim in candidate_dims: # check for degeneracy if len(uniq_vals[inp_dim]) < 2: continue if len(uniq_vals[inp_dim]) <= self.num_cuts: skip_val_freq = 1 elif self.num_cuts == 1: skip_val_freq = len( uniq_vals[inp_dim] ) # just select the first val if only 1 cut required else: skip_val_freq = math.floor( (len(uniq_vals[inp_dim]) - 2) / (self.num_cuts - 1) ) curr_id = 0 # all uniq vals except last get added to the bag while curr_id < (len(uniq_vals[inp_dim]) - 1): candidate_cuts.append( CutPoint(dim=inp_dim, cut_val=uniq_vals[inp_dim][curr_id]) ) curr_id += skip_val_freq return candidate_cuts def _sample_cut_point( self, candidate_cut_points: List[CutPoint], partial_residual: torch.Tensor, invariants: SortedInvariants, sigma_val: float, leaf_sampler: LeafMean, current_node: LeafNode, alpha: float, beta: float, ) -> Optional[CutPoint]: """ Select a sample cut point by using sampling probabilities calculated in eq. (4) of [1]. Args: candidate_cut_points: DCut points to sample from. partial_residual: Residual vector of shape (num_observations, 1). invariants: The sorted index matrix and unique values and unique counts used to maintain sorted order. sigma_val: Current value of noise standard deviation. leaf_sampler: A sampler to sample the posterior distribution of leaf means. current_node: The node being mutated. alpha: Hyperparameter controlling the prior probability of a node being terminal as discussed in [1]. beta: Hyperparameter controlling the prior probability of a node being terminal as discussed in [1]. """ if len(candidate_cut_points) == 0: return None selection_log_likelihoods = [] selection_probabs = [] total_num_observations = invariants.O_.shape[-1] total_residual = torch.sum(partial_residual[invariants.O_[0]]).item() tau = leaf_sampler.prior_scale**2 sigma2 = sigma_val**2 MAX_LOG_LIKELIHOOD = -float("inf") def _integrated_log_likelihood( num_observations: int, residual: float, ) -> float: log_likelihood = +0.5 * log( (sigma2) / (sigma2 + tau * num_observations) ) + 0.5 * (tau * (residual**2)) / ( (sigma2) * (sigma2 + tau * num_observations) ) return log_likelihood kappa = self.num_null_cuts * ( (math.pow((1 + current_node.depth), beta) / alpha) - 1 ) null_log_likelihood = ( _integrated_log_likelihood( num_observations=total_num_observations, residual=total_residual ) + log(kappa) + log(len(candidate_cut_points)) ) if null_log_likelihood > MAX_LOG_LIKELIHOOD: MAX_LOG_LIKELIHOOD = null_log_likelihood selection_log_likelihoods.append(null_log_likelihood) current_O_id_, current_uniq_val_id_ = 0, 0 residuals_le_cutpoint, num_obs_le_cutpoint = [], [] for cut_id, cut_point in enumerate(candidate_cut_points): current_residual = 0.0 current_num_obs = 0 if cut_id == 0 or cut_point.dim != candidate_cut_points[cut_id - 1].dim: residuals_le_cutpoint = [] num_obs_le_cutpoint = [] current_O_id_ = 0 current_uniq_val_id_ = 0 else: current_residual += residuals_le_cutpoint[-1] current_num_obs += num_obs_le_cutpoint[-1] while ( invariants.uniq_vals[cut_point.dim][current_uniq_val_id_] <= cut_point.cut_val ): num_ties = invariants.val_counts[cut_point.dim][ invariants.uniq_vals[cut_point.dim][current_uniq_val_id_] ] current_num_obs += num_ties for _ in range(num_ties): current_residual += partial_residual[ invariants.O_[cut_point.dim, current_O_id_] ].item() current_O_id_ += 1 current_uniq_val_id_ += 1 residuals_le_cutpoint.append(current_residual) num_obs_le_cutpoint.append(current_num_obs) cut_point_log_likelihood = _integrated_log_likelihood( 
num_observations=current_num_obs, residual=current_residual, ) + _integrated_log_likelihood( num_observations=(total_num_observations - current_num_obs), residual=(total_residual - current_residual), ) if cut_point_log_likelihood > MAX_LOG_LIKELIHOOD: MAX_LOG_LIKELIHOOD = cut_point_log_likelihood selection_log_likelihoods.append(cut_point_log_likelihood) # turn it into likelihoods sum_ = 0.0 for log_likelihood in selection_log_likelihoods: likelihood = math.exp(log_likelihood - MAX_LOG_LIKELIHOOD) sum_ += likelihood selection_probabs.append(likelihood) selection_probabs = torch.tensor([_ / sum_ for _ in selection_probabs]) sampled_cut_id = cast( int, multinomial(input=selection_probabs, num_samples=1).item() ) if sampled_cut_id == 0: # no split return None return candidate_cut_points[sampled_cut_id - 1] def _sift( self, X: torch.Tensor, invariants: SortedInvariants, cut_point: CutPoint ) -> Tuple[SortedInvariants, SortedInvariants]: """ Sift all data into left and right partitions to maintain sorted order during recursion. Args: X: Training data / covariate matrix of shape (num_observations, input_dimensions). invariants: The sorted index matrix and unique values and unique counts used to maintain sorted order. cut_point: The cut point to split along. """ num_dims, num_observations = invariants.O_.shape O_left, O_right = [], [] uniq_vals_left, uniq_vals_right = [], [] val_counts_left, val_counts_right = [], [] for dim in range(num_dims): dim_O_left, dim_O_right = [], [] dim_uniq_vals_left, dim_uniq_vals_right = [], [] dim_val_counts_left, dim_val_counts_right = Counter(), Counter() for col in range(num_observations): obs_id = invariants.O_[dim, col].item() curr_observation_dim_val = X[obs_id, dim].item() if X[obs_id, cut_point.dim] <= cut_point.cut_val: dim_O_left.append(obs_id) if ( len(dim_uniq_vals_left) == 0 or dim_uniq_vals_left[-1] != curr_observation_dim_val ): dim_uniq_vals_left.append(curr_observation_dim_val) dim_val_counts_left[curr_observation_dim_val] += 1 else: dim_O_right.append(obs_id) if ( len(dim_uniq_vals_right) == 0 or dim_uniq_vals_right[-1] != curr_observation_dim_val ): dim_uniq_vals_right.append(curr_observation_dim_val) dim_val_counts_right[curr_observation_dim_val] += 1 O_left.append(dim_O_left) O_right.append(dim_O_right) uniq_vals_left.append(dim_uniq_vals_left) uniq_vals_right.append(dim_uniq_vals_right) val_counts_left.append(dim_val_counts_left) val_counts_right.append(dim_val_counts_right) left_invariants = SortedInvariants( O_=torch.tensor(O_left), uniq_vals=uniq_vals_left, val_counts=val_counts_left, ) right_invariants = SortedInvariants( O_=torch.tensor(O_right), uniq_vals=uniq_vals_right, val_counts=val_counts_right, ) return left_invariants, right_invariants
beanmachine-main
src/beanmachine/ppl/experimental/causal_inference/models/bart/grow_from_root_tree_proposer.py
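A hedged sketch of invoking `GrowFromRootTreeProposer.propose` directly on toy data. The hyperparameter values are arbitrary, and the root-node construction mirrors `_get_root_node` in `bart_model.py`; this is a sketch rather than a tested invocation.

# Hedged sketch: one grow-from-root proposal on random data.
import torch

from beanmachine.ppl.experimental.causal_inference.models.bart.grow_from_root_tree_proposer import (
    GrowFromRootTreeProposer,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.node import LeafNode
from beanmachine.ppl.experimental.causal_inference.models.bart.scalar_samplers import (
    LeafMean,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
    CompositeRules,
)

X = torch.randn(20, 2)
residual = torch.randn(20, 1)

proposer = GrowFromRootTreeProposer()
tree, var_counts = proposer.propose(
    X=X,
    partial_residual=residual,
    m=2,  # sample both input dimensions
    w=torch.ones(2),  # uniform weights over dimensions
    sigma_val=1.0,
    leaf_sampler=LeafMean(prior_loc=0.0, prior_scale=0.1),
    alpha=0.95,
    beta=2.0,
    root_node=LeafNode(
        val=0.0, composite_rules=CompositeRules(all_dims=[0, 1]), depth=0
    ),
    num_cuts=10,
    num_null_cuts=1,
)
print(tree.num_nodes(), var_counts)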
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import enum from dataclasses import dataclass from typing import List, Optional import torch class Operator(enum.Enum): le = "less than equal to" gt = "greater than" @dataclass(eq=True) class SplitRule: """ A representation of a split in feature space as a result of a decision node node growing to a leaf node. Args: grow_dim: The dimension used for the split. grow_val: The value used for splitting. operator: The relational operation used for the split. The two operators considered are "less than or equal" for the left child and "greater than" for the right child. """ __slots__ = ["grow_dim", "grow_val", "operator"] def __init__( self, grow_dim: int, grow_val: float, operator: Operator, ): self.grow_dim = grow_dim self.grow_val = grow_val self.operator = operator class DimensionalRule: """ Represents the range of values along one dimension of the input which passes a rule. For example, if input is X = [x1, x2] then a dimensional rule for x1 could be x1 in [3, 4 , 5...20] representing the rule 3 < x1 <=20 (assuming x1 is an integer). Args: grow_dim: The dimension used for the rule. min_val: The minimum value of grow_dim which satisfies the rule (exclusive i.e. min_val fails the rule). max_val: The maximum value of grow_dim which satisfies the rule (inclusive i.e. max_val passes the rule). """ def __init__(self, grow_dim: int, min_val: float, max_val: float): self.grow_dim = grow_dim self.min_val, self.max_val = min_val, max_val def add_rule(self, new_rule: SplitRule) -> "DimensionalRule": """Add a rule to the dimension. If the rule is less restrictive than an existing rule, nothing changes. Args: new_rule: The new rule to add. """ if self.grow_dim != new_rule.grow_dim: raise ValueError("New rule grow dimension does not match") if new_rule.operator == Operator.gt and new_rule.grow_val > self.min_val: return DimensionalRule(self.grow_dim, new_rule.grow_val, self.max_val) elif new_rule.operator == Operator.le and new_rule.grow_val < self.max_val: return DimensionalRule(self.grow_dim, self.min_val, new_rule.grow_val) else: # new rule is already covered by existing rule return self class CompositeRules: """ Represents a composition of `DimensionalRule`s along multiple dimensions of input. For example, if input is X = [x1, x2] then a composite rule could be x1 in [3, 4 , 5...20] and x2 in [-inf..-10] representing the rule 3 < x1 <=20 (assuming x1 is an integer) and x2<= -10. Note: CompositeRules arre immutable and all changes to them return copies with the desired modification. Args: all_dims: All dimensions which have rules. all_split_rules: All rules corresponding to each dimension in `all_dims`. """ def __init__( self, all_dims: List[int], all_split_rules: Optional[List[SplitRule]] = None ): self.dimensional_rules = { dim: DimensionalRule(dim, -float("inf"), float("inf")) for dim in all_dims } if all_split_rules is None: self.all_split_rules = [] else: self.all_split_rules = all_split_rules for split_rule in self.all_split_rules: self.dimensional_rules[split_rule.grow_dim] = self.dimensional_rules[ split_rule.grow_dim ].add_rule(split_rule) if len(self.all_split_rules) > 0: self.grow_dim = self.all_split_rules[-1].grow_dim else: self.grow_dim = None def condition_on_rules(self, X: torch.Tensor) -> torch.Tensor: """Condition the input on a composite rule and get a mask such that X[mask] satisfies the rule. 
Args: X: Input / covariate matrix. """ mask = torch.ones(len(X), dtype=torch.bool) for dim in self.dimensional_rules.keys(): mask = ( mask & (X[:, dim].gt(self.dimensional_rules[dim].min_val)) & (X[:, dim].le(self.dimensional_rules[dim].max_val)) ) return mask def add_rule(self, new_rule: SplitRule) -> "CompositeRules": """Add a split rule to the composite ruleset. Returns a copy of `CompositeRules`""" if new_rule.grow_dim not in self.dimensional_rules.keys(): raise ValueError( "The dimension of new split rule is outside the scope of the composite rule" ) return CompositeRules( list(self.dimensional_rules.keys()), self.all_split_rules + [new_rule] ) def most_recent_split_rule(self) -> Optional[SplitRule]: """Returns the most recent split_rule added. Returns None if no rules were applied.""" if len(self.all_split_rules) == 0: return None else: return self.all_split_rules[-1]
beanmachine-main
src/beanmachine/ppl/experimental/causal_inference/models/bart/split_rule.py
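The rule classes above compose as follows; a small, self-contained check of `condition_on_rules` on a two-row input, based directly on the code in this file.

# Hedged sketch: composing split rules and masking an input matrix.
import torch

from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
    CompositeRules,
    Operator,
    SplitRule,
)

X = torch.tensor([[0.2, 1.0], [0.8, -1.0]])

rules = CompositeRules(all_dims=[0, 1])
# keep only rows with x0 <= 0.5
rules = rules.add_rule(SplitRule(grow_dim=0, grow_val=0.5, operator=Operator.le))

mask = rules.condition_on_rules(X)
print(mask)      # tensor([ True, False])
print(X[mask])   # only the first row survives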
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


class TreeStructureError(Exception):
    """Base class for errors related to tree structure"""

    pass


class PruneError(TreeStructureError):
    """Raised for errors in pruning operations on a tree, such as trying to prune
    a root node or trying to prune a node which has non-terminal children."""

    pass


class GrowError(TreeStructureError):
    """Raised for errors in growing a tree, such as trying to grow from a node
    along an input dimension which has no unique values."""

    pass


class NotInitializedError(AttributeError):
    """Raised for errors in accessing model attributes which have not been
    initialized, for example trying to predict with a model which has not been
    trained."""

    pass
beanmachine-main
src/beanmachine/ppl/experimental/causal_inference/models/bart/exceptions.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # For now supports only ordered numeric variables from __future__ import annotations import math from copy import deepcopy from typing import cast, List, Optional, Tuple import torch from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import ( NotInitializedError, ) from beanmachine.ppl.experimental.causal_inference.models.bart.grow_from_root_tree_proposer import ( GrowFromRootTreeProposer, ) from beanmachine.ppl.experimental.causal_inference.models.bart.grow_prune_tree_proposer import ( GrowPruneTreeProposer, ) from beanmachine.ppl.experimental.causal_inference.models.bart.node import LeafNode from beanmachine.ppl.experimental.causal_inference.models.bart.scalar_samplers import ( LeafMean, NoiseStandardDeviation, ) from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import ( CompositeRules, ) from beanmachine.ppl.experimental.causal_inference.models.bart.tree import Tree from torch.distributions.dirichlet import Dirichlet from tqdm.auto import trange class BART: """Bayesian Additive Regression Trees (BART) are Bayesian sum of trees models [1] Default parameters are taken from [1]. Reference: [1] Hugh A. Chipman, Edward I. George, Robert E. McCulloch (2010). "BART: Bayesian additive regression trees" https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full Args: num_trees: Number of trees. alpha: Parameter used in the tree depth prior, Eq. 7 of [1]. beta: Parameter used in the tree depth prior, Eq. 7 of [1]. k: Parameter used in the u_i_j prior, Eq. 8 of [1]. sigma_concentration: Concentration parameter (alpha) for the inverse gamma distribution prior of p(sigma). sigma_rate: Rate parameter (beta) for the inverse gamma distribution prior of p(sigma). num_burn: Number of samples burned-in. tree_sampler: The tree sampling method used. num_sample: Number of samples to collect. p_grow: Probability of tree growth. Used by the tree sampler. random_state: Random state used to seed. 
""" def __init__( self, num_trees: int = 200, alpha: float = 0.95, beta: float = 2.0, k: float = 2.0, noise_sd_concentration: float = 3.0, noise_sd_rate: float = 1.0, tree_sampler: Optional[GrowPruneTreeProposer] = None, random_state: Optional[int] = None, ): self.num_trees = num_trees self.num_samples = None self.all_tree_predictions = None self._all_trees = None self.leaf_mean = None self.k = k self.alpha = alpha self.beta = beta if noise_sd_concentration <= 0 or noise_sd_rate <= 0: raise ValueError("Invalid specification of noise_sd distribution priors") self.noise_sd_concentration = noise_sd_concentration self.noise_sd_rate = noise_sd_rate self.sigma = NoiseStandardDeviation( prior_concentration=self.noise_sd_concentration, prior_rate=self.noise_sd_rate, ) self.samples = None self.X = None self.y = None self.y_min = None self.y_max = None if random_state is not None: torch.manual_seed(random_state) if tree_sampler is None: self.tree_sampler = GrowPruneTreeProposer(grow_probability=0.5) elif isinstance(tree_sampler, GrowPruneTreeProposer): self.tree_sampler = tree_sampler else: NotImplementedError("tree_sampler not implemented") if isinstance(self.tree_sampler, GrowPruneTreeProposer): self._step = self._grow_prune_step else: NotImplementedError( "step function not defined" ) # this should never be raised def fit( self, X: torch.Tensor, y: torch.Tensor, num_samples: int = 1000, num_burn: int = 250, ) -> BART: """Fit the training data and learn the parameters of the model. Args: X: Training data / covariate matrix of shape (num_observations, input_dimensions). y: Response vector of shape (num_observations, 1). """ self.num_samples = num_samples self._load_data(X, y) self.samples = {"trees": [], "sigmas": []} self.leaf_mean = LeafMean( prior_loc=0.0, prior_scale=0.5 / (self.k * math.sqrt(self.num_trees)) ) self._init_trees(X) for iter_id in trange(num_burn + num_samples): trees, sigma = self._step() self._all_trees = trees if iter_id >= num_burn: self.samples["trees"].append(trees) self.samples["sigmas"].append(sigma) return self def _load_data(self, X: torch.Tensor, y: torch.Tensor): """ Load the training data. The response is scaled to [-1, 1] as per [1]. Reference: [1] Hugh A. Chipman, Edward I. George, Robert E. McCulloch (2010). "BART: Bayesian additive regression trees" https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full Args: X: Training data / covariate matrix of shape (num_observations, input_dimensions). y: Response vector of shape (num_observations, 1). """ if not isinstance(X, torch.Tensor) or not isinstance(y, torch.Tensor): raise ValueError("Expected type torch.Tensor") if X.shape[0] != y.shape[0]: raise ValueError( f"Number of samples in X {X.shape[0]} not the same as in y {y.shape[0]}" ) self.X = X self.y_min = y.min() self.y_max = y.max() self.y = self._scale(y).reshape(-1, 1) def _scale(self, y: torch.Tensor) -> torch.Tensor: """ Scale tensor to [-1. ,1.] Args: y: Input tensor. """ max_ = torch.ones_like(y) min_ = -torch.ones_like(y) y_std = (y - self.y_min) / (self.y_max - self.y_min) return y_std * (max_ - min_) + min_ def _inverse_scale(self, y: torch.Tensor) -> torch.Tensor: """ Rescale tensor back from [-1. ,1.]. Args: y: Input tensor. """ max_ = torch.ones_like(y) min_ = -torch.ones_like(y) y_std = (y - min_) / (max_ - min_) return y_std * (self.y_max - self.y_min) + self.y_min def _init_trees(self, X: torch.Tensor): """ Initialize the trees of the model. 
Args: X: Training data / covariate matrix of shape (num_observations, input_dimensions). """ self._all_trees = [] num_dims = X.shape[-1] num_points = X.shape[0] for _ in range(self.num_trees): self._all_trees.append( Tree( nodes=[ LeafNode( val=self.leaf_mean.sample_prior(), composite_rules=CompositeRules( all_dims=list(range(num_dims)) ), depth=0, ) ] ) ) self.all_tree_predictions = torch.zeros( (num_points, self.num_trees, 1), dtype=torch.float ) def _grow_prune_step(self) -> Tuple[List, float]: """Take a single MCMC step using the GrowPrune approach of the original BART [1]. Reference: [1] Hugh A. Chipman, Edward I. George, Robert E. McCulloch (2010). "BART: Bayesian additive regression trees" https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full """ if self.X is None or self.y is None: raise NotInitializedError("No training data") new_trees = [deepcopy(tree) for tree in self._all_trees] all_tree_predictions = deepcopy(self.all_tree_predictions) for tree_id in range(len(new_trees)): # all_tree_predictions.shape -> (num_observations, num_trees, 1) current_predictions = torch.sum(all_tree_predictions, dim=1) last_iter_tree_prediction = all_tree_predictions[:, tree_id] partial_residual = self.y - current_predictions + last_iter_tree_prediction new_trees[tree_id] = self.tree_sampler.propose( tree=new_trees[tree_id], X=self.X, partial_residual=partial_residual, alpha=self.alpha, beta=self.beta, sigma_val=self.sigma.val, leaf_mean_prior_scale=self.leaf_mean_prior_scale, ) self._update_leaf_mean(new_trees[tree_id], partial_residual) all_tree_predictions[:, tree_id] = new_trees[tree_id].predict(self.X) self.all_tree_predictions = all_tree_predictions self._update_sigma(self.y - torch.sum(all_tree_predictions, dim=1)) return new_trees, self.sigma.val def _update_leaf_mean(self, tree: Tree, partial_residual: torch.Tensor): """ Use Eq. 2.10 of [1] to update leaf node values by sampling from posterior distribution. Reference: [1] Andrew Gelman et al. "Bayesian Data Analysis", 3rd ed. Args: tree: Tree whos leaf is being updated. partial_residual: Current residual of the model excluding this tree of shape (num_observations, 1). """ if self.X is None: raise NotInitializedError("No training data") for leaf_node in tree.leaf_nodes(): new_leaf_val = self.leaf_mean.sample_posterior( node=leaf_node, X=self.X, y=partial_residual, current_sigma_val=self.sigma.val, ) if new_leaf_val is not None: leaf_node.val = new_leaf_val def _update_sigma(self, full_residual: torch.Tensor): """ Use Eq. from section 2.6 of [1] to update sigma by sampling from posterior distribution. Reference: [1] Andrew Gelman et al. "Bayesian Data Analysis", 3rd ed. Args: partial_residual: Current residual of the model excluding this tree of shape (num_observations, 1). """ self.sigma.sample(self.X, full_residual) def _predict_step( self, X: Optional[torch.Tensor] = None, trees: Optional[List[torch.Tensor]] = None, ) -> torch.Tensor: """Get a prediction from a list of trees. Args: X: Covariate matrix to predict on. If None provided, predictions are made on the training set of shape (num_samples, input_dimensions). trees: Trees to perform prediction. The prediction is the sum of predictions from these trees. If None provided, the last drawn sample of trees is used for prediction. Returns: prediction: Prediction of shape (num_samples, 1). 
""" if self.X is None or self._all_trees is None: raise NotInitializedError("Model not trained") if X is None: X = self.X if trees is None: trees = self._all_trees prediction = torch.zeros((len(X), 1), dtype=torch.float) for single_tree in trees: prediction += single_tree.predict(X) return prediction def predict(self, X: torch.Tensor) -> torch.Tensor: """ Perform a prediction using all the samples collected in the model. Args: X: Covariate matrix to predict on of shape (num_observations, input_dimensions). Returns: prediction: Prediction corresponding to average of all samples of shape (num_observations, 1). """ prediction = torch.mean( self.get_posterior_predictive_samples(X), dim=-1, dtype=torch.float ) return prediction.reshape(-1, 1) def predict_with_quantiles( self, X: torch.Tensor, quantiles: Optional[torch.Tensor] = None ) -> Tuple[torch.Tensor, torch.Tensor]: """ Returns the quantiles of prediction. Args: X: Covariate matrix to predict on of shape (num_samples, input_dimensions). quantiles: The quantiles required. If nothing supplied, the default quantiles are [0.025, 0.5, 0.975] Returns: prediction, qvals: Prediction corresponding to average of all samples of shape (num_observations, 1), qvals tensor of shape (num_obs, len(quantiles)) and qvals[:, i] is quantile value corresponding to quantiles[i]. """ if quantiles is None: quantiles = torch.Tensor([0.025, 0.5, 0.975]) for q in quantiles: if not 0.0 < q < 1.0: raise ValueError("Quantiles must be in (0, 1)") prediction_samples = self.get_posterior_predictive_samples(X) prediction = torch.mean(prediction_samples, dim=-1, dtype=torch.float).reshape( -1, 1 ) qvals = ( torch.quantile(prediction_samples, dim=1, q=quantiles) .transpose(0, 1) .reshape(-1, len(quantiles)) ) return prediction, qvals def get_posterior_predictive_samples(self, X: torch.Tensor) -> torch.Tensor: """ Returns samples from the posterior predictive distribution P(y|X). Args: X: Covariate matrix to predict on of shape (num_observations, input_dimensions). Returns: posterior_predictive_samples: Samples from the predictive distribution P(y|X) of shape (num_observations, num_samples). """ posterior_predictive_samples = [] for sample_id in range(self.num_samples): single_prediction_sample = self._inverse_scale( self._predict_step(X=X, trees=self.samples["trees"][sample_id]) ) # ( torch.Size(num_observations, 1) ) posterior_predictive_samples.append(single_prediction_sample) return torch.concat(posterior_predictive_samples, dim=-1) @property def leaf_mean_prior_scale(self): if self.leaf_mean is None: raise NotInitializedError("LeafMean prior not set.") return self.leaf_mean.prior_scale class XBART(BART): """Implementes XBART [1] which is a faster implementation of Bayesian Additive Regression Trees (BART) are Bayesian sum of trees models [2]. Default parameters are taken from [1]. Reference: [1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees" https://arxiv.org/abs/1810.02215 [2] Hugh A. Chipman, Edward I. George, Robert E. McCulloch (2010). "BART: Bayesian additive regression trees" https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full Args: num_trees: Number of trees. If this is not set in the constructor explicitly, it defaults to 0 and is adaptively set as a function of the trianing data in the ```fit()``` method. alpha: Parameter used in the tree depth prior, Eq. 7 of [2]. beta: Parameter used in the tree depth prior, Eq. 7 of [2]. 
tau: Prior variance of the leaf-specific mean paramete used in the u_i_j prior, section 2.2 of [1]. noise_sd_concentration: Concentration parameter (alpha) for the inverse gamma distribution prior of p(sigma). noise_sd_rate: Rate parameter (beta) for the inverse gamma distribution prior of p(sigma). tree_sampler: The tree sampling method used. random_state: Random state used to seed. num_cuts: The maximum number of cuts per dimension. num_null_cuts: Number of "no split" null cuts to consider along each dimension. This affects the tree depth as discussed in [1]. m: Size of the subset of variables that are sampled for cutting points in the post burnin period as discussed in section 3.4 of [1]. """ def __init__( self, num_trees: int = 0, alpha: float = 0.95, beta: float = 2.0, tau: Optional[float] = None, noise_sd_concentration: float = 3.0, noise_sd_rate: float = 1.0, tree_sampler: Optional[GrowFromRootTreeProposer] = None, random_state: Optional[int] = None, num_cuts: Optional[int] = None, num_null_cuts: int = 1, m: Optional[int] = None, ): self.num_cuts = num_cuts self.num_null_cuts = num_null_cuts self.tau = tau self.m = m super().__init__( num_trees=num_trees, alpha=0.95, beta=1.25, noise_sd_concentration=3.0, noise_sd_rate=1.0, tree_sampler=None, random_state=None, ) if tree_sampler is None: self.tree_sampler = GrowFromRootTreeProposer() elif isinstance(tree_sampler, GrowFromRootTreeProposer): self.tree_sampler = tree_sampler else: raise NotImplementedError("tree_sampler not implemented") self._step = self._grow_from_root_step self.var_counts = None self.all_tree_var_counts = None def fit( self, X: torch.Tensor, y: torch.Tensor, num_samples: int = 25, num_burn: int = 15, ) -> XBART: """Fit the training data and learn the parameters of the model. Reference: [1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees" https://arxiv.org/abs/1810.02215 Args: X: Training data / covariate matrix of shape (num_observations, input_dimensions). y: Response vector of shape (num_observations, 1). num_samples: Number of post burnin samples to draw. num_burn: Number of burnin samples to draw (for adaptation). """ self.num_samples = num_samples self._load_data(X, y) if not self.num_trees > 0: self._adaptively_init_num_trees() if self.tau is None: self._adaptively_init_tau() self.tau = cast(float, self.tau) self.leaf_mean = LeafMean(prior_loc=0.0, prior_scale=math.sqrt(self.tau)) if self.num_cuts is None: self._adaptively_init_num_cuts() if self.m is None: self.m = self.X.shape[-1] self.samples = {"trees": []} self._init_trees(X) self.all_tree_predictions = ( (torch.clone(self.y) / self.num_trees) .unsqueeze(1) .tile((1, self.num_trees, 1)) ) self.var_counts = torch.ones((X.shape[-1],)) self.all_tree_var_counts = torch.ones((self.num_trees, X.shape[-1])) is_burnin_period = True num_dims_to_sample = self.X.shape[-1] for iter_id in trange(num_burn + num_samples): if iter_id >= num_burn: is_burnin_period = False num_dims_to_sample = self.m trees = self._step(num_dims_to_sample=num_dims_to_sample) self._all_trees = trees if not is_burnin_period: self.samples["trees"].append(trees) return self def _adaptively_init_num_trees(self): """Implements the default for number of trees from section 3.1 of [1]. Reference: [1] He J., Yalov S., Hahn P.R. (2018). 
"XBART: Accelerated Bayesian Additive Regression Trees" https://arxiv.org/abs/1810.02215 """ n = len(self.X) self.num_trees = int(math.pow(math.log(n), math.log(math.log(n))) / 4) def _adaptively_init_tau(self): """Implements the default for tau from section 3.1 of [1]. Reference: [1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees" https://arxiv.org/abs/1810.02215 """ if not self.num_trees > 0: raise NotInitializedError("num_trees not set") self.tau = (3 / 10) * (torch.var(self.y).item() / self.num_trees) def _adaptively_init_num_cuts(self): """Implements the default for number of cuts, C from section 3.3 of [1]. Reference: [1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees" https://arxiv.org/abs/1810.02215 """ n = len(self.X) self.num_cuts = max(math.sqrt(n), 100) def _grow_from_root_step(self, num_dims_to_sample: int) -> List[Tree]: """Take a single MCMC step using the Grow-from-root approach of xBART [1]. Reference: [1] He J., Yalov S., Hahn P.R. (2018). "XBART: Accelerated Bayesian Additive Regression Trees" https://arxiv.org/abs/1810.02215 Args: num_dims_to_sample: Size of the subset of variables that are sampled for cutting points as discussed in section 3.4 of [1]. """ if self.X is None or self.y is None: raise NotInitializedError("No training data") all_tree_predictions = deepcopy(self.all_tree_predictions) new_trees = [] for tree_id in range(self.num_trees): # all_tree_predictions.shape -> (num_observations, num_trees, 1) current_predictions = torch.sum(all_tree_predictions, dim=1) last_iter_tree_prediction = all_tree_predictions[:, tree_id] partial_residual = self.y - current_predictions + last_iter_tree_prediction w = self._draw_var_weights() new_tree, new_var_counts = self.tree_sampler.propose( X=self.X, partial_residual=partial_residual, m=num_dims_to_sample, w=w, alpha=self.alpha, beta=self.beta, sigma_val=self.sigma.val, leaf_sampler=self.leaf_mean, root_node=self._get_root_node(), num_cuts=self.num_cuts, num_null_cuts=self.num_null_cuts, ) new_trees.append(new_tree) self.var_counts += new_var_counts - self.all_tree_var_counts[tree_id] self.all_tree_var_counts[tree_id] = new_var_counts all_tree_predictions[:, tree_id] = new_tree.predict(self.X) self._update_sigma(self.y - torch.sum(all_tree_predictions, dim=1)) self.all_tree_predictions = all_tree_predictions return new_trees def _draw_var_weights(self) -> torch.Tensor: return Dirichlet(self.var_counts).sample() def _get_root_node(self): return LeafNode( depth=0, composite_rules=CompositeRules(all_dims=list(range(self.X.shape[-1]))), )
beanmachine-main
src/beanmachine/ppl/experimental/causal_inference/models/bart/bart_model.py
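Usage sketch for the XBART model above (added for illustration; not part of the repository source). It assumes the experimental BART package is installed and importable from the file path shown, and uses a small synthetic data set; the sampler settings are the documented defaults of `fit`.

import torch

from beanmachine.ppl.experimental.causal_inference.models.bart.bart_model import XBART

torch.manual_seed(0)
X = torch.rand(100, 3)                                       # (num_observations, input_dimensions)
y = 2.0 * X[:, :1] - X[:, 1:2] + 0.1 * torch.randn(100, 1)   # (num_observations, 1)

# With the defaults, num_trees, tau and num_cuts are set adaptively from the data.
model = XBART().fit(X=X, y=y, num_samples=25, num_burn=15)

# One list of sampled trees is stored per post-burnin draw.
print(len(model.samples["trees"]))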
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Optional import torch from beanmachine.ppl.experimental.causal_inference.models.bart.node import LeafNode from torch.distributions.gamma import Gamma from torch.distributions.normal import Normal class NoiseStandardDeviation: """The NoiseStandardDeviation class encapsulates the noise standard deviation. The variance is parametrized by an inverse-gamma prior which is conjugate to a normal likelihood. Args: prior_concentration (float): Also called alpha. Must be greater than zero. prior_rate (float): Also called beta. Must be greater than 0. val (float): Current value of noise standard deviation. """ def __init__( self, prior_concentration: float, prior_rate: float, val: Optional[float] = None ): if prior_concentration <= 0 or prior_rate <= 0: raise ValueError("Invalid prior hyperparameters") self.prior_concentration = prior_concentration self.prior_rate = prior_rate if val is None: self.sample(X=torch.Tensor([]), residual=torch.Tensor([])) # prior init else: self._val = val @property def val(self) -> float: return self._val @val.setter def val(self, val: float): self._val = val def sample(self, X: torch.Tensor, residual: torch.Tensor) -> float: """Sample from the posterior distribution of sigma. If empty tensors are passed for X and residual, there will be no update so the sampling will be from the prior. Note: This sets the value of the `val` attribute to the drawn sample. Args: X: Covariate matrix / training data shape (num_observations, input_dimensions). residual: The current residual of the model shape (num_observations, 1). """ self.val = self._get_sample(X, residual) return self.val def _get_sample(self, X: torch.Tensor, residual: torch.Tensor) -> float: """ Draw a sample from the posterior. Args: X: Covariate matrix / training data of shape (num_observations, input_dimensions). residual: The current residual of the model of shape (num_observations, 1). """ posterior_concentration = self.prior_concentration + (len(X) / 2.0) posterior_rate = self.prior_rate + (0.5 * (torch.sum(torch.square(residual)))) draw = torch.pow(Gamma(posterior_concentration, posterior_rate).sample(), -0.5) return draw.item() class LeafMean: """ Class to sample form the prior and posterior distributions of the leaf nodes in BART. Reference: [1] Hugh A. Chipman, Edward I. George, Robert E. McCulloch (2010). "BART: Bayesian additive regression trees" https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full Args: prior_loc: Prior location parameter. prior_scale: Prior scale parameter. 
""" def __init__(self, prior_loc: float, prior_scale: float): if prior_scale < 0: raise ValueError("Invalid prior hyperparameters") self._prior_loc = prior_loc self._prior_scale = prior_scale @property def prior_scale(self): return self._prior_scale def sample_prior(self): return Normal(loc=self._prior_loc, scale=self._prior_scale).sample().item() def sample_posterior( self, node: LeafNode, X: torch.Tensor, y: torch.Tensor, current_sigma_val: float, ): X_in_node, y_in_node = node.data_in_node(X, y) if len(X_in_node) == 0: return None # no new data num_points_in_node = len(X_in_node) prior_variance = (self._prior_scale) ** 2 likelihood_variance = (current_sigma_val**2) / num_points_in_node likelihood_mean = torch.sum(y_in_node) / num_points_in_node posterior_variance = 1.0 / (1.0 / prior_variance + 1.0 / likelihood_variance) posterior_mean = ( likelihood_mean * prior_variance + self._prior_loc * likelihood_variance ) / (likelihood_variance + prior_variance) return ( Normal(loc=posterior_mean, scale=math.sqrt(posterior_variance)) .sample() .item() )
beanmachine-main
src/beanmachine/ppl/experimental/causal_inference/models/bart/scalar_samplers.py
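Usage sketch for the two samplers above (illustrative only; assumes the package is importable from the path shown and uses toy data).

import torch

from beanmachine.ppl.experimental.causal_inference.models.bart.scalar_samplers import (
    LeafMean,
    NoiseStandardDeviation,
)

torch.manual_seed(0)
X = torch.rand(50, 2)
residual = 0.3 * torch.randn(50, 1)

# sigma is initialized from its inverse-gamma prior, then updated from the current residual.
sigma = NoiseStandardDeviation(prior_concentration=3.0, prior_rate=1.0)
sigma.sample(X=X, residual=residual)
print(sigma.val)

# Leaf means are drawn from a zero-centered normal prior.
leaf_mean = LeafMean(prior_loc=0.0, prior_scale=0.1)
print(leaf_mean.sample_prior())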
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional, overload, Tuple, Union import torch from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import ( PruneError, ) from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import ( CompositeRules, SplitRule, ) class BaseNode: """ Base class for node structures. Contains reference to a left and right child which can be used to traverse the tree. Args: depth (int): Distance of node from root node. composite_rules (CompositeRules): Dimensional rules that are satisfied by this node. left_child ("BaseNode"): Left child of the node. right_child ("BaseNode"): Right child of the node. """ def __init__( self, depth: int, composite_rules: CompositeRules, left_child: Optional["BaseNode"] = None, right_child: Optional["BaseNode"] = None, ): """ """ self.depth = depth self.composite_rules = composite_rules self._left_child = left_child self._right_child = right_child @property def left_child(self) -> Optional["BaseNode"]: """Returns the left_child of the node.""" return self._left_child @left_child.setter def left_child(self, left_child: Optional["BaseNode"]): self._left_child = left_child @property def right_child(self) -> Optional["BaseNode"]: """Returns the right_child of the node.""" return self._right_child @right_child.setter def right_child(self, right_child: Optional["BaseNode"]): self._right_child = right_child @overload def data_in_node( self, X: torch.Tensor, y: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: ... @overload def data_in_node(self, X: torch.Tensor) -> torch.Tensor: ... def data_in_node( self, X: torch.Tensor, y: Optional[torch.Tensor] = None ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: """ Conditions the covariate matrix and (optionally) response vector to return the respective subsets which satisfy the composite rules of this node. Note that the conditioning only looks at the input / covariate matrix to determine this conditioning. Args: X: Input / covariate matrix. y: (Optional) response vector. """ condition_mask = self.composite_rules.condition_on_rules(X) if y is not None: return X[condition_mask], y[condition_mask] return X[condition_mask] class LeafNode(BaseNode): """ A representation of a leaf node in the tree. Does not have children. In addition to the normal work of a `BaseNode`, a `LeafNode` is responsible for making predictions based on its value. Args: depth (int): Distance of node from root node. composite_rules (CompositeRules): Dimensional rules that are satisfied by this node. val (float): The prediction value of the node. """ def __init__( self, depth: int, composite_rules: CompositeRules, val: float = 0.0, ): self.val = val super().__init__( depth=depth, composite_rules=composite_rules, left_child=None, right_child=None, ) def predict(self) -> float: """ Returns the val attribute as a prediction. """ return self.val def is_growable(self, X: torch.Tensor) -> bool: """ Returns true if this leaf node can be grown. This is checked by ensuring the input covariate matrix has atleast more than 1 unique values along any dimension. Args: X: Input / covariate matrix. """ return len(self.get_growable_dims(X)) > 0 def get_growable_dims(self, X: torch.Tensor) -> List[int]: """ Returns the list of dimensions along which this leaf node can be gronw. 
        This is checked by ensuring the input covariate matrix has more than one unique value
        along any dimension.

        Args:
            X: Input / covariate matrix.
        """
        X_conditioned = self.data_in_node(X)
        if len(X_conditioned) == 0:
            return []
        return [
            dim
            for dim in range(X_conditioned.shape[-1])
            if len(torch.unique(self.get_growable_vals(X_conditioned, dim))) > 1
        ]

    def get_num_growable_dims(self, X: torch.Tensor) -> int:
        """
        Returns the number of dimensions along which this leaf node can be grown.
        This is checked by ensuring the input covariate matrix has more than one unique value
        along any dimension.

        Args:
            X: Input / covariate matrix.
        """
        return len(self.get_growable_dims(X))

    def get_growable_vals(self, X: torch.Tensor, grow_dim: int) -> torch.Tensor:
        """Returns the values in a feature dimension.

        Args:
            X: Input / covariate matrix.
            grow_dim: Input dimension along which values are required.
        """
        return self.data_in_node(X)[:, grow_dim]

    def get_partition_of_split(
        self, X: torch.Tensor, grow_dim: int, grow_val: float
    ) -> float:
        """
        Get the probability that a split value is chosen among possible values in an input
        dimension, defined as N(values_in_dimension == split_val) / N(values_in_dimension).

        Args:
            X: Input / covariate matrix.
            grow_dim: Input dimension along which values are required.
            grow_val: The value along which the split is being carried out.
        """
        growable_vals = self.get_growable_vals(X, grow_dim)
        return torch.mean(
            (growable_vals == grow_val).to(torch.float), dtype=torch.float
        ).item()

    @staticmethod
    def grow_node(
        node: "LeafNode",
        left_rule: SplitRule,
        right_rule: SplitRule,
    ) -> "SplitNode":
        """
        Converts a LeafNode into an internal SplitNode by applying the split rules for the
        left and right children. This returns a copy; the original node is not mutated.

        Args:
            node: The leaf node to grow.
            left_rule: Rule applied to the left child of the grown node.
            right_rule: Rule applied to the right child of the grown node.
        """
        left_composite_rules = node.composite_rules.add_rule(left_rule)
        right_composite_rules = node.composite_rules.add_rule(right_rule)
        return SplitNode(
            depth=node.depth,
            composite_rules=node.composite_rules,
            left_child=LeafNode(
                depth=node.depth + 1, composite_rules=left_composite_rules
            ),
            right_child=LeafNode(
                depth=node.depth + 1, composite_rules=right_composite_rules
            ),
        )


class SplitNode(BaseNode):
    """
    Encapsulates an internal node in the tree. It has the same attributes as BaseNode and
    contains the additional logic to determine whether this node can be pruned.

    Args:
        depth (int): Distance of node from root node.
        composite_rules (CompositeRules): Dimensional rules that are satisfied by this node.
        left_child ("BaseNode"): Left child of the node.
        right_child ("BaseNode"): Right child of the node.
    """

    def __init__(
        self,
        depth: int,
        composite_rules: CompositeRules,
        left_child: Optional["BaseNode"] = None,
        right_child: Optional["BaseNode"] = None,
    ):
        """
        Args:
            depth: Distance of node from root node.
            composite_rules: Dimensional rules that are satisfied by this node.
            left_child: Left child of the node.
            right_child: Right child of the node.
        """
        super().__init__(
            depth=depth,
            composite_rules=composite_rules,
            left_child=left_child,
            right_child=right_child,
        )

    def is_prunable(self) -> bool:
        """Returns true if this node is prunable.
        This is decided by checking whether both of its children are `LeafNode` instances."""
        return isinstance(self.left_child, LeafNode) and isinstance(
            self.right_child, LeafNode
        )

    def most_recent_rule(self) -> Optional[SplitRule]:
        """Returns the rule which grew this node from a `LeafNode`; specifically, the rule
        which created its left child."""
        if self.left_child is None:
            raise AttributeError("This node is not split")
        return self.left_child.composite_rules.most_recent_split_rule()

    @staticmethod
    def prune_node(
        node: "SplitNode",
    ) -> LeafNode:
        """
        Converts a SplitNode to a LeafNode by eliminating its children (if they are
        leaf nodes). Returns a copy.

        Args:
            node: Node to prune.

        Raises:
            PruneError: If this node is not prunable.
        """
        if not node.is_prunable():
            raise PruneError("Not a valid prunable node")
        return LeafNode(depth=node.depth, composite_rules=node.composite_rules)
beanmachine-main
src/beanmachine/ppl/experimental/causal_inference/models/bart/node.py
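Usage sketch for growing and pruning nodes (illustrative only). The import path for SplitRule, Operator and CompositeRules is assumed from the sibling split_rule module used elsewhere in this package, and a CompositeRules with no split rules is assumed to select every row, as in the root node built by bart_model.py above.

import torch

from beanmachine.ppl.experimental.causal_inference.models.bart.node import (
    LeafNode,
    SplitNode,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
    CompositeRules,
    Operator,
    SplitRule,
)

X = torch.tensor([[0.1], [0.4], [0.8], [0.9]])

root = LeafNode(depth=0, composite_rules=CompositeRules(all_dims=[0]))
print(root.is_growable(X))        # dimension 0 has more than one unique value
print(root.get_growable_dims(X))  # [0]

# Grow the root into an internal node by splitting on X[:, 0] <= 0.4.
left_rule = SplitRule(grow_dim=0, grow_val=0.4, operator=Operator.le)
right_rule = SplitRule(grow_dim=0, grow_val=0.4, operator=Operator.gt)
split = LeafNode.grow_node(root, left_rule=left_rule, right_rule=right_rule)

print(split.is_prunable())        # both children are leaves, so the split can be undone
pruned = SplitNode.prune_node(split)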
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import enum import math from typing import Union import torch from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import ( GrowError, PruneError, TreeStructureError, ) from beanmachine.ppl.experimental.causal_inference.models.bart.mutation import ( GrowMutation, Mutation, PruneMutation, ) from beanmachine.ppl.experimental.causal_inference.models.bart.node import ( LeafNode, SplitNode, ) from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import ( Operator, SplitRule, ) from beanmachine.ppl.experimental.causal_inference.models.bart.tree import Tree from beanmachine.ppl.experimental.causal_inference.models.bart.tree_proposer import ( TreeProposer, ) from numpy.random import choice from torch.distributions.uniform import Uniform class MutationKind(enum.Enum): grow = "grow operation" prune = "prune operation" class GrowPruneTreeProposer(TreeProposer): """This implements the Grow Prune tree sampling approach of Pratola [1] where the additional steps of BART (Change and Swap) are eliminated for computational efficiency. Reference: [1] Pratola MT, Chipman H, Higdon D, McCulloch R, Rust W (2013). “Parallel Bayesian Additive Regression Trees.” Technical report, University of Chicago. https://arxiv.org/pdf/1309.1906.pdf Args: grow_probability: Probability of growing a node. """ def __init__(self, grow_probability: float = 0.5): if grow_probability > 1.0 or grow_probability < 0.0: raise ValueError( f"Grow probability {grow_probability} not a valid probabiity" ) self.grow_probability = grow_probability self.prune_probability = 1 - self.grow_probability self._uniform = Uniform(0.0, 1.0) def propose( self, tree: Tree, X: torch.Tensor, partial_residual: torch.Tensor, alpha: float, beta: float, sigma_val: float, leaf_mean_prior_scale: float, ) -> Tree: """Propose a tree based on a Metropolis-Hastings step. Refer to [1] for details. Reference: [1] Adam Kapelner & Justin Bleich (2014). "bartMachine: Machine Learning with Bayesian Additive Regression Trees". https://arxiv.org/pdf/1312.2171.pdf Args: tree: Previous tree. X: Covariate matrix / training data. partial_residual: Partial residual of the current tree model with respect to the training data. alpha: Hyperparameter used in tree prior. beta: Hyperparameter used in tree prior. sigma_val: Current estimate of noise standard deviation in the data. leaf_mean_prior_scale: Prior of the scale hyperparameter in the normal distribution of the leaf mean. 
""" new_mutation = self._get_new_mutation(tree, X) # carry out move if new_mutation == MutationKind.grow: try: leaf_to_grow = self._select_leaf_to_grow(tree=tree, X=X) except GrowError: return self.propose( tree, X, partial_residual, alpha, beta, sigma_val, leaf_mean_prior_scale, ) grow_dim = self._select_grow_dim(leaf_to_grow, X) grow_val = self._get_grow_val( leaf_to_grow=leaf_to_grow, grow_dim=grow_dim, X=X ) left_rule, right_rule = SplitRule( grow_dim=grow_dim, grow_val=grow_val, operator=Operator.le ), SplitRule(grow_dim=grow_dim, grow_val=grow_val, operator=Operator.gt) mutation = GrowMutation( old_node=leaf_to_grow, new_node=LeafNode.grow_node( leaf_to_grow, left_rule=left_rule, right_rule=right_rule ), ) elif new_mutation == MutationKind.prune: try: split_node_to_prune = self._select_split_node_to_prune(tree) except PruneError: return self.propose( tree, X, partial_residual, alpha, beta, sigma_val, leaf_mean_prior_scale, ) mutation = PruneMutation( old_node=split_node_to_prune, new_node=SplitNode.prune_node(split_node_to_prune), ) else: raise TreeStructureError("Can only grow or prune") # Metropolis-Hasting step log_draw_probability = ( self._get_log_transition_ratio( tree=tree, mutation=mutation, X=X, ) + self._get_log_likelihood_ratio( mutation=mutation, X=X, partial_residual=partial_residual, sigma_val=sigma_val, leaf_mean_prior_scale=leaf_mean_prior_scale, ) + self._get_log_structure_ratio( mutation=mutation, alpha=alpha, beta=beta, X=X, ) ) if self._uniform.sample().item() < math.exp(log_draw_probability): tree.mutate(mutation) return tree return tree def _get_new_mutation(self, tree: Tree, X: torch.Tensor) -> MutationKind: """Get a new mutation. Args: tree: Previous tree. X: Covariate matrix / training data. """ if tree.num_nodes() == 1 or tree.num_prunable_split_nodes() == 0: return MutationKind.grow if tree.num_growable_leaf_nodes(X) == 0: return MutationKind.prune if bool(torch.bernoulli(torch.Tensor([self.grow_probability])).item()): return MutationKind.grow return MutationKind.prune def _select_leaf_to_grow(self, tree: Tree, X: torch.Tensor) -> LeafNode: """ Select which leaf to grow. Args: tree: Previous tree. X: Covariate matrix / training data. """ growable_leaf_nodes = tree.growable_leaf_nodes(X) if len(growable_leaf_nodes) < 1: raise GrowError("Leaf cannot be grown") return choice(growable_leaf_nodes) def _select_grow_dim(self, leaf_to_grow: LeafNode, X: torch.Tensor) -> int: """ Select an input dimension to grow along. Args: tree: Previous tree. leaf_to_grow: Leaf currently being grown. X: Covariate matrix / training data. """ if not leaf_to_grow.is_growable(X): raise GrowError("Leaf cannot be grown") return choice(leaf_to_grow.get_growable_dims(X)) def _get_grow_val( self, leaf_to_grow: LeafNode, grow_dim: int, X: torch.Tensor ) -> float: """ Select a value in the chosen input dimension to grow along. Args: tree: Previous tree. leaf_to_grow: Leaf currently being grown. grow_dim: Input dimension to grow along. X: Covariate matrix / training data. """ if not leaf_to_grow.is_growable(X): raise GrowError("Leaf cannot be grown") growable_vals = leaf_to_grow.get_growable_vals(X, grow_dim) max_growable_val = torch.max(growable_vals) candidate_val = choice(growable_vals) degenerate_grow_condition = candidate_val == max_growable_val while degenerate_grow_condition: return choice(growable_vals) return candidate_val def _select_split_node_to_prune(self, tree: Tree) -> SplitNode: """ Select and internal node to prune. Args: tree: Previous tree. 
""" prunable_split_nodes = tree.prunable_split_nodes() if len(prunable_split_nodes) < 1: raise PruneError return choice(prunable_split_nodes) def _get_log_transition_ratio( self, tree: Tree, mutation: Mutation, X: torch.Tensor, ) -> float: """ Get the log transition ratio as discussed in [1]. [1] Adam Kapelner & Justin Bleich (2014). "bartMachine: Machine Learning with Bayesian Additive Regression Trees". https://arxiv.org/pdf/1312.2171.pdf Args: tree: Previous tree. mutation: Proposed mutation, X: Covariate matrix / training data. """ if isinstance(mutation, GrowMutation): return self._grow_log_transition_ratio(tree=tree, mutation=mutation, X=X) elif isinstance(mutation, PruneMutation): return self._prune_log_transition_ratio(tree=tree, mutation=mutation, X=X) else: raise TreeStructureError("Can only grow or prune") def _grow_log_transition_ratio( self, tree: Tree, mutation: GrowMutation, X: torch.Tensor, ) -> float: """ Implement expression for log( P(T -> T*) / P(T* -> T) ) in a GROW move as discussed in eq. 8 of [1] Reference: [1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013). https://arxiv.org/abs/1312.2171 Args: tree: Previous tree. mutation: Proposed mutation, X: Covariate matrix / training data. """ log_p_new_to_old_tree = math.log(self.prune_probability) - math.log( tree.num_prunable_split_nodes() + 1 ) log_p_old_to_new_tree = math.log( self.grow_probability ) + _log_probability_of_growing_a_tree( tree=tree, mutation=mutation, X=X, ) return log_p_new_to_old_tree - log_p_old_to_new_tree def _prune_log_transition_ratio( self, tree: Tree, mutation: PruneMutation, X: torch.Tensor, ) -> float: """ Implement expression for log( P(T -> T*) / P(T* -> T) ) in a PRUNE move as discussed in section A.2 of [1] Reference: [1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013). https://arxiv.org/abs/1312.2171 Args: tree: Previous tree. mutation: Proposed mutation, X: Covariate matrix / training data. """ num_growable_leaves_in_pruned_tree = ( tree.num_growable_leaf_nodes(X) - mutation.old_node.left_child.is_growable(X=X) - mutation.old_node.right_child.is_growable(X=X) + mutation.new_node.is_growable(X=X) ) if num_growable_leaves_in_pruned_tree == 0: return -float("inf") # impossible prune log_p_old_to_new_tree = math.log(self.prune_probability) - math.log( tree.num_prunable_split_nodes() ) log_probability_selecting_leaf_to_grow = -math.log( num_growable_leaves_in_pruned_tree ) log_probability_growing_leaf = _log_probability_of_growing_node( mutation=GrowMutation( old_node=mutation.new_node, new_node=mutation.old_node ), X=X, ) log_p_new_to_old_tree = ( math.log(self.grow_probability) + log_probability_selecting_leaf_to_grow + log_probability_growing_leaf ) return log_p_new_to_old_tree - log_p_old_to_new_tree def _get_log_likelihood_ratio( self, mutation: Mutation, X: torch.Tensor, partial_residual: torch.Tensor, sigma_val: float, leaf_mean_prior_scale: float, ) -> float: """ Implement expression for log( P(R | T*, sigma) / P(R | T, sigma) ) in a GROW move as discussed in [1] Reference: [1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013). https://arxiv.org/abs/1312.2171 Args: tree: Previous tree. mutation: Proposed mutation, sigma_val:urrent estimate of noise standard deviation in the data. leaf_mean_prior_scale: Prior of the scale hyperparameter in the normal distribution of the leaf mean. 
X: Covariate matrix / training data. partial_residual: Partial residual of the current tree model with respect to the training data. """ if isinstance(mutation, GrowMutation): return self._grow_log_likelihood_ratio( mutation=mutation, sigma_val=sigma_val, leaf_mean_prior_scale=leaf_mean_prior_scale, X=X, partial_residual=partial_residual, ) elif isinstance(mutation, PruneMutation): return -self._grow_log_likelihood_ratio( mutation=GrowMutation( old_node=mutation.new_node, new_node=mutation.old_node ), sigma_val=sigma_val, leaf_mean_prior_scale=leaf_mean_prior_scale, X=X, partial_residual=partial_residual, ) else: raise TreeStructureError(" Can only grow or prune") def _grow_log_likelihood_ratio( self, mutation: GrowMutation, sigma_val: float, leaf_mean_prior_scale: float, X: torch.Tensor, partial_residual: torch.Tensor, ) -> float: """ Implement expression for log( P(R | T*, sigma) / P(R | T, sigma) ) in a GROW move as discussed in eq. 10 of [1] Reference: [1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013). https://arxiv.org/abs/1312.2171 Args: tree: Previous tree. mutation: Proposed mutation, sigma_val:urrent estimate of noise standard deviation in the data. leaf_mean_prior_scale: Prior of the scale hyperparameter in the normal distribution of the leaf mean. X: Covariate matrix / training data. partial_residual: Partial residual of the current tree model with respect to the training data. """ var = sigma_val**2 var_mu = leaf_mean_prior_scale**2 nodes = { "parent": mutation.old_node, "left": mutation.new_node.left_child, "right": mutation.new_node.right_child, } y_sum, num_points = {}, {} for node_label, node in nodes.items(): X_conditioned, y_conditioned = node.data_in_node(X, partial_residual) y_sum[node_label] = torch.sum(y_conditioned) num_points[node_label] = len(X_conditioned) first_term = (var * (var + num_points["parent"] * leaf_mean_prior_scale)) / ( (var + num_points["left"] * var_mu) * (var + num_points["right"] * var_mu) ) first_term = math.log(math.sqrt(first_term)) left_contribution = torch.square(y_sum["left"]) / ( var + num_points["left"] * leaf_mean_prior_scale ) right_contribution = torch.square(y_sum["right"]) / ( var + num_points["right"] * leaf_mean_prior_scale ) parent_contribution = torch.square(y_sum["parent"]) / ( var + num_points["parent"] * leaf_mean_prior_scale ) second_term = left_contribution + right_contribution - parent_contribution return first_term + (var_mu / (2 * var)) * second_term.item() def _get_log_structure_ratio( self, mutation: Mutation, alpha: float, beta: float, X: torch.Tensor, ) -> float: """ Implement expression for log( P(T*) / P(T) ) in as discussed in [1]. Reference: [1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013). https://arxiv.org/abs/1312.2171 Args: mutation: Proposed mutation, X: Covariate matrix / training data. alpha: Hyperparameter used in tree prior. beta: Hyperparameter used in tree prior. 
""" if isinstance(mutation, GrowMutation): return self._grow_log_structure_ratio( mutation=mutation, alpha=alpha, beta=beta, X=X, ) elif isinstance(mutation, PruneMutation): return -self._grow_log_structure_ratio( mutation=GrowMutation( old_node=mutation.new_node, new_node=mutation.old_node ), alpha=alpha, beta=beta, X=X, ) else: raise TreeStructureError("Only grow or prune mutations are allowed") def _grow_log_structure_ratio( self, mutation: GrowMutation, alpha: float, beta: float, X: torch.Tensor, ) -> float: """ Implement expression for log( P(T*) / P(T) ) in a GROW step as discussed in section A.1 of [1]. Reference: [1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013). https://arxiv.org/abs/1312.2171 Args: mutation: Proposed mutation, X: Covariate matrix / training data. alpha: Hyperparameter used in tree prior. beta: Hyperparameter used in tree prior. """ denominator = _log_probability_node_is_terminal(alpha, beta, mutation.old_node) log_probability_left_is_terminal = _log_probability_node_is_terminal( alpha, beta, mutation.new_node.left_child ) log_probability_right_is_terminal = _log_probability_node_is_terminal( alpha, beta, mutation.new_node.right_child ) log_probability_parent_is_nonterminal = _log_probability_node_is_nonterminal( alpha, beta, mutation.old_node ) log_probability_rule = _log_probability_of_growing_node(mutation=mutation, X=X) numerator = ( log_probability_left_is_terminal + log_probability_right_is_terminal + log_probability_parent_is_nonterminal + log_probability_rule ) return numerator - denominator def _log_probability_node_is_nonterminal( alpha: float, beta: float, node: Union[LeafNode, SplitNode] ) -> float: """Get log probability of node being non-terminal (internal node) as discussed in Eq. 7 of [1]. Reference: [1] Hugh A. Chipman, Edward I. George, Robert E. McCulloch (2010). "BART: Bayesian additive regression trees" https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full Args: alpha: Hyperparameter used in tree prior. beta: Hyperparameter used in tree prior. node: Node for which probability is being calculated. """ return math.log(alpha * math.pow(1 + node.depth, -beta)) def _log_probability_node_is_terminal( alpha: float, beta: float, node: Union[LeafNode, SplitNode] ) -> float: """Get log probability of node being terminal (leaf node) as discussed in Eq. 7 of [1]. Reference: [1] Hugh A. Chipman, Edward I. George, Robert E. McCulloch (2010). "BART: Bayesian additive regression trees" https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full Args: alpha: Hyperparameter used in tree prior. beta: Hyperparameter used in tree prior. node: Node for which probability is being calculated. """ return 1 - _log_probability_node_is_nonterminal(alpha=alpha, beta=beta, node=node) def _log_probability_of_growing_a_tree( tree: Tree, mutation: GrowMutation, X: torch.Tensor ) -> float: """ Get probability of choosing a node and growing it as discussed in section A.1 of [1]. Reference: [1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013). https://arxiv.org/abs/1312.2171 Args: tree: Previous tree. mutation: Growth mutation being applied. X: Covariate matrix / training data. 
""" return -math.log(tree.num_growable_leaf_nodes(X)) +_log_probability_of_growing_node(mutation=mutation, X=X) def _log_probability_of_growing_node(mutation: GrowMutation, X: torch.Tensor) -> float: """ Get probability of growing a node as discussed in section A.1 of [1]. Reference: [1] Adam Kapelner & Justin Bleich. "bartMachine: Machine Learning with Bayesian Additive Regression Trees" (2013). https://arxiv.org/abs/1312.2171 Args: mutation: Growth mutation being applied. X: Covariate matrix / training data. """ log_probability_of_selecting_dim = -math.log( mutation.old_node.get_num_growable_dims(X) ) grow_dim = mutation.new_node.most_recent_rule().grow_dim grow_val = mutation.new_node.most_recent_rule().grow_val log_probability_of_growing_at_val = -math.log( mutation.old_node.get_partition_of_split(X, grow_dim, grow_val) ) return log_probability_of_selecting_dim + log_probability_of_growing_at_val
beanmachine-main
src/beanmachine/ppl/experimental/causal_inference/models/bart/grow_prune_tree_proposer.py
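A small worked example of the depth prior used by `_log_probability_node_is_nonterminal` above (illustrative only): under the Chipman et al. (2010) prior, a node at depth d is split with probability alpha * (1 + d) ** (-beta), so deeper nodes are increasingly likely to stay terminal.

import math

alpha, beta = 0.95, 2.0
for depth in range(4):
    p_split = alpha * math.pow(1 + depth, -beta)
    print(depth, p_split)
# Approximately 0.95, 0.24, 0.11, 0.06 for depths 0 through 3, which keeps trees shallow.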
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations import dataclasses from typing import ( Collection, Dict, Iterable, Iterator, List, Mapping, Optional, Set, Tuple, TypeVar, ) import torch import torch.distributions as dist from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.world import init_to_uniform from beanmachine.ppl.world.base_world import BaseWorld from beanmachine.ppl.world.initialize_fn import init_from_prior, InitializeFn from beanmachine.ppl.world.variable import Variable RVDict = Dict[RVIdentifier, torch.Tensor] T = TypeVar("T", bound="World") @dataclasses.dataclass class _TempVar: node: RVIdentifier parents: Set[RVIdentifier] = dataclasses.field(default_factory=set) class World(BaseWorld, Mapping[RVIdentifier, torch.Tensor]): """ A World represents an instantiation of the graphical model and can be manipulated or evaluated. In the context of MCMC inference, a world represents a single Monte Carlo posterior sample. A World can also be used as a context manager to run and sample random variables. Example:: @bm.random_variable def foo(): return Normal(0., 1.) world = World() with world: x = foo() # returns a sample, ie tensor. with world: y = foo() # same world = same tensor assert x == y Args: observations (Optional): Optional observations, which fixes the random variables to observed values initialize_fn (callable, Optional): Callable which takes a ``torch.distribution`` object as argument and returns a ``torch.Tensor`` """ def __init__( self, observations: Optional[RVDict] = None, initialize_fn: InitializeFn = init_from_prior, ) -> None: self.observations: RVDict = observations or {} self._initialize_fn: InitializeFn = initialize_fn self._variables: Dict[RVIdentifier, Variable] = {} self._call_stack: List[_TempVar] = [] @classmethod def initialize_world( cls: type[T], queries: Iterable[RVIdentifier], observations: Optional[RVDict] = None, initialize_fn: InitializeFn = init_to_uniform, max_retries: int = 100, **kwargs, ) -> T: """ Initializes a world with all of the random variables (queries and observations). In case of initializing values outside of support of the distributions, the method will keep resampling until a valid initialization is found up to ``max_retries`` times. Args: queries: A list of random variables that need to be inferred. observations: Observations, which fixes the random variables to observed values initialize_fn: Function for initializing the values of random variables max_retries: The number of attempts this method will make before throwing an error (default to 100). """ observations = observations or {} for _ in range(max_retries): world = cls(observations, initialize_fn, **kwargs) # recursively add parent nodes to the graph for node in queries: world.call(node) for node in observations: world.call(node) # check if the initial state is valid log_prob = world.log_prob() if not torch.isinf(log_prob) and not torch.isnan(log_prob): return world # None of the world gives us a valid initial state raise ValueError( f"Cannot find a valid initialization after {max_retries} retries. The model" " might be misspecified." ) def __getitem__(self, node: RVIdentifier) -> torch.Tensor: """ Args: node (RVIdentifier): RVIdentifier of node. Returns: torch.Tensor: The sampled value. 
""" return self._variables[node].value def get_variable(self, node: RVIdentifier) -> Variable: """ Args: node (RVIdentifier): RVIdentifier of node. Returns: Variable object that contains the metadata of the current node in the world. """ return self._variables[node] def replace(self, values: RVDict) -> World: """ Args: values (RVDict): Dict of RVIdentifiers and their values to replace. Returns: A new world where values specified in the dictionary are replaced. This method will update the internal graph structure. """ assert not any(node in self.observations for node in values) new_world = self.copy() for node, value in values.items(): new_world._variables[node] = new_world._variables[node].replace( value=value.clone() ) # changing the value of a node can change the dependencies of its children nodes nodes_to_update = set().union( *(self._variables[node].children for node in values) ) for node in nodes_to_update: # Invoke node conditioned on the provided values new_distribution, new_parents = new_world._run_node(node) # Update children's dependencies old_node_var = new_world._variables[node] new_world._variables[node] = old_node_var.replace( parents=new_parents, distribution=new_distribution ) dropped_parents = old_node_var.parents - new_parents for parent in dropped_parents: parent_var = new_world._variables[parent] new_world._variables[parent] = parent_var.replace( children=parent_var.children - {node} ) return new_world def __iter__(self) -> Iterator[RVIdentifier]: return iter(self._variables) def __len__(self) -> int: return len(self._variables) @property def latent_nodes(self) -> Set[RVIdentifier]: """ All the latent nodes in the current world. """ return self._variables.keys() - self.observations.keys() def copy(self) -> World: """ Returns: Shallow copy of the current world. """ world_copy = World(self.observations.copy(), self._initialize_fn) world_copy._variables = self._variables.copy() return world_copy def initialize_value(self, node: RVIdentifier) -> None: # recursively calls into parent nodes distribution, parents = self._run_node(node) if node in self.observations: node_val = self.observations[node] else: node_val = self._initialize_fn(distribution) self._variables[node] = Variable( value=node_val, distribution=distribution, parents=parents, ) def update_graph(self, node: RVIdentifier) -> torch.Tensor: """ This function adds a node to the graph and initialize its value if the node is not found in the graph already. Args: node (RVIdentifier): RVIdentifier of node to update in the graph. Returns: The value of the node stored in world (in original space). """ if node not in self._variables: self.initialize_value(node) node_var = self._variables[node] if len(self._call_stack) > 0: tmp_child_var = self._call_stack[-1] tmp_child_var.parents.add(node) node_var.children.add(tmp_child_var.node) return node_var.value def log_prob( self, nodes: Optional[Collection[RVIdentifier]] = None ) -> torch.Tensor: """ Args: nodes (Optional): Optional collection of RVIdentifiers to evaluate the log prob of a subset of the graph. If none is specified, then all the variables in the world are used. Returns: The joint log prob of all of the nodes in the current world """ if nodes is None: nodes = self._variables.keys() log_prob = torch.tensor(0.0) for node in set(nodes): log_prob = log_prob + torch.sum(self._variables[node].log_prob) return log_prob def enumerate_node(self, node: RVIdentifier) -> torch.Tensor: """ Args: node (RVIdentifier): RVIdentifier of node. 
Returns: A tensor enumerating the support of the node. """ distribution = self._variables[node].distribution if not distribution.has_enumerate_support: raise ValueError(str(node) + " is not enumerable") return distribution.enumerate_support() def _run_node( self, node: RVIdentifier ) -> Tuple[dist.Distribution, Set[RVIdentifier]]: """ Invoke a random variable function conditioned on the current world. Args: node (RVIdentifier): RVIdentifier of node. Returns: Its distribution and set of parent nodes """ self._call_stack.append(_TempVar(node)) with self: distribution = node.function(*node.arguments) temp_var = self._call_stack.pop() if not isinstance(distribution, dist.Distribution): raise TypeError("A random_variable is required to return a distribution.") return distribution, temp_var.parents
beanmachine-main
src/beanmachine/ppl/world/world.py
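Usage sketch for World (illustrative only), following the pattern in the class docstring: random variables are declared with beanmachine's `random_variable` decorator, and a world is initialized over the latent nodes given observations.

import beanmachine.ppl as bm
import torch
import torch.distributions as dist

from beanmachine.ppl.world import World


@bm.random_variable
def mu():
    return dist.Normal(0.0, 1.0)


@bm.random_variable
def x():
    return dist.Normal(mu(), 1.0)


world = World.initialize_world(queries=[mu()], observations={x(): torch.tensor(0.5)})
print(world[mu()])         # current value of mu in this world
print(world.log_prob())    # joint log density of all variables in the world
print(world.latent_nodes)  # {mu()}; observed nodes are excluded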
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-ignore-all-errors[16, 20] from typing import Callable import torch import torch.distributions as dist InitializeFn = Callable[[dist.Distribution], torch.Tensor] def init_to_uniform(distribution: dist.Distribution) -> torch.Tensor: """ Initializes a uniform distribution to sample from transformed to the support of ``distribution``. A Categorical is used for discrete distributions, a bijective transform is used for constrained continuous distributions, and ``distribution`` is used otherwise. Used as an arg for ``World`` Args: distribution: ``torch.distribution.Distribution`` of the RV, usually the prior distribution. """ sample_val = distribution.sample() if distribution.has_enumerate_support: support = distribution.enumerate_support(expand=False).flatten() return support[torch.randint_like(sample_val, support.numel()).long()] elif not distribution.support.is_discrete: transform = dist.biject_to(distribution.support) return transform(torch.rand_like(transform.inv(sample_val)) * 4 - 2) else: # fall back to sample from prior return init_from_prior(distribution) def init_from_prior(distribution: dist.Distribution) -> torch.Tensor: """ Samples from the distribution. Used as an arg for ``World`` Args: distribution: ``torch.distribution.Distribution`` corresponding to the distribution to sample from """ return distribution.sample()
beanmachine-main
src/beanmachine/ppl/world/initialize_fn.py
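Usage sketch (illustrative only): both initializers take a torch distribution and return a tensor in its support.

import torch.distributions as dist

from beanmachine.ppl.world import init_from_prior, init_to_uniform

d = dist.Beta(2.0, 2.0)
print(init_from_prior(d))   # a draw from the prior itself
print(init_to_uniform(d))   # uniform(-2, 2) in unconstrained space, mapped back into (0, 1)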
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from beanmachine.ppl.world.base_world import get_world_context from beanmachine.ppl.world.initialize_fn import ( init_from_prior, init_to_uniform, InitializeFn, ) from beanmachine.ppl.world.utils import BetaDimensionTransform, get_default_transforms from beanmachine.ppl.world.world import RVDict, World __all__ = [ "BetaDimensionTransform", "InitializeFn", "RVDict", "World", "get_default_transforms", "get_world_context", "init_from_prior", "init_to_uniform", ]
beanmachine-main
src/beanmachine/ppl/world/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import annotations

import dataclasses
from typing import Set

import torch
import torch.distributions as dist
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from torch.distributions.utils import lazy_property


@dataclasses.dataclass
class Variable:
    """
    Primitive used for maintaining metadata of random variables. Usually used in
    conjunction with `World` during inference.
    """

    value: torch.Tensor
    "Sampled value of random variable"

    distribution: dist.Distribution
    "Distribution random variable was sampled from"

    parents: Set[RVIdentifier] = dataclasses.field(default_factory=set)
    "Set containing the RVIdentifiers of the parents of the random variable"

    children: Set[RVIdentifier] = dataclasses.field(default_factory=set)
    "Set containing the RVIdentifiers of the children of the random variable"

    @lazy_property
    def log_prob(self) -> torch.Tensor:
        """
        Returns the log probability of `value` under `distribution`.
        """
        return self.distribution.log_prob(self.value)

    def replace(self, **changes) -> Variable:
        """Return a new Variable object with fields replaced by the changes"""
        return dataclasses.replace(self, **changes)
beanmachine-main
src/beanmachine/ppl/world/variable.py
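Usage sketch (illustrative only): a Variable pairs a sampled value with its distribution, `log_prob` is computed lazily, and `replace` returns a new Variable rather than mutating in place.

import torch
import torch.distributions as dist

from beanmachine.ppl.world.variable import Variable

var = Variable(value=torch.tensor(0.5), distribution=dist.Normal(0.0, 1.0))
print(var.log_prob)   # log N(0.5; 0, 1), computed on first access and cached

var2 = var.replace(value=torch.tensor(1.0))
print(var2.log_prob)  # recomputed for the new value; `var` is unchanged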
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections.abc import Iterable from typing import Iterable as IterableType, overload, Type, Union import torch import torch.distributions as dist import torch.distributions.constraints as constraints from torch.distributions import Distribution from torch.distributions.transforms import Transform ConstraintType = Union[constraints.Constraint, Type] class BetaDimensionTransform(Transform): """ Volume preserving transformation to the Beta distribution support. """ bijective = True domain = constraints.real codomain = constraints.real_vector def __eq__(self, other): return isinstance(other, BetaDimensionTransform) def _call(self, x): return torch.cat((x.unsqueeze(-1), (1 - x).unsqueeze(-1)), -1) def _inverse(self, y): return y[..., 0] / y.sum(dim=-1) def forward_shape(self, shape): return shape + (2,) def inverse_shape(self, shape): return shape[:-1] def log_abs_det_jacobian(self, x, y): return torch.zeros_like(x) def _unwrap(constraint: ConstraintType): if isinstance(constraint, constraints.independent): return _unwrap(constraint.base_constraint) return constraint if isinstance(constraint, type) else constraint.__class__ def _is_constraint_eq(constraint1: ConstraintType, constraint2: ConstraintType): return _unwrap(constraint1) == _unwrap(constraint2) @overload def is_constraint_eq( constraint: ConstraintType, check_constraints: ConstraintType ) -> bool: ... @overload def is_constraint_eq( constraint: ConstraintType, check_constraints: IterableType[ConstraintType] ) -> IterableType[bool]: ... def is_constraint_eq( constraint: ConstraintType, check_constraints: Union[ConstraintType, IterableType[ConstraintType]], ) -> Union[bool, IterableType[bool]]: """ This provides an equality check that works for different constraints specified in :mod:`torch.distributions.constraints`. If `constraint` is `constraints.Independent`, then the `base_constraint` is checked. If `check_constraints` is a single `Constraint` type or instance this returns a `True` if the given `constraint` matches `check_constraints`. Otherwise, if `check_constraints` is an iterable, this returns a `bool` list that represents an element-wise check. :param constraint: A constraint class or instance. :param check_constraints: A constraint class or instance or an iterable containing constraint classes or instances to check against. :returns: bool (or a list of bool) values indicating if the given constraint equals the constraint in `check_constraints`. """ if isinstance(check_constraints, Iterable): return [_is_constraint_eq(constraint, c) for c in check_constraints] return _is_constraint_eq(constraint, check_constraints) def get_default_transforms(distribution: Distribution) -> dist.Transform: """ Get transforms of a distribution to transform it from constrained space into unconstrained space. 
:param distribution: the distribution to check :returns: a Transform that need to be applied to the distribution to transform it from constrained space into unconstrained space """ if distribution.support.is_discrete: return dist.transforms.identity_transform else: return dist.biject_to(distribution.support).inv def initialize_value(distribution: Distribution, initialize_from_prior: bool = False): """ Initialized the Variable value :param initialize_from_prior: if true, returns sample from prior :returns: the value to the set the Variable value to """ sample_val = distribution.sample() if initialize_from_prior: return sample_val support = distribution.support if isinstance(support, dist.constraints.independent): support = support.base_constraint if initialize_from_prior: return sample_val elif is_constraint_eq(support, dist.constraints.real): return torch.zeros_like(sample_val) elif is_constraint_eq(support, dist.constraints.simplex): value = torch.ones_like(sample_val) return value / sample_val.shape[-1] elif is_constraint_eq(support, dist.constraints.greater_than): return ( torch.ones( sample_val.shape, dtype=sample_val.dtype, device=sample_val.device ) + support.lower_bound ) elif is_constraint_eq(support, dist.constraints.boolean): return dist.Bernoulli(torch.ones_like(sample_val) / 2).sample() elif is_constraint_eq(support, dist.constraints.interval): lower_bound = torch.ones_like(sample_val) * support.lower_bound upper_bound = torch.ones_like(sample_val) * support.upper_bound return dist.Uniform(lower_bound, upper_bound).sample() elif is_constraint_eq(support, dist.constraints.integer_interval): integer_interval = support.upper_bound - support.lower_bound return dist.Categorical( (torch.ones(integer_interval, device=sample_val.device)).expand( sample_val.shape + (integer_interval,) ) ).sample() elif is_constraint_eq(support, dist.constraints.nonnegative_integer): return ( torch.ones( sample_val.shape, dtype=sample_val.dtype, device=sample_val.device ) + support.lower_bound ) return sample_val
beanmachine-main
src/beanmachine/ppl/world/utils.py
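Usage sketch (illustrative only) for the constraint helpers and transforms above.

import torch
import torch.distributions as dist

from beanmachine.ppl.world import get_default_transforms
from beanmachine.ppl.world.utils import BetaDimensionTransform, is_constraint_eq

# Constrained continuous distributions get a bijective unconstraining transform;
# discrete distributions get the identity.
print(get_default_transforms(dist.Beta(2.0, 2.0)))
print(get_default_transforms(dist.Bernoulli(0.3)))

# Constraint equality compares the underlying constraint classes.
print(is_constraint_eq(dist.Dirichlet(torch.ones(3)).support, dist.constraints.simplex))

# BetaDimensionTransform lifts a Beta draw x onto the 2-simplex (x, 1 - x).
print(BetaDimensionTransform()(torch.tensor(0.3)))  # tensor([0.3000, 0.7000])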
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations from abc import ABCMeta, abstractmethod from typing import Optional import torch from beanmachine.ppl.model.rv_identifier import RVIdentifier _WORLD_STACK: list[BaseWorld] = [] def get_world_context() -> Optional[BaseWorld]: return _WORLD_STACK[-1] if _WORLD_STACK else None class BaseWorld(metaclass=ABCMeta): def __enter__(self) -> BaseWorld: """ This method, together with __exit__, allow us to use world as a context, e.g. ``` with World(): # invoke random variables to update the graph ``` By keeping a stack of context tokens, we can easily nest multiple worlds and restore the outer context if needed, e.g. ``` world1, world2 = World(), World() with world1: # do some graph update specific to world1 with world2: # update world2 # back to updating world1 ``` """ _WORLD_STACK.append(self) return self def __exit__(self, *args) -> None: _WORLD_STACK.pop() def call(self, node: RVIdentifier): """ A helper function that invokes the random variable and return its value """ with self: return node.wrapper(*node.arguments) @abstractmethod def update_graph(self, node: RVIdentifier) -> torch.Tensor: raise NotImplementedError
beanmachine-main
src/beanmachine/ppl/world/base_world.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import inspect from functools import wraps from typing import Any, Callable, Dict, Tuple from beanmachine.ppl.utils.item_counter import ItemCounter from torch import Tensor def _tuplify(t: Any) -> Any: if isinstance(t, list): return tuple(_tuplify(y) for y in t) return t # This returns a tuple or value whose shape is the same as the input tensor. # That is: # # tensor(1) --> 1 # tensor([]) --> () # tensor([1]) --> (1,) # tensor([1, 2]) --> (1, 2) # tensor([[1, 2], [3, 4]]) --> ((1, 2), (3, 4)) # # and so on def tensor_to_tuple(t: Tensor) -> Any: result = _tuplify(t.tolist()) return result class MemoizationKey: # It would be nice to just use a tuple (wrapper, args) for the memoization # key, but tensors can only be compared for equality with torch.equal(t1, t2), # and tensors do not hash via value equality. # # We therefore replace tensors with tuples that contain all the values of the # tensor. For example, if our arguments are (1, tensor([2, 3]), 4) then our # new arguments are (1, (2, 3), 4) wrapper: Callable arguments: Tuple hashcode: int def __init__(self, wrapper: Callable, arguments: Tuple) -> None: self.arguments = ( wrapper, tuple( tensor_to_tuple(a) if isinstance(a, Tensor) else a for a in arguments ), ) self.wrapper = wrapper self.hashcode = hash(self.arguments) def __hash__(self) -> int: return self.hashcode def __eq__(self, o) -> bool: return ( isinstance(o, MemoizationKey) and self.hashcode == o.hashcode and self.wrapper == o.wrapper and self.arguments == o.arguments ) total_memoized_functions = 0 total_memoized_calls = 0 total_cache_misses = 0 count_calls = False function_calls = ItemCounter() def memoizer_report() -> str: call_report = [ f"{item.__name__}: {count}\n" for (item, count) in function_calls.items.items() ] return ( f"funcs: {total_memoized_functions} " + f"calls: {total_memoized_calls} " + f"misses: {total_cache_misses}\n" + "".join(call_report) ) def memoize(f): """ Decorator to be used to memoize arbitrary functions. """ global total_memoized_functions total_memoized_functions += 1 cache: Dict[Any, Any] = {} @wraps(f) def wrapper(*args): if count_calls: global total_memoized_calls total_memoized_calls += 1 function_calls.add_item(f) key = MemoizationKey(wrapper, args) if key not in cache: global total_cache_misses total_cache_misses += 1 result = f(*args) cache[key] = result return result return cache[key] if inspect.ismethod(f): meth_name = f.__name__ + "_wrapper" setattr(f.__self__, meth_name, wrapper) else: f._wrapper = wrapper return wrapper # In Python, how do we memoize a constructor to ensure that instances of # a class with the same constructor arguments are reference-equal? We could # put the @memoize attribute on the class, but this leads to many problems. # # ASIDE: What problems? And why? # # A class is a function that constructs instances; a decorator is a function # from functions to functions. "@memoize class C: ..." passes the instance- # construction function to the decorator and assigns the result to C; this means # that C is no longer a *type*; it is the *function* returned by the decorator. # This in turn means that "instanceof(c, C)" no longer works because C is not a type. # Similarly C cannot be a base class because it is not a type. And so on. # # END ASIDE # # The correct way to do this in Python is to create a metaclass. 
A class is a factory # for instances; a metaclass is a factory for classes. We can create a metaclass which # produces classes that are memoized. # # The default metaclass in Python is "type"; if you call "type(name, bases, attrs)" # where name is the name of the new type, bases is a tuple of base types, and attrs # is a dictionary of name-value pairs, then you get back a new class with that name, # base classes, and attributes. We can derive from type to make new metaclasses: class MemoizedClass(type): # __new__ is called when the metaclass creates a new class. # metacls is the "self" of the metaclass # name is the name of the class we're creating # bases is a tuple of base types # attrs is a dictionary of attributes def __new__( metacls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any] ) -> type: # The memoized values will be stored in a per-class dictionary called # _cache, so make sure that the attributes dictionary has that. if "_cache" not in attrs: attrs["_cache"] = {} # That's the only special thing we need to do, so defer the actual # type creation to the "type" metaclass -- our base type. return super(MemoizedClass, metacls).__new__(metacls, name, bases, attrs) # A class is a function which constructs instances; when that function is # called to construct an instance, the __call__ handler is invoked in the # metaclass. By default type.__call__ simply creates a new instance. We # can replace that behavior by overriding the __call__ handler to do something # else. # # cls is the class that we are trying to create an instance of; *args is # the argument list passed to the constructor. def __call__(cls, *args): # TODO: We do not collect statistics on memoization use here. # TODO: We do not canonicalize arguments as the memoizer does above. if args not in cls._cache: # This is the first time we've constructed this class with these # arguments. Defer to the __call__ behavior of "type", which is the # superclass of this metaclass. new_instance = super(MemoizedClass, cls).__call__(*args) cls._cache[args] = new_instance return new_instance return cls._cache[args] # You then use this as # class Foo(FooBase, metaclass=MemoizedClass): ...
beanmachine-main
src/beanmachine/ppl/utils/memoize.py
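Usage sketch (illustrative only): `memoize` caches function results keyed on tensor-normalized arguments, and `MemoizedClass` is a metaclass that makes construction with equal arguments return the same instance.

from beanmachine.ppl.utils.memoize import memoize, MemoizedClass


@memoize
def square(x):
    print("computing", x)
    return x * x


square(3)  # prints "computing 3"
square(3)  # served from the cache; nothing is printed


class Point(metaclass=MemoizedClass):
    def __init__(self, x, y):
        self.x, self.y = x, y


print(Point(1, 2) is Point(1, 2))  # True: same constructor arguments, same instance
print(Point(1, 2) is Point(3, 4))  # False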
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """A helper class to give unique names to a set of objects.""" from typing import Any, Callable, Dict, Optional def make_namer( namer: Optional[Callable[[Any], str]] = None, prefix: str = "" ) -> Callable[[Any], str]: if namer is None: un = UniqueNames(prefix) return lambda x: un.name(x) else: return namer class UniqueNames(object): _map: Dict[Any, str] _prefix: str def __init__(self, prefix: str = ""): self._map = {} self._prefix = prefix def name(self, o: Any) -> str: if o.__hash__ is None: # This can lead to a situation where two objects are given the # same name; if the object is named, then freed, and then a different # object is allocated at the same address, the ID will be re-used. # Ideally, the instance of UniqueNames should be longer-lived than # any of the named objects. o = "unhashable " + str(id(o)) if o not in self._map: self._map[o] = self._prefix + str(len(self._map)) return self._map[o]
beanmachine-main
src/beanmachine/ppl/utils/unique_name.py
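Usage sketch (illustrative only): names are assigned in first-seen order and are stable per object for the lifetime of the UniqueNames instance.

from beanmachine.ppl.utils.unique_name import make_namer, UniqueNames

namer = UniqueNames(prefix="N")
a, b = object(), object()
print(namer.name(a))  # "N0"
print(namer.name(b))  # "N1"
print(namer.name(a))  # "N0" again

# make_namer wraps the same behaviour behind a plain callable (or passes a custom namer through).
to_name = make_namer(prefix="T")
print(to_name(a))     # "T0"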
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """A mutable graph builder""" from hashlib import md5 from typing import Callable, Dict, Generic, List, Optional, TypeVar from beanmachine.ppl.utils.dotbuilder import DotBuilder from beanmachine.ppl.utils.equivalence import partition_by_kernel from beanmachine.ppl.utils.unique_name import make_namer # A plate is a collection of nodes and plates. # A graph is a single plate with a collection of edges. # That is, in this model of a graph, only the topmost level # contains the edges; plates contain no edges. T = TypeVar("T") # Node type class Plate(Generic[T]): # Yes, using lists means that we have O(n) removal. But removals are # rare, the lists are typically short, and lists guarantee that # the enumeration order is deterministic, which means that we get # repeatable behavior for testing. _plates: "List[Plate[T]]" _parent: "Optional[Plate[T]]" _graph: "Graph[T]" _nodes: List[T] def __init__(self, graph: "Graph[T]", parent: "Optional[Plate[T]]") -> None: self._plates = [] self._parent = parent self._graph = graph self._nodes = [] def with_plate(self) -> "Plate[T]": """Add a new Plate to this Plate; returns the new Plate""" sub = Plate(self._graph, self) self._plates.append(sub) return sub def without_plate(self, sub: "Plate[T]") -> "Plate[T]": """Removes a given Plate, and all its Plates, and all its nodes.""" if sub in self._plates: # Recursively destroy every nested Plate. # We're going to be modifying a collection as we enumerate it # so make a copy. for subsub in sub._plates.copy(): sub.without_plate(subsub) # Destroy every node. for node in sub._nodes.copy(): sub.without_node(node) # Delete the Plate self._plates.remove(sub) return self def with_node(self, node: T) -> "Plate[T]": """Adds a new node to the plate, or, if the node is already in the graph, moves it to this plate. Edges are unaffected by moves.""" if node not in self._nodes: # Remove the node from its current Plate. if node in self._graph._nodes: self._graph._nodes[node]._nodes.remove(node) # Let the graph know that this node is in this Plate. self._graph._nodes[node] = self self._nodes.append(node) # If this is a new node, set its incoming and outgoing # edge sets to empty. If it is not a new node, keep # them the same. 
if node not in self._graph._outgoing: self._graph._outgoing[node] = [] if node not in self._graph._incoming: self._graph._incoming[node] = [] return self def without_node(self, node: T) -> "Plate[T]": if node in self._nodes: # Remove the node del self._graph._nodes[node] self._nodes.remove(node) # Delete all the edges associated with this node for o in list(self._graph._outgoing[node]): self._graph._incoming[o].remove(node) for i in list(self._graph._incoming[node]): self._graph._outgoing[i].remove(node) del self._graph._outgoing[node] del self._graph._incoming[node] return self def with_edge(self, start: T, end: T) -> "Plate[T]": if start not in self._graph._nodes: self.with_node(start) if end not in self._graph._nodes: self.with_node(end) self._graph._incoming[end].append(start) self._graph._outgoing[start].append(end) return self class Graph(Generic[T]): _nodes: Dict[T, Plate[T]] _outgoing: Dict[T, List[T]] _incoming: Dict[T, List[T]] _top: Plate[T] _to_name: Callable[[T], str] _to_label: Callable[[T], str] _to_kernel: Callable[[T], str] def __init__( self, to_name: Optional[Callable[[T], str]] = None, to_label: Callable[[T], str] = str, to_kernel: Callable[[T], str] = str, ): # to_name gives a *unique* name to a node. # to_label gives a *not necessarily unique* label when *displaying* a graph. # to_kernel gives a string that is always the same if two nodes are to # be treated as isomorphic. This lets us make labels in the output that # are different than the isomorphism kernel. self._nodes = {} self._outgoing = {} self._incoming = {} self._top = Plate(self, None) self._to_name = make_namer(to_name, "N") self._to_label = to_label self._to_kernel = to_kernel def with_plate(self) -> "Plate[T]": """Add a plate to the top level; returns the plate""" return self._top.with_plate() def without_plate(self, sub: Plate[T]) -> "Graph[T]": """Removes a plate from the top level, and all its plates, and all its nodes.""" self._top.without_plate(sub) return self def global_without_plate(self, sub: Plate[T]) -> "Graph[T]": """Remove a plate no matter where it is, and all its plates, and all its nodes.""" if sub._graph == self: p = sub._parent if p is not None: # This should never happen p.without_plate(sub) return self def with_node(self, node: T) -> "Graph[T]": """Add a node to the top level""" self._top.with_node(node) return self def without_node(self, node: T) -> "Graph[T]": """Remove a node from the top level""" self._top.without_node(node) return self def global_without_node(self, node: T) -> "Graph[T]": """Remove a node no matter where it is""" if node in self._nodes: self._nodes[node].without_node(node) return self def with_edge(self, start: T, end: T) -> "Graph[T]": if start not in self._nodes: self.with_node(start) if end not in self._nodes: self.with_node(end) if start not in self._incoming[end]: self._incoming[end].append(start) if end not in self._outgoing[start]: self._outgoing[start].append(end) return self def without_edge(self, start: T, end: T) -> "Graph[T]": if start in self._nodes and end in self._nodes: self._incoming[end].remove(start) self._outgoing[start].remove(end) return self def _is_dag(self, node: T) -> bool: if node not in self._nodes: return True in_flight: List[T] = [] done: List[T] = [] def depth_first(current: T) -> bool: if current in in_flight: return False if current in done: return True in_flight.append(current) for child in self._outgoing[current]: if not depth_first(child): return False in_flight.remove(current) done.append(current) return True return 
depth_first(node) def _dag_hash(self, current: T, map: Dict[T, str]) -> str: if current in map: return map[current] label = self._to_kernel(current) children = (self._dag_hash(c, map) for c in self._outgoing[current]) summary = label + "/".join(sorted(children)) hash = md5(summary.encode("utf-8")).hexdigest() map[current] = hash return hash def are_dags_isomorphic(self, n1: T, n2: T) -> bool: """Determines if two nodes in a graph, which must both be roots of a DAG, are isomorphic. Node labels are given by the function, which must return the same string for two nodes iff the two nodes are value-equal for the purposes of isomorphism detection.""" map: Dict[T, str] = {} assert self._is_dag(n1) assert self._is_dag(n2) h1 = self._dag_hash(n1, map) h2 = self._dag_hash(n2, map) return h1 == h2 def merge_isomorphic(self, n1: T, n2: T) -> bool: """Merges two isomorphic nodes. Returns true if there was any merge made.""" # All edges of n2 become edges of n1, and n2 is deleted. if n1 not in self._nodes or n2 not in self._nodes: return False for in_n2 in self._incoming[n2]: self.with_edge(in_n2, n1) for out_n2 in self._outgoing[n2]: self.with_edge(n1, out_n2) self.without_node(n2) return True def merge_isomorphic_many(self, nodes: List[T]) -> bool: """Merges a collection of two or more isomorphic nodes into nodes[0] Returns true if there was any merge made.""" result = False for i in range(1, len(nodes)): result = self.merge_isomorphic(nodes[0], nodes[i]) or result return result def merge_isomorphic_children(self, node: T) -> bool: """Merges all the isomorphic children of a node. Returns true if there was any merge made. The surviving node is the one with the least name.""" if node not in self._outgoing: return False map: Dict[T, str] = {} def kernel(n: T) -> str: return self._dag_hash(n, map) equivalence_classes = partition_by_kernel(self._outgoing[node], kernel) result = False for eqv in equivalence_classes: result = ( self.merge_isomorphic_many(sorted(eqv, key=self._to_name)) or result ) return result def outgoing(self, node: T) -> List[T]: if node in self._outgoing: return list(self._outgoing[node]) return [] def incoming(self, node: T) -> List[T]: if node in self._incoming: return list(self._incoming[node]) return [] def reachable(self, node: T) -> List[T]: # Given a node in a graph, return the transitive closure of outgoing # nodes, including the original node. if node not in self._nodes: return [] in_flight: List[T] = [] done: List[T] = [] def depth_first(current: T) -> None: if (current not in in_flight) and (current not in done): in_flight.append(current) for child in self._outgoing[current]: depth_first(child) in_flight.remove(current) done.append(current) depth_first(node) return done def to_dot(self) -> str: """Converts a graph to a program in the DOT language.""" db: DotBuilder = DotBuilder() def add_nodes(sub: Plate[T], name: str) -> None: if name != "": db.start_subgraph(name, True) namer = make_namer(prefix=name + "_") for subsub in sub._plates: add_nodes(subsub, namer(subsub)) for n in sub._nodes: db.with_node(self._to_name(n), self._to_label(n)) if name != "": db.end_subgraph() add_nodes(self._top, "") for start, ends in self._outgoing.items(): for end in ends: db.with_edge(self._to_name(start), self._to_name(end)) return str(db)
beanmachine-main
src/beanmachine/ppl/utils/graph.py
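A minimal usage sketch for the Graph and Plate builder above, assuming the module is importable as beanmachine.ppl.utils.graph (per the file path). The string nodes, the ":"-suffix naming scheme, and the kernel function are invented for illustration; they show how isomorphic children are detected and merged, and how the result is rendered as DOT.

from beanmachine.ppl.utils.graph import Graph

# The kernel ignores the suffix after ":" so that "leaf:a" and "leaf:b" are
# treated as the same value for isomorphism checks.
g: Graph[str] = Graph(to_label=str, to_kernel=lambda n: n.split(":")[0])
g.with_edge("sum", "leaf:a").with_edge("sum", "leaf:b")
g.with_edge("leaf:a", "x").with_edge("leaf:b", "x")

assert g.are_dags_isomorphic("leaf:a", "leaf:b")
g.merge_isomorphic_children("sum")            # collapses the two leaves into one
assert g.outgoing("sum") in (["leaf:a"], ["leaf:b"])
print(g.to_dot())                             # DOT rendering of the merged graph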
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from beanmachine.ppl.utils.dotbuilder import DotBuilder, print_graph from beanmachine.ppl.utils.equivalence import partition_by_kernel, partition_by_relation from beanmachine.ppl.utils.treeprinter import _is_named_tuple, _to_string, print_tree from beanmachine.ppl.utils.unique_name import make_namer __all__ = [ "print_tree", "_is_named_tuple", "_to_string", "partition_by_relation", "partition_by_kernel", "print_graph", "DotBuilder", "make_namer", ]
beanmachine-main
src/beanmachine/ppl/utils/__init__.py
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """A builder for the graphviz DOT language""" import json import re from typing import Any, Callable, Dict, List, Optional, Set, Tuple from beanmachine.ppl.utils.treeprinter import _is_named_tuple, _to_string from beanmachine.ppl.utils.unique_name import make_namer def _get_children(n: Any) -> List[Tuple[str, Any]]: if isinstance(n, dict): return list(n.items()) if _is_named_tuple(n): return [(k, getattr(n, k)) for k in type(n)._fields] if isinstance(n, tuple) or isinstance(n, list): return [(str(ind), item) for (ind, item) in enumerate(n)] return [] def print_graph( roots: List[Any], get_children: Callable[[Any], List[Tuple[str, Any]]] = _get_children, to_node_name: Optional[Callable[[Any], str]] = None, to_label: Callable[[Any], str] = _to_string, ) -> str: """ This function converts an object representing a graph into a string in the DOT graph display language. The roots are a set of nodes in the graph; the final graph description will contain the transitive closure of the children of all roots. get_children returns a list of (edge_label, node) pairs; if no argument is supplied then a default function that can handle lists, tuples and dictionaries is used. to_node_name returns a *unique* string used to identify the node in the graph. to_label gives a *not necessarily unique* label for a node in a graph. Again if not supplied, a default that can handle dictionaries, lists and tuples is used. """ tnn = make_namer(to_node_name, "N") builder: DotBuilder = DotBuilder() stack: List[Any] = [] stack.extend(roots) done: Set[str] = set() for root in roots: builder.with_node(tnn(root), to_label(root)) while len(stack) > 0: current = stack.pop() current_node = tnn(current) if current_node not in done: for (edge_label, child) in get_children(current): child_node = tnn(child) builder.with_node(child_node, to_label(child)) builder.with_edge(current_node, child_node, edge_label) stack.append(child) done.add(current_node) return str(builder) class DotBuilder: name: str is_subgraph: bool is_cluster: bool _label: str _node_map: "Dict[str, DotNode]" _edges: "Set[DotEdge]" _comments: List[str] _subgraphs: "List[DotBuilder]" _nodes: "List[DotNode]" _current_subgraph: "Optional[DotBuilder]" def __init__( self, name: str = "graph", is_subgraph: bool = False, is_cluster: bool = False ): self.name = name self.is_subgraph = is_subgraph self.is_cluster = is_cluster self._label = "" self._node_map = {} self._edges = set() self._comments = [] self._subgraphs = [] self._nodes = [] self._current_subgraph = None def with_label(self, label: str) -> "DotBuilder": sg = self._current_subgraph if sg is None: self._label = label else: sg.with_label(label) return self def start_subgraph(self, name: str, is_cluster: bool) -> "DotBuilder": sg = self._current_subgraph if sg is None: csg = DotBuilder(name, True, is_cluster) self._current_subgraph = csg self._subgraphs.append(csg) else: sg.start_subgraph(name, is_cluster) return self def end_subgraph(self) -> "DotBuilder": sg = self._current_subgraph if sg is None: raise ValueError("Cannot end a non-existing subgraph.") elif sg._current_subgraph is None: self._current_subgraph = None else: sg.end_subgraph() return self def _get_node(self, name: str) -> "DotNode": if name in self._node_map: return self._node_map[name] new_node = DotNode(name, "", "") self._node_map[name] = new_node 
        self._nodes.append(new_node)
        return new_node

    def with_comment(self, comment: str) -> "DotBuilder":
        sg = self._current_subgraph
        if sg is None:
            self._comments.append(comment)
        else:
            sg.with_comment(comment)
        return self

    def with_node(self, name: str, label: str, color: str = "") -> "DotBuilder":
        sg = self._current_subgraph
        if sg is None:
            n = self._get_node(name)
            n.label = label
            n.color = color
        else:
            sg.with_node(name, label, color)
        return self

    def with_edge(
        self,
        frm: str,
        to: str,
        label: str = "",
        color: str = "",
        constrained: bool = True,
    ) -> "DotBuilder":
        sg = self._current_subgraph
        if sg is None:
            f = self._get_node(frm)
            t = self._get_node(to)
            self._edges.add(DotEdge(f, t, label, color, constrained))
        else:
            sg.with_edge(frm, to, label, color, constrained)
        return self

    def _to_string(self, indent: str, sb: List[str]) -> List[str]:
        new_indent = indent + " "
        sb.append(indent)
        sb.append("subgraph" if self.is_subgraph else "digraph")
        i = ""
        has_name = len(self.name) > 0
        if has_name and self.is_cluster:
            i = smart_quote("cluster_" + self.name)
        elif has_name:
            i = smart_quote(self.name)
        elif self.is_cluster:
            i = "cluster"
        if len(i) > 0:
            sb.append(" " + i)
        sb.append(" {\n")
        for c in self._comments:
            sb.append(new_indent + "// " + c + "\n")
        if len(self._label) > 0:
            sb.append(new_indent + "label=" + smart_quote(self._label) + "\n")
        nodes = sorted(new_indent + str(n) + "\n" for n in self._nodes)
        sb.extend(nodes)
        edges = sorted(new_indent + str(e) + "\n" for e in self._edges)
        sb.extend(edges)
        for db in self._subgraphs:
            sb = db._to_string(new_indent, sb)
        sb.append(indent + "}\n")
        return sb

    def __str__(self):
        return "".join(self._to_string("", []))


class DotNode:
    name: str
    label: str
    color: str

    def __init__(self, name: str, label: str, color: str):
        self.name = name
        self.label = label
        self.color = color

    def __str__(self) -> str:
        props: List[str] = []
        if len(self.label) != 0 and self.label != self.name:
            props.append("label=" + smart_quote(self.label))
        if len(self.color) != 0:
            # Emit the node's color attribute, not its label.
            props.append("color=" + smart_quote(self.color))
        p = "" if len(props) == 0 else "[" + " ".join(props) + "]"
        return smart_quote(self.name) + p + ";"


class DotEdge:
    frm: DotNode
    to: DotNode
    label: str
    color: str
    constrained: bool

    def __init__(
        self, frm: DotNode, to: DotNode, label: str, color: str, constrained: bool
    ):
        self.frm = frm
        self.to = to
        self.label = label
        self.color = color
        self.constrained = constrained

    def __str__(self) -> str:
        props: List[str] = []
        if len(self.label) != 0:
            props.append("label=" + smart_quote(self.label))
        if len(self.color) != 0:
            # Emit the edge's color attribute, not its label.
            props.append("color=" + smart_quote(self.color))
        if not self.constrained:
            props.append("constraint=false")
        p = "" if len(props) == 0 else "[" + " ".join(props) + "]"
        return smart_quote(self.frm.name) + " -> " + smart_quote(self.to.name) + p + ";"


_keywords: List[str] = ["digraph", "edge", "graph", "node", "strict", "subgraph"]
_alphanum = re.compile("^[A-Za-z_][A-Za-z_0-9]*$")
_numeric = re.compile("^[-]?(\\.[0-9]+|[0-9]+(\\.[0-9]*)?)$")


def smart_quote(s: str) -> str:
    if s is None or len(s) == 0:
        return '""'
    if s.lower() in _keywords:
        return json.dumps(s)
    if _alphanum.match(s):
        return s
    if _numeric.match(s):
        return s
    return json.dumps(s)
beanmachine-main
src/beanmachine/ppl/utils/dotbuilder.py
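A short sketch of driving DotBuilder directly, assuming the module path beanmachine.ppl.utils.dotbuilder shown above. The node names, labels, and the "inner" cluster are made up; the printed result is a digraph containing one cluster subgraph.

from beanmachine.ppl.utils.dotbuilder import DotBuilder

db = DotBuilder()
db.with_node("a", "alpha")
db.with_node("b", "beta")
db.with_edge("a", "b", "weight")
db.start_subgraph("inner", True)   # nodes added now land in the cluster
db.with_node("c", "gamma")
db.end_subgraph()
print(db)                          # DOT source, e.g. digraph "graph" { ... }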
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""Helpers for choosing between the English indefinite articles "a" and "an"."""

import re


def _first_word(s: str) -> str:
    # Extract the first alphanumeric word; empty string if there is none.
    r = re.search("\\w+", s)
    return r.group(0) if r else ""


# Words that begin with a vowel letter but are pronounced with a consonant
# sound ("a uniform"), and words that begin with a digit or consonant letter
# but are pronounced with a vowel sound ("an 18").
_always_a = {"uniform"}
_always_an = {"18"}
# Letters whose names begin with a vowel sound; "x" is included because it is
# pronounced "ex", and "8" because it is pronounced "eight".
_vowel_sounds = "aeiouxAEIOUX8"


def use_an(s: str) -> bool:
    w = _first_word(s)
    if len(w) == 0:
        return False
    if any(w.startswith(prefix) for prefix in _always_a):
        return False
    if any(w.startswith(prefix) for prefix in _always_an):
        return True
    return w[0] in _vowel_sounds


def a_or_an(s: str) -> str:
    return "an " + s if use_an(s) else "a " + s


def A_or_An(s: str) -> str:
    return "An " + s if use_an(s) else "A " + s
beanmachine-main
src/beanmachine/ppl/utils/a_or_an.py
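A few illustrative checks of the article chooser above; the example words are arbitrary, and the module path beanmachine.ppl.utils.a_or_an is taken from the file path.

from beanmachine.ppl.utils.a_or_an import a_or_an, A_or_An

assert a_or_an("integer") == "an integer"
assert a_or_an("uniform distribution") == "a uniform distribution"   # consonant sound
assert a_or_an("18-sided die") == "an 18-sided die"                   # vowel sound
assert A_or_An("exponential") == "An exponential"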
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Defines print_tree, a helper function to render Python objects as trees.""" from typing import Any, Callable, List def _is_named_tuple_type(t: type) -> bool: if not isinstance(getattr(t, "_fields", None), tuple): return False if len(t.__bases__) == 1 and t.__bases__[0] == tuple: return True return any(_is_named_tuple_type(b) for b in t.__bases__) def _is_named_tuple(x: Any) -> bool: return _is_named_tuple_type(type(x)) def _get_children_key_value(v: Any) -> List[Any]: if isinstance(v, dict): return list(v.items()) if isinstance(v, list): return v if _is_named_tuple(v): return [(k, getattr(v, k)) for k in type(v)._fields] if isinstance(v, tuple): return list(v) return [v] def _get_children(n: Any) -> List[Any]: if isinstance(n, dict): return list(n.items()) if isinstance(n, list): return n if _is_named_tuple(n): return [(k, getattr(n, k)) for k in type(n)._fields] # for key-value pairs we do not want subtypes of tuple, just tuple. if type(n) == tuple and len(n) == 2: return _get_children_key_value(n[1]) if isinstance(n, tuple): return list(n) return [] def _to_string(n: Any) -> str: if isinstance(n, dict): return "dict" if isinstance(n, list): return "list" # for key-value pairs we do not want subtypes of tuple, just tuple. if type(n) == tuple and len(n) == 2: return str(n[0]) if _is_named_tuple(n): return type(n).__name__ if isinstance(n, tuple): return "tuple" return str(n) def print_tree( root: Any, get_children: Callable[[Any], List[Any]] = _get_children, to_string: Callable[[Any], str] = _to_string, unicode: bool = True, ) -> str: """ Renders an arbitrary Python object as a tree. This is handy for debugging. If you have a specific tree structure imposed on an object, you can pass in your own get_children method; if omitted, a function that handles Python dictionaries, tuples, named tuples and lists is the default. The text of each node is determined by the to_string argument; if omitted a default function is used. The tree produced uses the Unicode box-drawing characters by default; to use straight ASCII characters, pass False for the unicode parameter. """ def pt(node, indent): builder.append(to_string(node)) builder.append("\n") children = get_children(node) for i in range(len(children)): last = i == len(children) - 1 child = children[i] builder.append(indent) builder.append(el if last else tee) builder.append(dash) pt(child, indent + (" " if last else bar) + " ") el = "\u2514" if unicode else "+" tee = "\u251c" if unicode else "+" dash = "\u2500" if unicode else "-" bar = "\u2502" if unicode else "|" builder = [] pt(root, "") return "".join(builder)
beanmachine-main
src/beanmachine/ppl/utils/treeprinter.py
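A small sketch of print_tree on an ordinary nested dictionary, assuming the module path beanmachine.ppl.utils.treeprinter; the data is made up. Passing unicode=False switches to plain ASCII connectors.

from beanmachine.ppl.utils.treeprinter import print_tree

data = {"config": {"layers": [2, 3], "bias": True}, "name": "demo"}
# The default get_children/to_string handle dicts, lists, tuples and named tuples.
print(print_tree(data, unicode=False))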
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # This is just a little wrapper class around a dictionary for quickly # and easily counting how many of each item you've got. from typing import Any, Dict class ItemCounter: items: Dict[Any, int] def __init__(self) -> None: self.items = {} def add_item(self, item: Any) -> None: if item not in self.items: self.items[item] = 1 else: self.items[item] = self.items[item] + 1 def remove_item(self, item: Any) -> None: if item not in self.items: return count = self.items[item] - 1 if count == 0: del self.items[item] else: assert count > 0 self.items[item] = count
beanmachine-main
src/beanmachine/ppl/utils/item_counter.py
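A tiny usage sketch of ItemCounter, assuming the module path beanmachine.ppl.utils.item_counter. Unlike collections.Counter, entries whose count reaches zero are deleted outright rather than kept at 0.

from beanmachine.ppl.utils.item_counter import ItemCounter

c = ItemCounter()
c.add_item("x")
c.add_item("x")
c.add_item("y")
c.remove_item("y")           # count hits zero, so the key is removed entirely
assert c.items == {"x": 2}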
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Tuple import torch import torch.autograd from torch._vmap_internals import _vmap as vmap def gradients( outputs: torch.Tensor, inputs: torch.Tensor, allow_unused: bool = True ) -> Tuple[torch.Tensor, torch.Tensor]: """ Compute the first and the second gradient of the output Tensor w.r.t. the input Tensor. :param output: A Tensor variable with a single element. :param input: A 1-d tensor input variable that was used to compute the output. Note: the input must have requires_grad=True :returns: tuple of Tensor variables -- The first and the second gradient. """ if outputs.numel() != 1: raise ValueError( f"output tensor must have exactly one element, got {outputs.numel()}" ) grad1 = torch.autograd.grad( outputs, inputs, create_graph=True, retain_graph=True, allow_unused=allow_unused )[0].reshape(-1) # using identity matrix to reconstruct the full hessian from vector-Jacobian product hessians = vmap( lambda vec: torch.autograd.grad( grad1, inputs, vec, create_graph=True, retain_graph=True, allow_unused=allow_unused, )[0].reshape(-1) )(torch.eye(grad1.size(0))) return grad1.detach(), hessians.detach() def halfspace_gradients( outputs: torch.Tensor, inputs: torch.Tensor, allow_unused: bool = True ) -> Tuple[torch.Tensor, torch.Tensor]: """ Compute the first and the second gradient of the output Tensor w.r.t. the input Tensor for half space. :param output: A Tensor variable with a single element. :param input: A 1-d tensor input variable that was used to compute the output. Note: the input must have requires_grad=True :returns: tuple of Tensor variables -- The first and the second gradient. """ grad1, hessians = gradients(outputs, inputs, allow_unused) return grad1, torch.diagonal(hessians) def simplex_gradients( outputs: torch.Tensor, inputs: torch.Tensor, allow_unused: bool = True ) -> Tuple[torch.Tensor, torch.Tensor]: """ Compute the first and the second gradient of the output Tensor w.r.t. the input Tensor for simplex. :param output: A Tensor variable with a single element. :param input: A 1-d tensor input variable that was used to compute the output. Note: the input must have requires_grad=True :returns: tuple of Tensor variables -- The first and the second gradient. """ grad1, hessians = gradients(outputs, inputs, allow_unused) hessian_diag = torch.diagonal(hessians).clone() # mask diagonal entries hessians[torch.eye(hessians.size(0)).bool()] = float("-inf") hessian_diag -= hessians.max(dim=0)[0] return grad1, hessian_diag
beanmachine-main
src/beanmachine/ppl/utils/tensorops.py
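A hedged sketch of the gradients helper on a quadratic form, where the gradient and Hessian are known in closed form. It assumes a torch version that still ships torch._vmap_internals, since the module above imports it, and the module path beanmachine.ppl.utils.tensorops.

import torch

from beanmachine.ppl.utils.tensorops import gradients

A = torch.tensor([[3.0, 1.0], [1.0, 2.0]])   # symmetric, so the Hessian is exactly A
x = torch.tensor([1.0, 2.0], requires_grad=True)
out = 0.5 * x @ A @ x                        # scalar output with a single element

grad, hess = gradients(out, x)
assert torch.allclose(grad, A @ x)           # gradient of 0.5 x^T A x is A x
assert torch.allclose(hess, A)               # full Hessian recovered row by row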
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Defines partition, a helper function to partition a set into equivalence classes by an equivalence relation.""" from collections import defaultdict from typing import Callable, Iterable, List, Set, TypeVar _T = TypeVar("T") _K = TypeVar("K") def partition_by_relation( items: Iterable[_T], relation: Callable[[_T, _T], bool] ) -> List[Set[_T]]: # This is a quadratic algorithm, but n is likely to be small. result = [] for item in items: eqv = next(filter((lambda s: relation(next(iter(s)), item)), result), None) if eqv is None: eqv = set() result.append(eqv) eqv.add(item) return result def partition_by_kernel( items: Iterable[_T], kernel: Callable[[_T], _K] ) -> List[Set[_T]]: d = defaultdict(set) for item in items: d[kernel(item)].add(item) return list(d.values())
beanmachine-main
src/beanmachine/ppl/utils/equivalence.py
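A small illustration of the two partition helpers on made-up data: grouping by a kernel function and by the corresponding equivalence relation should produce the same classes.

from beanmachine.ppl.utils.equivalence import partition_by_kernel, partition_by_relation

words = ["apple", "avocado", "banana", "cherry", "clementine"]
by_kernel = partition_by_kernel(words, lambda w: w[0])
by_relation = partition_by_relation(words, lambda a, b: a[0] == b[0])
assert sorted(map(sorted, by_kernel)) == sorted(map(sorted, by_relation))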
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import collections from typing import Set, Tuple from beanmachine.ppl.utils.memoize import tensor_to_tuple from torch import Tensor, tensor # When constructing the support of various nodes we often # must remove duplicates from a set of possible values. # Unfortunately, it is not easy to do so with torch tensors. # This helper class implements a set of tensors using the same # technique as is used in the function call memoizer: we encode # the data in the tensor into a tuple with the same shape. The # tuple implements hashing and equality correctly, so we can put # it in a set. class SetOfTensors(collections.abc.Set): _elements: Set[Tuple] def __init__(self, iterable): self._elements = set() for value in iterable: t = value if isinstance(value, Tensor) else tensor(value) self._elements.add(tensor_to_tuple(t)) def __iter__(self): return (tensor(t) for t in self._elements) def __contains__(self, value): t = value if isinstance(value, Tensor) else tensor(value) return tensor_to_tuple(t) in self._elements def __len__(self): return len(self._elements) def __str__(self): return "\n".join(sorted(str(t) for t in self))
beanmachine-main
src/beanmachine/ppl/utils/set_of_tensors.py
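A brief sketch of SetOfTensors deduplicating value-equal tensors; the particular values are arbitrary, and non-tensor inputs are converted with torch.tensor, as the constructor above does.

import torch

from beanmachine.ppl.utils.set_of_tensors import SetOfTensors

s = SetOfTensors([torch.tensor(1.0), torch.tensor(1.0), torch.tensor([1.0, 2.0]), 2.0])
assert len(s) == 3                      # the duplicate scalar 1.0 is collapsed
assert torch.tensor([1.0, 2.0]) in s    # membership is by value, not identity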
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Dict, Set class MultiDictionary: """A simple append-only multidictionary; values are deduplicated and must be hashable.""" _d: Dict[Any, Set[Any]] def __init__(self) -> None: self._d = {} def add(self, key: Any, value: Any) -> None: if key not in self._d: self._d[key] = {value} else: self._d[key].add(value) def __getitem__(self, key: Any) -> Set[Any]: return self._d[key] if key in self else set() def __iter__(self): return iter(self._d) def __len__(self): return len(self._d) def __contains__(self, key: Any): return key in self._d def keys(self): return self._d.keys() def items(self): return self._d.items() def __repr__(self) -> str: return ( "{" + "\n".join( str(key) + ":{" + ",\n".join(sorted(str(v) for v in self[key])) + "}" for key in self ) + "}" )
beanmachine-main
src/beanmachine/ppl/utils/multidictionary.py
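A short usage sketch of MultiDictionary with made-up keys and values.

from beanmachine.ppl.utils.multidictionary import MultiDictionary

d = MultiDictionary()
d.add("primes", 2)
d.add("primes", 3)
d.add("primes", 3)           # duplicate values are collapsed into the set
d.add("evens", 2)
assert d["primes"] == {2, 3}
assert d["odds"] == set()    # missing keys yield an empty set, not a KeyError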
beanmachine-main
src/beanmachine/ppl/examples/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import beanmachine.ppl as bm import torch.distributions as dist from torch import Tensor class BetaBinomialModel: """This Bean Machine model is an example of conjugacy, where the prior and the likelihood are the Beta and the Binomial distributions respectively. Conjugacy means the posterior will also be in the same family as the prior, Beta. The random variable names theta and x follow the typical presentation of the conjugate prior relation in the form of p(theta|x) = p(x|theta) * p(theta)/p(x). Note: Variable names here follow those used on: https://en.wikipedia.org/wiki/Conjugate_prior """ def __init__(self, alpha: Tensor, beta: Tensor, n: Tensor) -> None: self.alpha_ = alpha self.beta_ = beta self.n_ = n @bm.random_variable def theta(self) -> dist.Distribution: return dist.Beta(self.alpha_, self.beta_) @bm.random_variable def x(self) -> dist.Distribution: return dist.Binomial(self.n_, self.theta())
beanmachine-main
src/beanmachine/ppl/examples/conjugate_models/beta_binomial.py
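A hedged sketch of running inference over BetaBinomialModel. The choice of bm.SingleSiteAncestralMetropolisHastings, the sample counts, and the observed value are illustrative assumptions rather than part of the model file; the same pattern applies to the other conjugate models below. With alpha = beta = 2, n = 10 and x = 7 observed, the exact posterior is Beta(9, 5), so the sample mean of theta should land near 9/14.

import beanmachine.ppl as bm
import torch

from beanmachine.ppl.examples.conjugate_models.beta_binomial import BetaBinomialModel

model = BetaBinomialModel(torch.tensor(2.0), torch.tensor(2.0), torch.tensor(10.0))
samples = bm.SingleSiteAncestralMetropolisHastings().infer(
    queries=[model.theta()],
    observations={model.x(): torch.tensor(7.0)},
    num_samples=500,
    num_chains=2,
)
print(samples[model.theta()].mean())    # expect roughly 9 / 14, about 0.64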
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from beanmachine.ppl.examples.conjugate_models.beta_binomial import BetaBinomialModel from beanmachine.ppl.examples.conjugate_models.categorical_dirichlet import ( CategoricalDirichletModel, ) from beanmachine.ppl.examples.conjugate_models.gamma_gamma import GammaGammaModel from beanmachine.ppl.examples.conjugate_models.gamma_normal import GammaNormalModel from beanmachine.ppl.examples.conjugate_models.normal_normal import NormalNormalModel __all__ = [ "BetaBinomialModel", "CategoricalDirichletModel", "GammaGammaModel", "GammaNormalModel", "NormalNormalModel", ]
beanmachine-main
src/beanmachine/ppl/examples/conjugate_models/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import beanmachine.ppl as bm import torch.distributions as dist from torch import Tensor class CategoricalDirichletModel: def __init__(self, alpha: Tensor) -> None: self.alpha_ = alpha @bm.random_variable def dirichlet(self) -> dist.Distribution: return dist.Dirichlet(self.alpha_) @bm.random_variable def categorical(self) -> dist.Distribution: return dist.Categorical(self.dirichlet())
beanmachine-main
src/beanmachine/ppl/examples/conjugate_models/categorical_dirichlet.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import beanmachine.ppl as bm import torch.distributions as dist from torch import Tensor class NormalNormalModel: def __init__(self, mu: Tensor, std: Tensor, sigma: Tensor) -> None: self.mu_ = mu self.std_ = std self.sigma_ = sigma @bm.random_variable def normal_p(self) -> dist.Distribution: return dist.Normal(self.mu_, self.std_) @bm.random_variable def normal(self) -> dist.Distribution: return dist.Normal(self.normal_p(), self.sigma_)
beanmachine-main
src/beanmachine/ppl/examples/conjugate_models/normal_normal.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import beanmachine.ppl as bm import torch.distributions as dist from torch import Tensor class BetaBernoulliModel: def __init__(self, alpha: Tensor, beta: Tensor) -> None: self.alpha_ = alpha self.beta_ = beta @bm.random_variable def theta(self) -> dist.Distribution: return dist.Beta(self.alpha_, self.beta_) @bm.random_variable def y(self, i: int) -> dist.Distribution: return dist.Bernoulli(self.theta())
beanmachine-main
src/beanmachine/ppl/examples/conjugate_models/beta_bernoulli.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import beanmachine.ppl as bm import torch import torch.distributions as dist from torch import Tensor class GammaNormalModel: def __init__(self, shape: Tensor, rate: Tensor, mu: Tensor) -> None: self.shape_ = shape self.rate_ = rate self.mu_ = mu @bm.random_variable def gamma(self) -> dist.Distribution: return dist.Gamma(self.shape_, self.rate_) @bm.random_variable def normal(self) -> dist.Distribution: # pyre-fixme[58]: `/` is not supported for operand types `int` and `Tensor`. return dist.Normal(self.mu_, 1 / torch.sqrt(self.gamma()))
beanmachine-main
src/beanmachine/ppl/examples/conjugate_models/gamma_normal.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import beanmachine.ppl as bm import torch.distributions as dist from torch import Tensor class GammaGammaModel: def __init__(self, shape: Tensor, rate: Tensor, alpha: Tensor) -> None: self.shape_ = shape self.rate_ = rate self.alpha_ = alpha @bm.random_variable def gamma_p(self) -> dist.Distribution: return dist.Gamma(self.shape_, self.rate_) @bm.random_variable def gamma(self) -> dist.Distribution: return dist.Gamma(self.alpha_, self.gamma_p())
beanmachine-main
src/beanmachine/ppl/examples/conjugate_models/gamma_gamma.py
beanmachine-main
src/beanmachine/ppl/testlib/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from abc import ABCMeta, abstractmethod from typing import Dict, List, Optional, Tuple import numpy as np import scipy.stats import torch from beanmachine.ppl.diagnostics.common_statistics import effective_sample_size from beanmachine.ppl.examples.conjugate_models.beta_binomial import BetaBinomialModel from beanmachine.ppl.examples.conjugate_models.categorical_dirichlet import ( CategoricalDirichletModel, ) from beanmachine.ppl.examples.conjugate_models.gamma_gamma import GammaGammaModel from beanmachine.ppl.examples.conjugate_models.gamma_normal import GammaNormalModel from beanmachine.ppl.examples.conjugate_models.normal_normal import NormalNormalModel from beanmachine.ppl.inference import utils from beanmachine.ppl.inference.base_inference import BaseInference from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.testlib.hypothesis_testing import ( mean_equality_hypothesis_confidence_interval, variance_equality_hypothesis_confidence_interval, ) from torch import Tensor, tensor class AbstractConjugateTests(metaclass=ABCMeta): """ Computes the posterior mean and standard deviation of some of the conjugate distributions included below. https://en.wikipedia.org/wiki/Conjugate_prior#Table_of_conjugate_distributions Note: Whenever possible, we will use same variable names as on that page. """ def compute_statistics(self, predictions: Tensor) -> Tuple[Tensor, Tensor]: """ Computes mean and standard deviation of a given tensor of samples. :param predictions: tensor of samples :returns: mean and standard deviation of the tensor of samples. """ return ( torch.mean(predictions, 0), torch.std(predictions, 0, unbiased=True, keepdim=True), ) def compute_beta_binomial_moments( self, ) -> Tuple[Tensor, Tensor, List[RVIdentifier], Dict[RVIdentifier, Tensor]]: """ Computes mean and standard deviation of a small beta binomial model. :return: expected mean, expected standard deviation, conjugate model queries and observations """ alpha = tensor([2.0, 2.0]) beta = tensor([1.0, 1.0]) n = tensor([1.0, 1.0]) obs = tensor([1.0, 0.0]) model = BetaBinomialModel(alpha, beta, n) queries = [model.theta()] observations = {model.x(): obs} alpha_prime = alpha + obs beta_prime = beta - obs + n mean_prime = alpha_prime / (alpha_prime + beta_prime) std_prime = ( (alpha_prime * beta_prime) / ((alpha_prime + beta_prime).pow(2.0) * (alpha_prime + beta_prime + 1.0)) ).pow(0.5) return (mean_prime, std_prime, queries, observations) def compute_gamma_gamma_moments( self, ) -> Tuple[Tensor, Tensor, List[RVIdentifier], Dict[RVIdentifier, Tensor]]: """ Computes mean and standard deviation of a small gamma gamma model. :return: expected mean, expected standard deviation, conjugate model queries and observations """ shape = tensor([2.0, 2.0]) rate = tensor([2.0, 2.0]) alpha = tensor([1.5, 1.5]) obs = tensor([2.0, 4.0]) model = GammaGammaModel(shape, rate, alpha) queries = [model.gamma_p()] observations = {model.gamma(): obs} shape = shape + alpha rate = rate + obs expected_mean = shape / rate expected_std = (expected_mean / rate).pow(0.5) return (expected_mean, expected_std, queries, observations) def compute_gamma_normal_moments( self, ) -> Tuple[Tensor, Tensor, List[RVIdentifier], Dict[RVIdentifier, Tensor]]: """ Computes mean and standard deviation of a small gamma normal model. 
:return: expected mean, expected standard deviation, conjugate model queries and observations """ shape = tensor([1.0, 1.0]) rate = tensor([2.0, 2.0]) mu = tensor([1.0, 2.0]) obs = tensor([1.5, 2.5]) model = GammaNormalModel(shape, rate, mu) queries = [model.gamma()] observations = {model.normal(): obs} shape = shape + tensor([0.5, 0.5]) deviations = (obs - mu).pow(2.0) rate = rate + (deviations * (0.5)) expected_mean = shape / rate expected_std = (expected_mean / rate).pow(0.5) return (expected_mean, expected_std, queries, observations) def compute_normal_normal_moments( self, ) -> Tuple[Tensor, Tensor, List[RVIdentifier], Dict[RVIdentifier, Tensor]]: """ Computes mean and standard deviation of a small normal normal model. :return: expected mean, expected standard deviation, conjugate model queries and observations """ mu = tensor([1.0, 1.0]) std = tensor([1.0, 1.0]) sigma = tensor([1.0, 1.0]) obs = tensor([1.5, 2.5]) model = NormalNormalModel(mu, std, sigma) queries = [model.normal_p()] observations = {model.normal(): obs} expected_mean = (mu / std.pow(2.0) + obs / sigma.pow(2.0)) / ( # pyre-fixme[58]: `/` is not supported for operand types `float` and # `Tensor`. 1.0 / sigma.pow(2.0) # pyre-fixme[58]: `/` is not supported for operand types `float` and # `Tensor`. + 1.0 / std.pow(2.0) ) expected_std = (std.pow(-2.0) + sigma.pow(-2.0)).pow(-0.5) return (expected_mean, expected_std, queries, observations) def compute_dirichlet_categorical_moments(self): """ Computes mean and standard deviation of a small dirichlet categorical model. :return: expected mean, expected standard deviation, conjugate model queries and observations """ alpha = tensor([0.5, 0.5]) model = CategoricalDirichletModel(alpha) obs = tensor([1.0]) queries = [model.dirichlet()] observations = {model.categorical(): obs} alpha = alpha + tensor([0.0, 1.0]) expected_mean = alpha / alpha.sum() expected_std = (expected_mean * (1 - expected_mean) / (alpha.sum() + 1)).pow( 0.5 ) return (expected_mean, expected_std, queries, observations) def _compare_run( self, moments: Tuple[Tensor, Tensor, List[RVIdentifier], Dict[RVIdentifier, Tensor]], mh: BaseInference, num_chains: int, num_samples: int, random_seed: Optional[int], num_adaptive_samples: int = 0, alpha: float = 0.01, ): # Helper functions for hypothesis tests def chi2(alpha, df): return scipy.stats.chi2.ppf(alpha, df) def z(alpha): return scipy.stats.norm.ppf(alpha) expected_mean, expected_std, queries, observations = moments if random_seed is None: random_seed = 123 utils.seed(random_seed) predictions = mh.infer( queries, observations, num_samples, num_chains=num_chains, num_adaptive_samples=num_adaptive_samples, ) for i in range(predictions.num_chains): sample = predictions.get_chain(i)[queries[0]] mean, std = self.compute_statistics(sample) total_samples = tensor(sample.size())[0].item() n_eff = effective_sample_size(sample.unsqueeze(dim=0)) # For out purposes, it seems more appropriate to use n_eff ONLY # to discount sample size. In particular, we should not allow # n_eff > total_samples n_eff = torch.min(n_eff, tensor(total_samples)) # Hypothesis Testing # First, let's start by making sure that we can assume normalcy of means # pyre-fixme[16]: `AbstractConjugateTests` has no attribute # `assertGreaterEqual`. 
self.assertGreaterEqual( total_samples, 30, msg="Sample size too small for normalcy assumption" ) self.assertGreaterEqual( torch.min(n_eff).item(), 30, msg="Effective sample size too small for normalcy assumption", ) # Second, let us check the means using confidence intervals: lower_bound, upper_bound = mean_equality_hypothesis_confidence_interval( expected_mean, expected_std, n_eff, # pyre-fixme[6]: For 4th param expected `int` but got `float`. alpha, ) below_upper = torch.min(lower_bound <= mean).item() above_lower = torch.min(mean <= upper_bound).item() accept_interval = below_upper and above_lower message = "abs(mean - expected_mean) * sqr(n_eff) / expected_std = " + str( torch.abs(mean - expected_mean) / (expected_std / np.sqrt(n_eff)) ) message = ( " alpha = " + str(alpha) + " z_alpha/2 = " + str(z(1 - alpha / 2)) + " => " + message ) message = ( str(lower_bound) + " <= " + str(mean) + " <= " + str(upper_bound) + ". " + message ) message = ( "Mean outside confidence interval.\n" + "n_eff = " + str(n_eff) + ".\nExpected: " + message ) # pyre-fixme[16]: `AbstractConjugateTests` has no attribute # `assertTrue`. self.assertTrue(accept_interval, msg=message) # Third, let us check the variance using confidence intervals: lower_bound, upper_bound = variance_equality_hypothesis_confidence_interval( expected_std, n_eff - 1, # pyre-fixme[6]: For 3rd param expected `int` but got `float`. alpha, ) below_upper = torch.min(lower_bound <= std).item() above_lower = torch.min(std <= upper_bound).item() accept_interval = below_upper and above_lower message = "(n_eff - 1) * (std/ expected_std) ** 2 = " + str( (n_eff - 1) # pyre-fixme[58]: `**` is not supported for operand types `Tensor` # and `int`. * (std / expected_std) ** 2 ) message = ( " alpha = " + str(alpha) + " chi2_alpha/2 = " + str(chi2(alpha / 2, n_eff - 1)) + " <= " + message + " <= " + " chi2_(1-alpha/2) = " + str(chi2(1 - alpha / 2, n_eff - 1)) ) message = ( str(lower_bound) + " <= " + str(std) + " <= " + str(upper_bound) + ". " + message ) message = ( "Standard deviation outside confidence interval.\n" + "n_eff = " + str(n_eff) + ".\nExpected: " + message ) self.assertTrue(accept_interval, msg=message) continue def beta_binomial_conjugate_run( self, mh: BaseInference, num_chains: int = 1, num_samples: int = 1000, random_seed: Optional[int] = 17, num_adaptive_samples: int = 0, ): """ Tests the inference run for a small beta binomial model. :param mh: inference algorithm :param num_samples: number of samples :param num_chains: number of chains :param random_seed: seed for pytorch random number generator """ moments = self.compute_beta_binomial_moments() self._compare_run( moments, mh, num_chains, num_samples, random_seed, num_adaptive_samples, ) def gamma_gamma_conjugate_run( self, mh: BaseInference, num_chains: int = 1, num_samples: int = 1000, random_seed: Optional[int] = 17, num_adaptive_samples: int = 0, ): """ Tests the inference run for a small gamma gamma model. :param mh: inference algorithm :param num_samples: number of samples :param num_chains: number of chains :param random_seed: seed for pytorch random number generator """ moments = self.compute_gamma_gamma_moments() self._compare_run( moments, mh, num_chains, num_samples, random_seed, num_adaptive_samples, ) def gamma_normal_conjugate_run( self, mh: BaseInference, num_chains: int = 1, num_samples: int = 1000, random_seed: Optional[int] = 17, num_adaptive_samples: int = 0, ): """ Tests the inference run for a small gamma normal model. 
:param mh: inference algorithm :param num_samples: number of samples :param num_chains: number of chains :param random_seed: seed for pytorch random number generator """ moments = self.compute_gamma_normal_moments() self._compare_run( moments, mh, num_chains, num_samples, random_seed, num_adaptive_samples, ) def normal_normal_conjugate_run( self, mh: BaseInference, num_chains: int = 1, num_samples: int = 1000, random_seed: Optional[int] = 17, num_adaptive_samples: int = 0, ): """ Tests the inference run for a small normal normal model. :param mh: inference algorithm :param num_samples: number of samples :param num_chains: number of chains :param random_seed: seed for pytorch random number generator """ moments = self.compute_normal_normal_moments() self._compare_run( moments, mh, num_chains, num_samples, random_seed, num_adaptive_samples, ) def dirichlet_categorical_conjugate_run( self, mh: BaseInference, num_chains: int = 1, num_samples: int = 1000, random_seed: Optional[int] = 17, num_adaptive_samples: int = 0, ): """ Tests the inference run for a small dirichlet categorical model. :param mh: inference algorithm :param num_samples: number of samples :param num_chains: number of chains :param random_seed: seed for pytorch random number generator """ moments = self.compute_dirichlet_categorical_moments() self._compare_run( moments, mh, num_chains, num_samples, random_seed, num_adaptive_samples, ) @abstractmethod def test_beta_binomial_conjugate_run(self): """ To be implemented for all classes extending AbstractConjugateTests. """ raise NotImplementedError( "Conjugate test must implement test_beta_binomial_conjugate_run." ) @abstractmethod def test_gamma_gamma_conjugate_run(self): """ To be implemented for all classes extending AbstractConjugateTests. """ raise NotImplementedError( "Conjugate test must implement test_gamma_gamma_conjugate_run." ) @abstractmethod def test_gamma_normal_conjugate_run(self): """ To be implemented for all classes extending AbstractConjugateTests. """ raise NotImplementedError( "Conjugate test must implement test_gamma_normal_conjugate_run." ) @abstractmethod def test_normal_normal_conjugate_run(self): """ To be implemented for all classes extending AbstractConjugateTests. """ raise NotImplementedError( "Conjugate test must implement test_normal_normal_conjugate_run." ) @abstractmethod def test_dirichlet_categorical_conjugate_run(self): """ To be implemented for all classes extending AbstractConjugateTests. """ raise NotImplementedError( "Conjugate test must implement test_categorical_dirichlet_conjugate_run." )
beanmachine-main
src/beanmachine/ppl/testlib/abstract_conjugate.py
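A hedged sketch of how a concrete test case might plug an inference method into AbstractConjugateTests; the class name and the use of bm.SingleSiteAncestralMetropolisHastings are assumptions for illustration. The abstract base supplies the conjugate models, the moment calculations and the hypothesis tests, so each test method only names the sampler and, optionally, adjusts the sample counts.

import unittest

import beanmachine.ppl as bm
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests


class SingleSiteAncestralConjugateTest(unittest.TestCase, AbstractConjugateTests):
    def test_beta_binomial_conjugate_run(self):
        self.beta_binomial_conjugate_run(bm.SingleSiteAncestralMetropolisHastings())

    def test_gamma_gamma_conjugate_run(self):
        self.gamma_gamma_conjugate_run(bm.SingleSiteAncestralMetropolisHastings())

    def test_gamma_normal_conjugate_run(self):
        self.gamma_normal_conjugate_run(
            bm.SingleSiteAncestralMetropolisHastings(), num_samples=5000
        )

    def test_normal_normal_conjugate_run(self):
        self.normal_normal_conjugate_run(bm.SingleSiteAncestralMetropolisHastings())

    def test_dirichlet_categorical_conjugate_run(self):
        self.dirichlet_categorical_conjugate_run(
            bm.SingleSiteAncestralMetropolisHastings()
        )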
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import scipy.stats as stats import torch # This module defines hypothesis tests for equal means and equal variance # Helper functions: # Inverse of CDF of normal distribution at given probability inverse_normal_cdf = stats.norm.ppf # Inverse of CDF of chi-squared distribution at given probability def inverse_chi2_cdf(df, p): return stats.chi2(df).ppf(p) # Hypothesis test for equality of sample mean to a true mean def mean_equality_hypothesis_test( sample_mean: torch.Tensor, true_mean: torch.Tensor, true_std: torch.Tensor, sample_size: torch.Tensor, p_value: int, ): """Test for the null hypothesis that the mean of a Gaussian distribution is within the central 1 - alpha confidence interval (CI) for a sample of size sample_size. We also apply an adjustment that takes into account that we do the test pointwise independently for each element of the tensor. This is basically the Dunn-Šidák correction, https://en.wikipedia.org/wiki/%C5%A0id%C3%A1k_correction""" if torch.min(sample_size) <= 0: return False dimensions = torch.numel(true_mean) # treat scalar and 1-D tensors the same # early exit for empty tensor if dimensions == 0: return False if torch.max(true_std <= 0): return False adjusted_p_value = 1 - (1 - p_value) ** (1.0 / dimensions) test_result = torch.max( torch.abs(sample_mean - true_mean) * np.sqrt(sample_size) / true_std ) <= inverse_normal_cdf(1 - adjusted_p_value / 2) return test_result # The following function explicitly constructs a confidence interval. # This provides an alternative way for performing the hypothesis test, # but which also makes reporting test failures easier. def mean_equality_hypothesis_confidence_interval( true_mean: torch.Tensor, true_std: torch.Tensor, sample_size: torch.Tensor, p_value: int, ): """Computes the central 1 - p_value confidence interval in which the sample mean can fall without causing us to reject the null hypothesis that the mean of a Gaussian distribution for a sample of size sample_size. We also apply an adjustment that takes into account that we do the test pointwise independently for each element of the tensor. This is basically the Dunn-Šidák correction, https://en.wikipedia.org/wiki/%C5%A0id%C3%A1k_correction""" # TODO: Consider refactoring the common input checks for both methods if torch.min(sample_size) <= 0: return None dimensions = torch.numel(true_mean) # treat scalar and 1-D tensors the same # early exit for empty tensor if dimensions == 0: return None if torch.max(true_std == 0): return None adjusted_p_value = 1 - (1 - p_value) ** (1.0 / dimensions) bound_std = true_std / np.sqrt(sample_size) z_score = inverse_normal_cdf(1 - adjusted_p_value / 2) # TODO: We use z_{1-alpha} instead of -z_alpha for compatibility # with mean_equality_hypothesis_test. Ideally, both should be # changed to use the unmodified bounds. 
In any case, the two # functions should be matched for consistency lower_bound = true_mean - bound_std * z_score upper_bound = true_mean + bound_std * z_score return lower_bound, upper_bound # Hypothesis test for equality of sample variance to a true variance def variance_equality_hypothesis_test( sample_std: torch.Tensor, true_std: torch.Tensor, degrees_of_freedom: torch.Tensor, alpha: int, ): """Test for the null hypothesis that the variance of a Gaussian distribution is within the central 1 - alpha confidence interval (CI) for a sample of effective sample size (ESS) degrees_of_freedom. We also apply an adjustment that takes into account that we do the test pointwise independently for each element of the tensor. This is basically the Dunn-Šidák correction, https://en.wikipedia.org/wiki/%C5%A0id%C3%A1k_correction""" if torch.min(degrees_of_freedom).item() <= 0: return False dimensions = torch.prod(torch.tensor(torch.Tensor.size(true_std))).item() if dimensions == 0: return False if torch.max(true_std <= 0).item(): return False adjusted_alpha = 1 - (1 - alpha) ** (1.0 / dimensions) # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. test_statistic = degrees_of_freedom * (sample_std / true_std) ** 2 lower_bound = inverse_chi2_cdf(degrees_of_freedom, adjusted_alpha / 2) upper_bound = inverse_chi2_cdf(degrees_of_freedom, 1 - adjusted_alpha / 2) lower_bound_result = lower_bound <= torch.min(test_statistic).item() upper_bound_result = torch.max(test_statistic).item() <= upper_bound test_result = lower_bound_result and upper_bound_result return test_result # The following function explicitly constructs a confidence interval. # This provides an alternative way for performing the hypothesis test, # but which also makes reporting test failures easier. def variance_equality_hypothesis_confidence_interval( true_std: torch.Tensor, degrees_of_freedom: torch.Tensor, alpha: int ): """Computes the central 1 - alpha confidence interval in which the sample variance can fall without causing us to reject the null hypothesis that the variance of a Gaussian distribution for a sample of size sample_size. We also apply an adjustment that takes into account that we do the test pointwise independently for each element of the tensor. This is basically the Dunn-Šidák correction, https://en.wikipedia.org/wiki/%C5%A0id%C3%A1k_correction""" if torch.min(degrees_of_freedom).item() <= 0: return None dimensions = torch.prod(torch.tensor(torch.Tensor.size(true_std))).item() if dimensions == 0: return None if torch.max(true_std == 0).item(): return None adjusted_alpha = 1 - (1 - alpha) ** (1.0 / dimensions) lower_bound = ( inverse_chi2_cdf(degrees_of_freedom, adjusted_alpha / 2) / degrees_of_freedom ) ** 0.5 * true_std upper_bound = ( inverse_chi2_cdf(degrees_of_freedom, 1 - adjusted_alpha / 2) / degrees_of_freedom ) ** 0.5 * true_std return lower_bound, upper_bound
beanmachine-main
src/beanmachine/ppl/testlib/hypothesis_testing.py
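A small consistency sketch for the mean-equality helpers above, on made-up numbers: the boolean hypothesis test and the explicit confidence interval should agree, which is exactly the pairing the module's comments ask to keep matched.

import torch

from beanmachine.ppl.testlib.hypothesis_testing import (
    mean_equality_hypothesis_confidence_interval,
    mean_equality_hypothesis_test,
)

true_mean = torch.tensor([0.0, 1.0])
true_std = torch.tensor([1.0, 2.0])
sample_size = torch.tensor(400.0)
sample_mean = torch.tensor([0.03, 1.10])
alpha = 0.05

accept = mean_equality_hypothesis_test(sample_mean, true_mean, true_std, sample_size, alpha)
lower, upper = mean_equality_hypothesis_confidence_interval(true_mean, true_std, sample_size, alpha)
in_interval = bool(torch.all((lower <= sample_mean) & (sample_mean <= upper)))
assert bool(accept) == in_interval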
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from functools import wraps from typing import Callable, Union import torch import torch.distributions as dist from beanmachine.ppl.inference.vi.variational_world import VariationalWorld from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.world import get_world_context from typing_extensions import ParamSpec P = ParamSpec("P") class StatisticalModel: """ Parent class to all statistical models implemented in Bean Machine. Every random variable in the model needs to be defined with function declaration wrapped the ``bm.random_variable`` . Every deterministic functional that a user would like to query during inference should be wrapped in a ``bm.functional`` . Every parameter of the guide distribution that is to be learned via variational inference should be wrapped in a ``bm.param`` . """ @staticmethod def get_func_key(wrapper, arguments) -> RVIdentifier: """ Creates a key to uniquely identify the Random Variable. Args: wrapper: reference to the wrapper function arguments: function arguments Returns: Tuple of function and arguments which is to be used to identify a particular function call. """ return RVIdentifier(wrapper=wrapper, arguments=arguments) @staticmethod def random_variable( f: Callable[P, dist.Distribution] ) -> Callable[P, Union[RVIdentifier, torch.Tensor]]: """ Decorator to be used for every stochastic random variable defined in all statistical models. E.g.:: @bm.random_variable def foo(): return Normal(0., 1.) def foo(): return Normal(0., 1.) foo = bm.random_variable(foo) """ @wraps(f) def wrapper( *args: P.args, **kwargs: P.kwargs ) -> Union[RVIdentifier, torch.Tensor]: func_key = StatisticalModel.get_func_key(wrapper, args, **kwargs) world = get_world_context() if world is None: return func_key else: return world.update_graph(func_key) wrapper.is_functional = False wrapper.is_random_variable = True return wrapper @staticmethod def functional( f: Callable[P, torch.Tensor] ) -> Callable[P, Union[RVIdentifier, torch.Tensor]]: """ Decorator to be used for every query defined in statistical model, which are functions of ``bm.random_variable`` :: @bm.random_variable def foo(): return Normal(0., 1.) @bm.functional(): def bar(): return foo() * 2.0 """ @wraps(f) def wrapper( *args: P.args, **kwargs: P.kwargs ) -> Union[RVIdentifier, torch.Tensor]: world = get_world_context() if world is None: return StatisticalModel.get_func_key(wrapper, args, **kwargs) else: return f(*args, **kwargs) wrapper.is_functional = True wrapper.is_random_variable = False return wrapper @staticmethod def param(init_fn): """ Decorator to be used for params (variable to be optimized with VI).:: @bm.param def mu(): return torch.zeros(2) @bm.random_variable def foo(): return Normal(mu(), 1.) """ @wraps(init_fn) def wrapper(*args): func_key = StatisticalModel.get_func_key(wrapper, args) world = get_world_context() if world is None: return func_key else: assert isinstance( world, VariationalWorld ), "encountered params outside of VariationalWorld, this should never happen." return world.get_param(func_key) return wrapper random_variable = StatisticalModel.random_variable functional = StatisticalModel.functional param = StatisticalModel.param
beanmachine-main
src/beanmachine/ppl/model/statistical_model.py
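A minimal sketch of the decorators defined above; the model functions are made up. Outside of inference there is no active world, so calling a decorated function returns its RVIdentifier key instead of a sampled value, which is how query and observation dictionaries are keyed.

import beanmachine.ppl as bm
import torch.distributions as dist


@bm.random_variable
def weight():
    return dist.Normal(0.0, 1.0)


@bm.functional
def weight_squared():
    return weight() ** 2


key = weight()                          # no world is active, so this is an RVIdentifier
print(key.is_random_variable)           # True
print(key.function.__name__)            # "weight"
print(weight_squared().is_functional)   # True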
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.model.statistical_model import ( functional, param, random_variable, StatisticalModel, ) from beanmachine.ppl.model.utils import get_beanmachine_logger __all__ = [ "Mode", "RVIdentifier", "StatisticalModel", "functional", "param", "query", "random_variable", "sample", "get_beanmachine_logger", ]
beanmachine-main
src/beanmachine/ppl/model/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import warnings from dataclasses import dataclass from typing import Any, Callable, Tuple import torch @dataclass(eq=True, frozen=True) class RVIdentifier: """ Struct representing the unique key corresponding to a BM random variable. """ wrapper: Callable arguments: Tuple def __post_init__(self): for arg in self.arguments: if torch.is_tensor(arg): warnings.warn( "PyTorch tensors are hashed by memory address instead of value. " "Therefore, it is not recommended to use tensors as indices of random variables.", stacklevel=3, ) def __str__(self): return str(self.function.__name__) + str(self.arguments) def __lt__(self, other: Any) -> bool: # define comparison so that functorch doesn't raise when it tries to # sort dictionary keys (https://fburl.com/0gomiv80). This can be # removed with the v0.2.1+ release of functorch. if isinstance(other, RVIdentifier): return str(self) < str(other) return NotImplemented @property def function(self): return self.wrapper.__wrapped__ @property def is_functional(self): w = self.wrapper assert hasattr(w, "is_functional") return w.is_functional @property def is_random_variable(self): w = self.wrapper assert hasattr(w, "is_random_variable") return w.is_random_variable
beanmachine-main
src/beanmachine/ppl/model/rv_identifier.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from enum import Enum import torch class LogLevel(Enum): """ Enum class mapping the logging levels to numeric values. """ ERROR = 40 WARNING = 30 INFO = 20 DEBUG_UPDATES = 16 DEBUG_PROPOSER = 14 DEBUG_GRAPH = 12 def get_beanmachine_logger( console_level: LogLevel = LogLevel.WARNING, file_level: LogLevel = LogLevel.INFO ) -> logging.Logger: console_handler = logging.StreamHandler() console_handler.setLevel(console_level.value) file_handler = logging.FileHandler("beanmachine.log") file_handler.setLevel(file_level.value) logger = logging.getLogger("beanmachine") logger.setLevel( file_level.value if file_level.value < console_level.value else console_level.value ) logger.handlers.clear() logger.addHandler(console_handler) logger.addHandler(file_handler) return logger float_types = (torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor)
beanmachine-main
src/beanmachine/ppl/model/utils.py
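A quick sketch of the logger factory above; the level choices are arbitrary. With a console level of ERROR and a file level of DEBUG_GRAPH, info-level messages reach beanmachine.log but are not echoed to the console.

from beanmachine.ppl.model.utils import get_beanmachine_logger, LogLevel

logger = get_beanmachine_logger(
    console_level=LogLevel.ERROR, file_level=LogLevel.DEBUG_GRAPH
)
logger.info("written to beanmachine.log, suppressed on the console")
logger.error("written to both the file and the console")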
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import warnings from typing import Optional, Tuple import numpy as np import torch import torch.fft from torch import Tensor """ Common statistic functions, they all get a Tensor as input and return a Tensor as output """ def mean(query_samples: Tensor) -> Tensor: return query_samples.mean(dim=[0, 1]) def std(query_samples: Tensor) -> Tensor: return torch.std(query_samples, dim=[0, 1]) def confidence_interval(query_samples: Tensor) -> Tensor: percentile_list = [2.5, 50, 97.5] query_dim = query_samples.shape[2:] query_samples = query_samples.reshape(-1, *query_dim) return torch.tensor( np.percentile(query_samples.detach().numpy(), percentile_list, axis=0) ) def _compute_var(query_samples: Tensor) -> Tuple[Tensor, Tensor]: n_chains, n_samples = query_samples.shape[:2] if query_samples.dtype not in [torch.float32, torch.float64]: """TODO have separate diagnostics for discrete variables. This would require passing supprt-type information to Diagnostics. """ query_samples = query_samples.float() if n_chains > 1: per_chain_avg = query_samples.mean(1) b = n_samples * torch.var(per_chain_avg, dim=0) else: b = 0 w = torch.mean(torch.var(query_samples, dim=1), dim=0) # pyre-fixme[58]: `*` is not supported for operand types `float` and `Union[int, # torch._tensor.Tensor]`. var_hat = (n_samples - 1) / n_samples * w + (1 / n_samples) * b return w, var_hat.clamp(min=1e-10) def r_hat(query_samples: Tensor) -> Optional[Tensor]: n_chains = query_samples.shape[0] if n_chains < 2: return None w, var_hat = _compute_var(query_samples) return torch.sqrt(var_hat / w) def split_r_hat(query_samples: Tensor) -> Optional[Tensor]: n_chains, n_samples = query_samples.shape[:2] if n_chains < 2: return None n_chains = n_chains * 2 n_samples = n_samples // 2 query_samples = torch.cat(torch.split(query_samples, n_samples, dim=1)[0:2]) w, var_hat = _compute_var(query_samples) return torch.sqrt(var_hat / w) def effective_sample_size(query_samples: Tensor) -> Tensor: n_chains, n_samples, *query_dim = query_samples.shape if query_samples.dtype not in [torch.float32, torch.float64]: """TODO have separate diagnostics for discrete variables. This would require passing supprt-type information to Diagnostics. 
""" query_samples = query_samples.float() samples = query_samples - query_samples.mean(dim=1, keepdim=True) samples = samples.transpose(1, -1) # computes fourier transform (with padding) padding = torch.zeros(samples.shape, dtype=samples.dtype) padded_samples = torch.cat((samples, padding), dim=-1) fvi = torch.view_as_real(torch.fft.fft(padded_samples)) # multiply by complex conjugate acf = fvi.pow(2).sum(-1, keepdim=True) # transform back to reals (with padding) padding = torch.zeros(acf.shape, dtype=acf.dtype) padded_acf = torch.cat((acf, padding), dim=-1) rho_per_chain = torch.fft.ifft(torch.view_as_complex(padded_acf)).real rho_per_chain = rho_per_chain.narrow(-1, 0, n_samples) num_per_lag = torch.tensor(range(n_samples, 0, -1), dtype=samples.dtype) rho_per_chain = torch.div(rho_per_chain, num_per_lag) rho_per_chain = rho_per_chain.transpose(1, -1) rho_avg = rho_per_chain.mean(dim=0) w, var_hat = _compute_var(query_samples) if n_chains > 1: rho = 1 - ((w - rho_avg) / var_hat) else: rho = rho_avg / var_hat rho[0] = 1 # reshape to 2d matrix where each row contains all samples for specific dim rho_2d = torch.stack(torch.unbind(rho, dim=0), dim=-1).reshape(-1, n_samples) rho_sum = torch.zeros(rho_2d.shape[0]) for i, chain in enumerate(torch.unbind(rho_2d, dim=0)): total_sum = torch.tensor(0.0, dtype=samples.dtype) for t in range(n_samples // 2): rho_even = chain[2 * t] rho_odd = chain[2 * t + 1] if rho_even + rho_odd < 0: break else: total_sum += rho_even + rho_odd rho_sum[i] = total_sum rho_sum = torch.reshape(rho_sum, query_dim) tau = -1 + 2 * rho_sum n_eff = torch.div(n_chains * n_samples, tau) if n_eff.isnan().any(): warnings.warn("NaN encountered in computing effective sample size.") return torch.tensor(0.0) return n_eff
beanmachine-main
src/beanmachine/ppl/diagnostics/common_statistics.py
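A small sketch of the convergence statistics on synthetic chains; the tensor of independent draws is made up and the expected values are only approximate. The input shape is (n_chains, n_samples, event dims), matching the functions above.

import torch

from beanmachine.ppl.diagnostics.common_statistics import (
    effective_sample_size,
    mean,
    split_r_hat,
    std,
)

samples = torch.randn(2, 1000, 3)        # two well-mixed chains of i.i.d. draws
print(mean(samples), std(samples))       # near 0 and 1 per dimension
print(split_r_hat(samples))              # near 1.0 when the chains agree
print(effective_sample_size(samples))    # roughly the total draw count, 2 * 1000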
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import functools import math import warnings from typing import Callable, Dict, List, Optional, Tuple import numpy as np import pandas as pd import plotly from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples from beanmachine.ppl.model.rv_identifier import RVIdentifier from plotly.subplots import make_subplots from torch import Tensor from . import common_plots, common_statistics as common_stats class BaseDiagnostics: def __init__(self, samples: MonteCarloSamples): self.samples = samples self.statistics_dict = {} self.plots_dict = {} def _prepare_query_list( self, query_list: Optional[List[RVIdentifier]] = None ) -> List[RVIdentifier]: if query_list is None: return list(self.samples.keys()) for query in query_list: if not (query in self.samples): raise ValueError(f"query {self._stringify_query(query)} does not exist") return query_list def summaryfn(self, func: Callable, display_names: List[str]) -> Callable: """ this function keeps a directory of all summary-related functions, so it could handle the overridden functions and new ones that user defines :param func: method which is going to be executed when summary() is called. :param display_name: the name appears in the summary() output dataframe :returns: user-visible function that can be called over a list of queries """ statistics_name = func.__name__ self.statistics_dict[statistics_name] = (func, display_names) return self._standalone_summary_stat_function(statistics_name, func) def _prepare_summary_stat_input( self, query: RVIdentifier, chain: Optional[int] = None ): query_samples = self.samples[query] if query_samples.shape[0] != 1: # squeeze out non-chain singleton dims query_samples = query_samples.squeeze() if chain is not None: query_samples = query_samples[chain].unsqueeze(0) return query_samples def _create_table( self, query: RVIdentifier, results: List[Tensor], func_list: List[str] ) -> pd.DataFrame: """ this function turns output of each summary stat function to a dataframe """ out_pd = pd.DataFrame() if len(results) > 0: single_result_set = results[0] if single_result_set is not None and len(single_result_set) > 0: for flattened_index in range(single_result_set[0].numel()): index = np.unravel_index( flattened_index, tuple(single_result_set[0].size()) ) row_data = [] rowname = f"{self._stringify_query(query)}{list(index)}" for result in results: num_of_sets = result.size()[0] for set_num in range(num_of_sets): row_data.append(result[set_num][index].item()) cur = pd.DataFrame([row_data], columns=func_list, index=[rowname]) if out_pd.empty: out_pd = cur else: out_pd = pd.concat([out_pd, cur]) return out_pd def _stringify_query(self, query: RVIdentifier) -> str: return f"{query.function.__name__}{query.arguments}" def _execute_summary_stat_funcs( self, query: RVIdentifier, func_dict: Dict[str, Tuple[Callable, str]], chain: Optional[int] = None, raise_warning: bool = False, ): frames = pd.DataFrame() query_results = [] func_list = [] queried_samples = self._prepare_summary_stat_input(query, chain) for _k, (func, display_names) in func_dict.items(): result = func(queried_samples) if result is None: # in the case of r hat and other algorithms, they may return None # if the samples do not have enough chains or have the wrong shape if raise_warning: warnings.warn( f"{display_names} cannot be calculated for the provided samples" ) 
continue # the first dimension is equivalant to the size of the display_names if len(display_names) <= 1: result = result.unsqueeze(0) query_results.append(result) func_list.extend(display_names) out_df = self._create_table(query, query_results, func_list) if frames.empty: frames = out_df else: frames = pd.concat([frames, out_df]) return frames def summary( self, query_list: Optional[List[RVIdentifier]] = None, chain: Optional[int] = None, ) -> pd.DataFrame: """ this function outputs a table summarizing results of registered functions in self.statistics_dict for requested queries in query_list, if chain is None, results correspond to the aggreagated chains """ frames = pd.DataFrame() query_list = self._prepare_query_list(query_list) for query in query_list: out_df = self._execute_summary_stat_funcs( query, self.statistics_dict, chain ) frames = pd.concat([frames, out_df]) frames.sort_index(inplace=True) return frames def _prepare_plots_input( self, query: RVIdentifier, chain: Optional[int] = None ) -> Tensor: """ :param query: the query for which registered plot functions are called :param chain: the chain that query samples are extracted from :returns: tensor of query samples """ query_samples = self.samples[query] if chain is not None: return query_samples[chain].unsqueeze(0) return query_samples def plotfn(self, func: Callable, display_name: str) -> Callable: """ this function keeps a directory of all plot-related functions so it could handle the overridden functions and new ones that user defines :param func: method which is going to be executed when plot() is called. :param display_name: appears as part of the plot title for func :returns: user-visible function that can be called over a list of queries """ self.plots_dict[func.__name__] = (func, display_name) return self._standalone_plot_function(func.__name__, func) def _execute_plot_funcs( self, query: RVIdentifier, func_dict: Dict[str, Tuple[Callable, str]], chain: Optional[int] = None, display: Optional[bool] = False, ): # task T57168727 to add type figs = [] queried_samples = self._prepare_plots_input(query, chain) for _k, (func, display_name) in func_dict.items(): trace, labels = common_plots.plot_helper(queried_samples, func) title = f"{self._stringify_query(query)} {display_name}" fig = self._display_results( trace, [title + label for label in labels], # pyre-fixme[6]: Expected `bool` for 3rd param but got `Optional[bool]`. 
display, ) figs.append(fig) return figs def plot( self, query_list: Optional[List[RVIdentifier]] = None, display: Optional[bool] = False, chain: Optional[int] = None, ): # task T57168727 to add type """ this function outputs plots related to registered functions in self.plots_dict for requested queries in query_list :param query_list: list of queries for which plot functions will be called :param chain: the chain that query samples are extracted from :returns: plotly object holding the results from registered plot functions """ figs = [] query_list = self._prepare_query_list(query_list) for query in query_list: fig = self._execute_plot_funcs(query, self.plots_dict, chain, display) figs.extend(fig) return figs def _display_results( self, traces, labels: List[str], display: bool ): # task T57168727 to add type """ :param traces: a list of plotly objects :param labels: plot labels :returns: a plotly subplot object """ fig = make_subplots( rows=math.ceil(len(traces) / 2), cols=2, subplot_titles=tuple(labels) ) r = 1 for trace in traces: for data in trace: fig.add_trace(data, row=math.ceil(r / 2), col=((r - 1) % 2) + 1) r += 1 if display: plotly.offline.iplot(fig) return fig def _standalone_plot_function(self, func_name: str, func: Callable) -> Callable: """ this function makes each registered plot function directly callable by the user """ @functools.wraps(func) def _wrapper( query_list: List[RVIdentifier], chain: Optional[int] = None, display: Optional[bool] = False, ): figs = [] query_list = self._prepare_query_list(query_list) for query in query_list: fig = self._execute_plot_funcs( query, {func_name: self.plots_dict[func_name]}, chain, display ) figs.extend(fig) return figs return _wrapper def _standalone_summary_stat_function( self, func_name: str, func: Callable ) -> Callable: """ this function makes each registered summary-stat related function directly callable by the user """ @functools.wraps(func) def _wrapper(query_list: List[RVIdentifier], chain: Optional[int] = None): frames = pd.DataFrame() query_list = self._prepare_query_list(query_list) for query in query_list: out_df = self._execute_summary_stat_funcs( query, {func_name: self.statistics_dict[func_name]}, chain, True ) frames = pd.concat([frames, out_df]) return frames return _wrapper class Diagnostics(BaseDiagnostics): def __init__(self, samples: MonteCarloSamples): super().__init__(samples) """ every function related to summary stat should be registered in the constructor """ self.mean = self.summaryfn(common_stats.mean, display_names=["avg"]) self.std = self.summaryfn(common_stats.std, display_names=["std"]) self.confidence_interval = self.summaryfn( common_stats.confidence_interval, display_names=["2.5%", "50%", "97.5%"] ) self.split_r_hat = self.summaryfn( common_stats.split_r_hat, display_names=["r_hat"] ) self.effective_sample_size = self.summaryfn( common_stats.effective_sample_size, display_names=["n_eff"] ) self.trace = self.plotfn(common_plots.trace_plot, display_name="trace") self.autocorr = self.plotfn(common_plots.autocorr, display_name="autocorr")
beanmachine-main
src/beanmachine/ppl/diagnostics/diagnostics.py
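A minimal usage sketch (not part of the repository) of the Diagnostics class defined above; the toy model and the choice of inference method are assumptions for illustration only.

import beanmachine.ppl as bm
import torch.distributions as dist
from beanmachine.ppl.diagnostics import Diagnostics


@bm.random_variable
def theta():
    return dist.Beta(2.0, 2.0)


# Assumed inference call; any multi-chain MonteCarloSamples object works here.
samples = bm.SingleSiteAncestralMetropolisHastings().infer(
    queries=[theta()], observations={}, num_samples=1000, num_chains=4
)
diag = Diagnostics(samples)
summary_df = diag.summary()  # avg, std, 2.5%/50%/97.5%, r_hat, n_eff per query
figures = diag.plot(display=False)  # trace and autocorr plots for each query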
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
Tools for supporting MCMC diagnostics. Will be deprecated in a future release in
favor of arviz integration.
"""

from beanmachine.ppl.diagnostics.diagnostics import Diagnostics


__all__ = ["Diagnostics"]
beanmachine-main
src/beanmachine/ppl/diagnostics/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Callable, List, NamedTuple, Tuple

import numpy as np
import plotly.graph_objs as go
import torch
from torch import Tensor


class SamplesSummary(NamedTuple):
    num_chain: int
    num_samples: int
    single_sample_sz: Tensor


def _samples_info(query_samples: Tensor) -> SamplesSummary:
    return SamplesSummary(
        num_chain=query_samples.size(0),
        num_samples=query_samples.size(1),
        # pyre-fixme[6]: For 3rd param expected `Tensor` but got `Size`.
        single_sample_sz=query_samples.size()[2:],
    )


def trace_helper(
    x: List[List[List[int]]], y: List[List[List[float]]], labels: List[str]
) -> Tuple[List[go.Scatter], List[str]]:
    """
    this function gets results prepared by a plot-related function and outputs
    a tuple including plotly object and its corresponding legend.
    """
    all_traces = []
    num_chains = len(x)
    num_indices = len(x[0])
    for index in range(num_indices):
        trace = []
        for chain in range(num_chains):
            trace.append(
                go.Scatter(
                    x=x[chain][index],
                    y=y[chain][index],
                    mode="lines",
                    name="chain" + str(chain),
                )
            )
        all_traces.append(trace)
    return (all_traces, labels)


def plot_helper(
    query_samples: Tensor, func: Callable
) -> Tuple[List[go.Scatter], List[str]]:
    """
    this function executes a plot-related function, passed as input parameter func,
    and outputs a tuple including plotly object and its corresponding legend.
    """
    num_chain, num_samples, single_sample_sz = _samples_info(query_samples)

    x_axis, y_axis, all_labels = [], [], []
    for chain in range(num_chain):
        flattened_data = query_samples[chain].reshape(num_samples, -1)
        numel = flattened_data[0].numel()
        x_axis_data, y_axis_data, labels = [], [], []
        for i in range(numel):
            index = np.unravel_index(i, single_sample_sz)
            data = flattened_data[:, i]
            partial_label = f" for {list(index)}"

            x_data, y_data = func(data.detach())
            x_axis_data.append(x_data)
            y_axis_data.append(y_data)
            labels.append(partial_label)
        x_axis.append(x_axis_data)
        y_axis.append(y_axis_data)
        all_labels.append(labels)
    return trace_helper(x_axis, y_axis, all_labels[0])


def autocorr(x: Tensor) -> Tuple[List[int], List[float]]:
    def autocorr_calculation(x: Tensor, lag: int) -> Tensor:
        y1 = x[: (len(x) - lag)]
        y2 = x[lag:]
        sum_product = (
            (y1 - (x.mean(dim=0).expand(y1.size())))
            * (y2 - (x.mean(dim=0).expand(y2.size())))
        ).sum(0)
        return sum_product / ((len(x) - lag) * torch.var(x, dim=0))

    max_lag = x.size(0)
    y_axis_data = [autocorr_calculation(x, lag).item() for lag in range(max_lag)]
    x_axis_data = list(range(max_lag))
    return (x_axis_data, y_axis_data)


def trace_plot(x: Tensor) -> Tuple[List[int], Tensor]:
    return (list(range(x.size(0))), x)
beanmachine-main
src/beanmachine/ppl/diagnostics/common_plots.py
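A short sketch (illustrative only; the tensor shape is an assumption) of how the helpers above compose: plot_helper flattens each chain's draws, applies a per-series statistic such as autocorr or trace_plot, and delegates to trace_helper to build per-chain plotly traces.

import torch
from beanmachine.ppl.diagnostics.common_plots import autocorr, plot_helper, trace_plot

fake_samples = torch.randn(2, 500, 3)  # (num_chains, num_samples, event size)
acf_traces, acf_labels = plot_helper(fake_samples, autocorr)
trace_traces, trace_labels = plot_helper(fake_samples, trace_plot)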
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# flake8: noqa

"""Visual diagnostic tools for Bean Machine models."""

import sys
from pathlib import Path


if sys.version_info >= (3, 8):
    # NOTE: We need to import NotRequired from typing_extensions until PEP 655 is
    #       accepted, see https://peps.python.org/pep-0655/. This is to follow the
    #       interface objects in JavaScript that allow keys to not be required using ?.
    from typing import TypedDict

    from typing_extensions import NotRequired
else:
    from typing_extensions import NotRequired, TypedDict


TOOLS_DIR = Path(__file__).parent.resolve()
JS_DIR = TOOLS_DIR.joinpath("js")
JS_DIST_DIR = JS_DIR.joinpath("dist")
beanmachine-main
src/beanmachine/ppl/diagnostics/tools/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""Visual diagnostics tools for Bean Machine models."""

from __future__ import annotations

from functools import wraps
from typing import Callable, TypeVar

from beanmachine.ppl.diagnostics.tools.utils import accessor
from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
from typing_extensions import ParamSpec

P = ParamSpec("P")
R = TypeVar("R")


def _requires_dev_packages(f: Callable[P, R]) -> Callable[P, R]:
    """A utility decorator that allows us to lazily import the plotting modules and
    throw a useful error message when the required dependencies are not installed."""

    @wraps(f)
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        try:
            return f(*args, **kwargs)
        except ModuleNotFoundError as e:
            # The diagnostic tools use packages that are not part of the core
            # BM dependency, so we need to prompt users to manually install
            # those
            raise ModuleNotFoundError(
                "Dev packages are required for the diagnostic widgets, which "
                "can be installed with `pip install 'beanmachine[dev]'`"
            ) from e

    return wrapper


@accessor.register_mcs_accessor("diagnostics")
class DiagnosticsTools:
    """Accessor object for the visual diagnostics tools."""

    def __init__(self: DiagnosticsTools, mcs: MonteCarloSamples) -> None:
        """Initialize."""
        self.mcs = mcs
        self.idata = self.mcs.to_inference_data()

    @_requires_dev_packages
    def marginal1d(self: DiagnosticsTools) -> None:
        """
        Marginal 1D diagnostic tool for a Bean Machine model.

        Returns:
            None: Displays the tool directly in a Jupyter notebook.
        """
        from beanmachine.ppl.diagnostics.tools.marginal1d.tool import Marginal1d

        Marginal1d(self.mcs).show()

    @_requires_dev_packages
    def trace(self: DiagnosticsTools) -> None:
        """
        Trace diagnostic tool for a Bean Machine model.

        Returns:
            None: Displays the tool directly in a Jupyter notebook.
        """
        from beanmachine.ppl.diagnostics.tools.trace.tool import Trace

        Trace(self.mcs).show()
beanmachine-main
src/beanmachine/ppl/diagnostics/tools/viz.py
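Because DiagnosticsTools registers itself as a MonteCarloSamples accessor named "diagnostics", the tools can be reached straight from an inference result. A minimal sketch, assuming `samples` is a MonteCarloSamples object from a previous run:

samples.diagnostics.marginal1d()  # renders the Marginal 1D Bokeh tool inline
samples.diagnostics.trace()  # renders the Trace Bokeh tool inline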
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
beanmachine-main
src/beanmachine/ppl/diagnostics/tools/trace/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Methods used to generate the diagnostic tool.""" from typing import List from beanmachine.ppl.diagnostics.tools.trace import typing from beanmachine.ppl.diagnostics.tools.utils import plotting_utils from bokeh.core.property.wrappers import PropertyValueList from bokeh.models.annotations import Legend, LegendItem from bokeh.models.glyphs import Circle, Line, Quad from bokeh.models.layouts import Column, Row from bokeh.models.sources import ColumnDataSource from bokeh.models.tools import HoverTool from bokeh.models.widgets.inputs import Select from bokeh.models.widgets.markups import Div from bokeh.models.widgets.panels import Panel, Tabs from bokeh.models.widgets.sliders import Slider from bokeh.plotting.figure import figure PLOT_WIDTH = 400 PLOT_HEIGHT = 500 TRACE_PLOT_WIDTH = 600 FIGURE_NAMES = ["marginals", "forests", "traces", "ranks"] # Define what the empty data object looks like in order to make the browser handle all # computations. EMPTY_DATA = {} def create_empty_data(num_chains: int) -> typing.Data: """Create an empty data object for the tool. We do not know a priori how many chains a model will have, so we use this method to build an empty data object with the given number of chains. Parameters ---------- num_chains : int The number of chains from the model. Returns ------- typing.Data An empty data object to be filled by JavaScript. """ output = { "marginals": {}, "forests": {}, "traces": {}, "ranks": {}, } for chain in range(num_chains): chain_index = chain + 1 chain_name = f"chain{chain_index}" marginal = { "line": {"x": [], "y": []}, "chain": [], "mean": [], "bandwidth": [], } forest = { "line": {"x": [], "y": []}, "circle": {"x": [], "y": []}, "chain": [], "mean": [], } trace = { "line": {"x": [], "y": []}, "chain": [], "mean": [], } rank = { "quad": { "left": [], "top": [], "right": [], "bottom": [], "chain": [], "draws": [], "rank": [], }, "line": {"x": [], "y": []}, "chain": [], "rankMean": [], "mean": [], } single_chain_data = [marginal, forest, trace, rank] chain_data = dict(zip(FIGURE_NAMES, single_chain_data)) for figure_name in FIGURE_NAMES: output[figure_name][chain_name] = chain_data[figure_name] return output def create_sources(num_chains: int) -> typing.Sources: """Create Bokeh sources from the given data that will be bound to glyphs. Parameters ---------- num_chains : int The number of chains from the model. Returns ------- typing.Sources A dictionary of Bokeh ColumnDataSource objects. 
""" global EMPTY_DATA if not EMPTY_DATA: EMPTY_DATA = create_empty_data(num_chains=num_chains) output = {} for figure_name, figure_data in EMPTY_DATA.items(): output[figure_name] = {} for chain_name, chain_data in figure_data.items(): output[figure_name][chain_name] = {} if figure_name == "marginals": output[figure_name][chain_name]["line"] = ColumnDataSource( { "x": chain_data["line"]["x"], "y": chain_data["line"]["y"], "chain": chain_data["chain"], "mean": chain_data["mean"], }, ) if figure_name == "forests": output[figure_name][chain_name]["line"] = ColumnDataSource( { "x": chain_data["line"]["x"], "y": chain_data["line"]["y"], }, ) output[figure_name][chain_name]["circle"] = ColumnDataSource( { "x": chain_data["circle"]["x"], "y": chain_data["circle"]["y"], "chain": chain_data["chain"], }, ) if figure_name == "traces": output[figure_name][chain_name]["line"] = ColumnDataSource( { "x": chain_data["line"]["x"], "y": chain_data["line"]["y"], "chain": chain_data["chain"], "mean": chain_data["mean"], }, ) if figure_name == "ranks": output[figure_name][chain_name]["line"] = ColumnDataSource( { "x": chain_data["line"]["x"], "y": chain_data["line"]["y"], "chain": chain_data["chain"], "rankMean": chain_data["rankMean"], }, ) output[figure_name][chain_name]["quad"] = ColumnDataSource( { "left": chain_data["quad"]["left"], "top": chain_data["quad"]["top"], "right": chain_data["quad"]["right"], "bottom": chain_data["quad"]["bottom"], "chain": chain_data["chain"], "draws": chain_data["quad"]["draws"], "rank": chain_data["quad"]["rank"], }, ) return output def create_figures(rv_name: str, num_chains: int) -> typing.Figures: """Create the Bokeh figures used for the tool. Parameters ---------- rv_name : str The string representation of the random variable data. num_chains : int The number of chains from the model. Returns ------- typing.Figures A dictionary of Bokeh Figure objects. """ output = {} for figure_name in FIGURE_NAMES: fig = figure( width=PLOT_WIDTH, height=PLOT_HEIGHT, outline_line_color="black", sizing_mode="scale_both", ) plotting_utils.style_figure(fig) # NOTE: There are several figures where we do not want the x-axis to change its # limits. This is why we set the x_range to an object from Bokeh called # Range1d. if figure_name == "marginals": fig.title = "Marginal" fig.xaxis.axis_label = rv_name fig.yaxis.visible = False elif figure_name == "forests": fig.title = "Forest" fig.xaxis.axis_label = rv_name fig.yaxis.axis_label = "Chain" fig.yaxis.minor_tick_line_color = None fig.yaxis.ticker.desired_num_ticks = num_chains elif figure_name == "traces": fig.title = "Trace" fig.xaxis.axis_label = "Draw from single chain" fig.yaxis.axis_label = rv_name fig.width = TRACE_PLOT_WIDTH elif figure_name == "ranks": fig.title = "Rank" fig.xaxis.axis_label = "Rank from all chains" fig.yaxis.axis_label = "Chain" fig.width = TRACE_PLOT_WIDTH fig.yaxis.minor_tick_line_color = None fig.yaxis.ticker.desired_num_ticks = num_chains output[figure_name] = fig return output def create_glyphs(num_chains: int) -> typing.Glyphs: """Create the glyphs used for the figures of the tool. Parameters ---------- num_chains : int The number of chains from the model. Returns ------- typing.Glyphs A dictionary of Bokeh Glyphs objects. 
""" global EMPTY_DATA if not EMPTY_DATA: EMPTY_DATA = create_empty_data(num_chains=num_chains) palette = plotting_utils.choose_palette(num_colors=num_chains) output = {} for figure_name, figure_data in EMPTY_DATA.items(): output[figure_name] = {} for i, (chain_name, _) in enumerate(figure_data.items()): output[figure_name][chain_name] = {} color = palette[i] if figure_name == "marginals": output[figure_name][chain_name]["line"] = { "glyph": Line( x="x", y="y", line_color=color, line_alpha=0.7, line_width=2.0, name=f"{figure_name}{chain_name.title()}LineGlyph", ), "hover_glyph": Line( x="x", y="y", line_color=color, line_alpha=1.0, line_width=2.0, name=f"{figure_name}{chain_name.title()}LineHoverGlyph", ), } elif figure_name == "forests": output[figure_name][chain_name] = { "line": { "glyph": Line( x="x", y="y", line_color=color, line_alpha=0.7, line_width=2.0, name=f"{figure_name}{chain_name.title()}LineGlyph", ), "hover_glyph": Line( x="x", y="y", line_color=color, line_alpha=1.0, line_width=2.0, name=f"{figure_name}{chain_name.title()}LineHoverGlyph", ), }, "circle": { "glyph": Circle( x="x", y="y", size=10, fill_color=color, fill_alpha=0.7, line_color="white", name=f"{figure_name}{chain_name.title()}CircleGlyph", ), "hover_glyph": Circle( x="x", y="y", size=10, fill_color=color, fill_alpha=1.0, line_color="black", name=f"{figure_name}{chain_name.title()}CircleHoverGlyph", ), }, } if figure_name == "traces": output[figure_name][chain_name]["line"] = { "glyph": Line( x="x", y="y", line_color=color, line_alpha=0.6, line_width=0.6, name=f"{figure_name}{chain_name.title()}LineGlyph", ), "hover_glyph": Line( x="x", y="y", line_color=color, line_alpha=0.6, line_width=1.0, name=f"{figure_name}{chain_name.title()}LineHoverGlyph", ), } if figure_name == "ranks": output[figure_name][chain_name] = { "quad": { "glyph": Quad( left="left", top="top", right="right", bottom="bottom", fill_color=color, fill_alpha=0.7, line_color="white", name=f"{figure_name}{chain_name.title()}QuadGlyph", ), "hover_glyph": Quad( left="left", top="top", right="right", bottom="bottom", fill_color=color, fill_alpha=1.0, line_color="black", name=f"{figure_name}{chain_name.title()}QuadHoverGlyph", ), }, "line": { "glyph": Line( x="x", y="y", line_color="grey", line_alpha=0.7, line_width=3.0, line_dash="dashed", name=f"{figure_name}{chain_name.title()}LineGlyph", ), "hover_glyph": Line( x="x", y="y", line_color="grey", line_alpha=1.0, line_width=3.0, line_dash="solid", name=f"{figure_name}{chain_name.title()}LineGlyph", ), }, } return output def add_glyphs( figures: typing.Figures, glyphs: typing.Glyphs, sources: typing.Sources, ) -> None: """Bind source data to glyphs and add the glyphs to the given figures. Parameters ---------- figures : typing.Figures A dictionary of Bokeh Figure objects. glyphs : typing.Glyphs A dictionary of Bokeh Glyphs objects. sources : typing.Sources A dictionary of Bokeh ColumnDataSource objects. Returns ------- None Adds data bound glyphs to the given figures directly. """ for figure_name, figure_sources in sources.items(): fig = figures[figure_name] for chain_name, source in figure_sources.items(): chain_glyphs = glyphs[figure_name][chain_name] # NOTE: Every figure has a line glyph, so we always add it here. fig.add_glyph( source_or_glyph=source["line"], glyph=chain_glyphs["line"]["glyph"], hover_glyph=chain_glyphs["line"]["hover_glyph"], name=chain_glyphs["line"]["glyph"].name, ) # We want to keep the x-axis from moving when changing queries, so we add # the bounds below from the marginal figure. 
All figures that need to keep # its range stable are linked to the marginal figure's range below. if figure_name == "marginals": pass elif figure_name == "forests": fig.add_glyph( source_or_glyph=source["circle"], glyph=chain_glyphs["circle"]["glyph"], hover_glyph=chain_glyphs["circle"]["hover_glyph"], name=chain_glyphs["circle"]["glyph"].name, ) elif figure_name == "ranks": fig.add_glyph( source_or_glyph=source["quad"], glyph=chain_glyphs["quad"]["glyph"], hover_glyph=chain_glyphs["quad"]["hover_glyph"], name=chain_glyphs["quad"]["glyph"].name, ) # Link figure ranges together. figures["forests"].x_range = figures["marginals"].x_range def create_annotations(figures: typing.Figures, num_chains: int) -> typing.Annotations: """Create any annotations for the figures of the tool. Parameters ---------- figures : typing.Figures A dictionary of Bokeh Figure objects. num_chains : int The number of chains of the model. Returns ------- typing.Annotations A dictionary of Bokeh Annotation objects. """ renderers = [] for _, fig in figures.items(): renderers.extend(PropertyValueList(fig.renderers)) legend_items = [] for chain in range(num_chains): chain_index = chain + 1 chain_name = f"chain{chain_index}" legend_items.append( LegendItem( renderers=[ renderer for renderer in renderers if chain_name in renderer.name.lower() ], label=chain_name, ), ) legend = Legend( items=legend_items, orientation="horizontal", border_line_color="black", click_policy="hide", ) output = {"traces": {"legend": legend}, "ranks": {"legend": legend}} return output def add_annotations(figures: typing.Figures, annotations: typing.Annotations) -> None: """Add the given annotations to the given figures of the tool. Parameters ---------- figures : typing.Figures A dictionary of Bokeh Figure objects. annotations : typing.Annotations A dictionary of Bokeh Annotation objects. Returns ------- None Adds annotations directly to the given figures. """ for figure_name, figure_annotations in annotations.items(): fig = figures[figure_name] for _, annotation in figure_annotations.items(): fig.add_layout(annotation, "below") def create_tooltips( rv_name: str, figures: typing.Figures, num_chains: int, ) -> typing.Tooltips: """Create hover tools for the glyphs used in the figures of the tool. Parameters ---------- rv_name : str The string representation of the random variable data. figures : typing.Figures A dictionary of Bokeh Figure objects. num_chains : int The number of chains of the model. Returns ------- typing.Tooltips A dictionary of Bokeh HoverTools objects. 
""" output = {} for figure_name, fig in figures.items(): output[figure_name] = [] for chain in range(num_chains): chain_index = chain + 1 chain_name = f"chain{chain_index}" if figure_name == "marginals": glyph_name = f"{figure_name}{chain_name.title()}LineGlyph" output[figure_name].append( HoverTool( renderers=plotting_utils.filter_renderers(fig, glyph_name), tooltips=[ ("Chain", "@chain"), ("Mean", "@mean"), (rv_name, "@x"), ], ), ) if figure_name == "forests": glyph_name = f"{figure_name}{chain_name.title()}CircleGlyph" output[figure_name].append( HoverTool( renderers=plotting_utils.filter_renderers(fig, glyph_name), tooltips=[ ("Chain", "@chain"), (rv_name, "@x"), ], ), ) if figure_name == "traces": glyph_name = f"{figure_name}{chain_name.title()}LineGlyph" output[figure_name].append( HoverTool( renderers=plotting_utils.filter_renderers(fig, glyph_name), tooltips=[ ("Chain", "@chain"), ("Mean", "@mean"), (rv_name, "@y"), ], ), ) if figure_name == "ranks": output[figure_name].append( { "line": HoverTool( renderers=plotting_utils.filter_renderers( fig, f"{figure_name}{chain_name.title()}LineGlyph", ), tooltips=[("Chain", "@chain"), ("Rank mean", "@rankMean")], ), "quad": HoverTool( renderers=plotting_utils.filter_renderers( fig, f"{figure_name}{chain_name.title()}QuadGlyph", ), tooltips=[ ("Chain", "@chain"), ("Draws", "@draws"), ("Rank", "@rank"), ], ), }, ) return output def add_tooltips(figures: typing.Figures, tooltips: typing.Tooltips) -> None: """Add the given tools to the figures. Parameters ---------- figures : typing.Figures A dictionary of Bokeh Figure objects. tooltips : typing.Tooltips A dictionary of Bokeh HoverTools objects. Returns ------- None Adds the tooltips directly to the given figures. """ for figure_name, fig in figures.items(): for tips in tooltips[figure_name]: if figure_name == "ranks": for _, tips_ in tips.items(): fig.add_tools(tips_) else: fig.add_tools(tips) def create_widgets(rv_names: List[str], rv_name: str) -> typing.Widgets: """Create the widgets used in the tool. Parameters ---------- rv_names : List[str] A list of all available random variable names. rv_name : str The string representation of the random variable data. Returns ------- typing.Widgets A dictionary of Bokeh widget objects. """ output = { "rv_select": Select(value=rv_name, options=rv_names, title="Query"), "bw_factor_slider": Slider( start=0.01, end=2.00, step=0.01, value=1.0, title="Bandwidth factor", ), "hdi_slider": Slider(start=1, end=99, step=1, value=89, title="HDI"), } return output def help_page() -> Div: """Help tab for the tool. Returns ------- Div Bokeh Div widget containing the help tab information. """ text = """ <h2>Rank plots</h2> <p style="margin-bottom: 10px"> Rank plots are a histogram of the samples over time. All samples across all chains are ranked and then we plot the average rank for each chain on regular intervals. If the chains are mixing well this histogram should look roughly uniform. If it looks highly irregular that suggests chains might be getting stuck and not adequately exploring the sample space. See the paper by Vehtari <em>et al</em> for more information. </p> <h2>Trace plots</h2> <p style="margin-bottom: 10px"> The more familiar trace plots are also included in this widget. You can click on the legend to show/hide different chains and compare them to the rank plots. 
</p> <ul> <li> Vehtari A, Gelman A, Simpson D, Carpenter B, Bürkner PC (2021) <b> Rank-normalization, folding, and localization: An improved \\(\\hat{R}\\) for assessing convergence of MCMC (with discussion) </b>. <em>Bayesian Analysis</em> 16(2) 667–718. <a href=https://dx.doi.org/10.1214/20-BA1221 style="color: blue" target="_blank" > doi: 10.1214/20-BA1221 </a>. </li> </ul> """ return Div(text=text, disable_math=False, min_width=PLOT_WIDTH) def create_view(figures: typing.Figures, widgets: typing.Widgets) -> Tabs: """Create the tool view. Parameters ---------- figures : typing.Figures A dictionary of Bokeh Figure objects. widgets : typing.Widgets A dictionary of Bokeh widget objects. Returns ------- Tabs Bokeh Tabs objects. """ toolbar = plotting_utils.create_toolbar(list(figures.values())) help_panel = Panel(child=help_page(), title="Help", name="helpPanel") marginal_panel = Panel( child=Column( children=[figures["marginals"], widgets["bw_factor_slider"]], sizing_mode="scale_both", ), title="Marginals", ) forest_panel = Panel( child=Column( children=[figures["forests"], widgets["hdi_slider"]], sizing_mode="scale_both", ), title="HDIs", ) left_panels = Tabs(tabs=[marginal_panel, forest_panel], sizing_mode="scale_both") trace_panel = Panel( child=Column(children=[figures["traces"]], sizing_mode="scale_both"), title="Traces", ) rank_panel = Panel( child=Column(children=[figures["ranks"]], sizing_mode="scale_both"), title="Ranks", ) right_panels = Tabs(tabs=[trace_panel, rank_panel], sizing_mode="scale_both") tool_panel = Panel( child=Column( children=[ widgets["rv_select"], Row( children=[left_panels, right_panels, toolbar], sizing_mode="scale_both", ), ], sizing_mode="scale_both", ), title="Trace tool", ) return Tabs(tabs=[tool_panel, help_panel], sizing_mode="scale_both")
beanmachine-main
src/beanmachine/ppl/diagnostics/tools/trace/utils.py
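For orientation, a rough sketch (illustrative; two chains assumed) of the empty skeleton the module above builds before the browser-side JavaScript fills it in:

from beanmachine.ppl.diagnostics.tools.trace import utils

empty = utils.create_empty_data(num_chains=2)
list(empty.keys())  # ["marginals", "forests", "traces", "ranks"]
list(empty["traces"].keys())  # ["chain1", "chain2"]
sources = utils.create_sources(num_chains=2)  # ColumnDataSource objects per glyph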
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Trace diagnostic tool types for a Bean Machine model.""" from typing import Any, Dict, List, Union from beanmachine.ppl.diagnostics.tools import NotRequired, TypedDict from bokeh.models.annotations import Legend from bokeh.models.glyphs import Circle, Line, Quad from bokeh.models.sources import ColumnDataSource from bokeh.models.tools import HoverTool from bokeh.models.widgets.inputs import Select from bokeh.models.widgets.sliders import Slider from bokeh.plotting.figure import Figure # NOTE: These are the types pyre gives us when using `reveal_type(...)` on the outputs # of the methods. Data = Dict[str, Dict[Any, Any]] Sources = Dict[Any, Any] Figures = Dict[Any, Any] Glyphs = Dict[Any, Any] Annotations = Dict[str, Dict[str, Legend]] Tooltips = Dict[Any, Any] Widgets = Dict[str, Union[Select, Slider]] # NOTE: TypedDict objects are for reference only. Due to the way pyre accesses keys in # dictionaries, and how NumPy casts arrays when using tolist(), we are unable to # use them, but they provide semantic information for the different types. We must # ignore a lot of lines due to the issue discussed here # https://pyre-check.org/docs/errors/#13-uninitialized-attribute. class _LineOrCircleGlyphData(TypedDict): # pyre-ignore x: List[float] y: List[float] class _QuadGlyphData(TypedDict): # pyre-ignore """Follow the RankHistogram interface in stats/histogram.js.""" left: List[float] top: List[float] right: List[float] bottom: List[float] chain: List[int] draws: List[str] rank: List[float] class _MarginalDataSingleChain(TypedDict): # pyre-ignore line: _LineOrCircleGlyphData chain: int mean: float bandwidth: float class _ForestDataSingleChain(TypedDict): # pyre-ignore line: _LineOrCircleGlyphData circle: _LineOrCircleGlyphData chain: int mean: float class _TraceDataSingleChain(TypedDict): # pyre-ignore line: _LineOrCircleGlyphData chain: int mean: float class _RankDataSingleChain(TypedDict): # pyre-ignore quad: _QuadGlyphData line: _LineOrCircleGlyphData chain: List[int] rankMean: List[float] mean: List[float] _MarginalDataAllChains = Dict[str, _MarginalDataSingleChain] _ForestDataAllChains = Dict[str, _ForestDataSingleChain] _TraceDataAllChains = Dict[str, _TraceDataSingleChain] _RankDataAllChains = Dict[str, _RankDataSingleChain] class _Data(TypedDict): # pyre-ignore marginals: _MarginalDataAllChains forests: _ForestDataAllChains traces: _TraceDataAllChains ranks: _RankDataAllChains class _SourceSingleChain(TypedDict): # pyre-ignore line: ColumnDataSource circle: NotRequired[ColumnDataSource] quad: NotRequired[ColumnDataSource] _SourceAllChains = Dict[str, _SourceSingleChain] class _Sources(TypedDict): # pyre-ignore marginals: _SourceAllChains forests: _SourceAllChains traces: _SourceAllChains ranks: _SourceAllChains class _Figures(TypedDict): # pyre-ignore marginals: Figure forests: Figure traces: Figure ranks: Figure class _RankTooltip(TypedDict): # pyre-ignore line: HoverTool quad: HoverTool class _Tooltips(TypedDict): # pyre-ignore marginals: List[HoverTool] forests: List[HoverTool] traces: List[HoverTool] ranks: List[_RankTooltip] class _Glyph(TypedDict): # pyre-ignore glyph: Union[Circle, Line, Quad] hover_glyph: Union[Circle, Line, Quad] class _GlyphSingleChain(TypedDict): # pyre-ignore line: _Glyph circle: NotRequired[_Glyph] quad: NotRequired[_Glyph] _GlyphAllChains = Dict[str, _GlyphSingleChain] class 
_Glyphs(TypedDict): # pyre-ignore marginals: _GlyphAllChains forests: _GlyphAllChains traces: _GlyphAllChains ranks: _GlyphAllChains _Annotations = Dict[str, Dict[str, Legend]] class _Widgets(TypedDict): # pyre-ignore rv_select: Select bw_factor_slider: Slider hdi_slider: Slider
beanmachine-main
src/beanmachine/ppl/diagnostics/tools/trace/typing.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Trace diagnostic tool for a Bean Machine model.""" from __future__ import annotations from beanmachine.ppl.diagnostics.tools.trace import utils from beanmachine.ppl.diagnostics.tools.utils.diagnostic_tool_base import ( DiagnosticToolBaseClass, ) from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples from bokeh.models import Model from bokeh.models.callbacks import CustomJS class Trace(DiagnosticToolBaseClass): """Trace tool. Args: mcs (MonteCarloSamples): The return object from running a Bean Machine model. Attributes: data (Dict[str, List[List[float]]]): JSON serializable representation of the given `mcs` object. rv_names (List[str]): The list of random variables string names for the given model. num_chains (int): The number of chains of the model. num_draws (int): The number of draws of the model for each chain. palette (List[str]): A list of color values used for the glyphs in the figures. The colors are specifically chosen from the Colorblind palette defined in Bokeh. tool_js (str):The JavaScript callbacks needed to render the Bokeh tool independently from a Python server. """ def __init__(self: Trace, mcs: MonteCarloSamples) -> None: super(Trace, self).__init__(mcs) def create_document(self: Trace) -> Model: # Initialize widget values using Python. rv_name = self.rv_names[0] # NOTE: We are going to use Python and Bokeh to render the tool in the notebook # output cell, however, we WILL NOT use Python to calculate any of the # statistics displayed in the tool. We do this so we can make the BROWSER # run all the calculations based on user interactions. If we did not # employ this strategy, then the initial display a user would receive # would be calculated by Python, and any subsequent updates would be # calculated by JavaScript. The side-effect of having two backends # calculate data could cause the figures to flicker, which would not be a # good end user experience. # # Bokeh 3.0 is implementing an "on load" feature, which would nullify this # requirement, and until that version is released, we have to employ this # work-around. # Create empty Bokeh sources using Python. sources = utils.create_sources(num_chains=self.num_chains) # Create empty figures for the tool using Python. figures = utils.create_figures(rv_name=rv_name, num_chains=self.num_chains) # Create empty glyphs and attach them to the figures using Python. glyphs = utils.create_glyphs(num_chains=self.num_chains) utils.add_glyphs(sources=sources, figures=figures, glyphs=glyphs) # Create empty annotations and attach them to the figures using Python. annotations = utils.create_annotations( figures=figures, num_chains=self.num_chains, ) utils.add_annotations(figures=figures, annotations=annotations) # Create empty tool tips and attach them to the figures using Python. tooltips = utils.create_tooltips( figures=figures, rv_name=rv_name, num_chains=self.num_chains, ) utils.add_tooltips(figures=figures, tooltips=tooltips) # Create the widgets for the tool using Python. widgets = utils.create_widgets(rv_names=self.rv_names, rv_name=rv_name) # Create the view of the tool and serialize it into HTML using static resources # from Bokeh. Embedding the tool in this manner prevents external CDN calls for # JavaScript resources, and prevents the user from having to know where the # Bokeh server is. 
tool_view = utils.create_view(figures=figures, widgets=widgets) # Create callbacks for the tool using JavaScript. callback_js = f""" const rvName = widgets.rv_select.value; const rvData = data[rvName]; let bw = 0.0; // Remove the CSS classes that dim the tool output on initial load. const toolTab = toolView.tabs[0]; const toolChildren = toolTab.child.children; const dimmedComponent = toolChildren[1]; dimmedComponent.css_classes = []; try {{ trace.update( rvData, rvName, bwFactor, hdiProbability, sources, figures, tooltips, ); }} catch (error) {{ {self.tool_js} trace.update( rvData, rvName, bwFactor, hdiProbability, sources, figures, tooltips, ); }} """ # Each widget requires the following dictionary for the CustomJS method. Notice # that the callback_js object above uses the names defined as keys in the below # object with values defined by the Python objects. callback_arguments = { "data": self.data, "widgets": widgets, "sources": sources, "figures": figures, "tooltips": tooltips, "toolView": tool_view, } # Each widget requires slightly different JS. rv_select_js = f""" const bwFactor = 1.0; const hdiProbability = 0.89; widgets.bw_factor_slider.value = bwFactor; widgets.hdi_slider.value = 100 * hdiProbability; {callback_js}; figures.marginals.reset.emit(); """ slider_js = f""" const bwFactor = widgets.bw_factor_slider.value; const hdiProbability = widgets.hdi_slider.value / 100; {callback_js}; """ slider_callback = CustomJS(args=callback_arguments, code=slider_js) rv_select_callback = CustomJS(args=callback_arguments, code=rv_select_js) # Tell Python to use the JavaScript. widgets["rv_select"].js_on_change("value", rv_select_callback) widgets["bw_factor_slider"].js_on_change("value", slider_callback) widgets["hdi_slider"].js_on_change("value", slider_callback) return tool_view
beanmachine-main
src/beanmachine/ppl/diagnostics/tools/trace/tool.py
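A minimal sketch of driving the Trace tool directly instead of through the samples accessor (assumes `samples` is a MonteCarloSamples result from a multi-chain run):

from beanmachine.ppl.diagnostics.tools.trace.tool import Trace

Trace(samples).show()  # builds the Bokeh document and displays it in the notebook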
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
beanmachine-main
src/beanmachine/ppl/diagnostics/tools/marginal1d/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Methods used to generate the diagnostic tool.""" from typing import List import numpy as np from beanmachine.ppl.diagnostics.tools.marginal1d import typing from beanmachine.ppl.diagnostics.tools.utils import plotting_utils from bokeh.models.annotations import Band, LabelSet from bokeh.models.glyphs import Circle, Line from bokeh.models.layouts import Column, Row from bokeh.models.sources import ColumnDataSource from bokeh.models.tools import HoverTool from bokeh.models.widgets.inputs import Select from bokeh.models.widgets.markups import Div from bokeh.models.widgets.panels import Panel, Tabs from bokeh.models.widgets.sliders import Slider from bokeh.plotting.figure import figure PLOT_WIDTH = 500 PLOT_HEIGHT = 500 FIGURE_NAMES = ["marginal", "cumulative"] # Define what the empty data object looks like in order to make the browser handle all # computations. EMPTY_DATA = { "marginal": { "distribution": {"x": [], "y": [], "bandwidth": np.NaN}, "hdi": {"base": [], "lower": [], "upper": []}, "stats": {"x": [], "y": [], "text": []}, "labels": { "x": [], "y": [], "text": [], "text_align": [], "x_offset": [], "y_offset": [], }, }, "cumulative": { "distribution": {"x": [], "y": [], "bandwidth": np.NaN}, "hdi": {"base": [], "lower": [], "upper": []}, "stats": {"x": [], "y": [], "text": []}, "labels": { "x": [], "y": [], "text": [], "text_align": [], "x_offset": [], "y_offset": [], }, }, } SIZING = { "sizing_mode": "scale_both", "max_height": PLOT_HEIGHT + 250, # drop down menus and tabs "max_width": 2 * PLOT_WIDTH + 30, # tool bars } def create_sources() -> typing.Sources: """Create Bokeh sources from the given data that will be bound to glyphs. Returns ------- typing.Sources A dictionary of Bokeh ColumnDataSource objects. """ output = {} for figure_name, figure_data in EMPTY_DATA.items(): output[figure_name] = {} for glyph_name, glyph_data in figure_data.items(): if "bandwidth" in list(glyph_data.keys()): glyph_data.pop("bandwidth") output[figure_name][glyph_name] = ColumnDataSource(data=glyph_data) return output def create_figures(rv_name: str) -> typing.Figures: """Create the Bokeh figures used for the tool. Parameters ---------- rv_name : str The string representation of the random variable data. Returns ------- typing.Figures A dictionary of Bokeh Figure objects. """ output = {} for figure_name in FIGURE_NAMES: fig = figure( max_width=PLOT_WIDTH, max_height=PLOT_HEIGHT, outline_line_color="black", title=f"{figure_name} distribution", x_axis_label=rv_name, y_axis_label=None, sizing_mode="scale_both", ) fig.yaxis.visible = False plotting_utils.style_figure(fig) output[figure_name] = fig output[FIGURE_NAMES[0]].x_range = output[FIGURE_NAMES[1]].x_range output[FIGURE_NAMES[0]].y_range = output[FIGURE_NAMES[1]].y_range return output def create_glyphs() -> typing.Glyphs: """Create the glyphs used for the figures of the tool. Returns ------- typing.Glyphs A dictionary of Bokeh Glyphs objects. 
""" palette = plotting_utils.choose_palette(num_colors=2) output = {} for figure_name, figure_data in EMPTY_DATA.items(): output[figure_name] = {} for glyph_name, _ in figure_data.items(): if glyph_name in ["distribution", "stats"]: if glyph_name == "distribution": output[figure_name][glyph_name] = { "glyph": Line( x="x", y="y", line_color=palette[0], line_alpha=0.7, line_width=2.0, name=f"{figure_name}DistributionGlyph", ), "hover_glyph": Line( x="x", y="y", line_color=palette[1], line_alpha=1.0, line_width=2.0, name=f"{figure_name}DistributionHoverGlyph", ), } if glyph_name == "stats": output[figure_name][glyph_name] = { "glyph": Circle( x="x", y="y", size=10, fill_color=palette[0], line_color="white", fill_alpha=1.0, name=f"{figure_name}StatsGlyph", ), "hover_glyph": Circle( x="x", y="y", size=10, fill_color=palette[1], line_color="black", fill_alpha=1.0, name=f"{figure_name}StatsHoverGlyph", ), } return output def add_glyphs( figures: typing.Figures, glyphs: typing.Glyphs, sources: typing.Sources, ) -> None: """Bind source data to glyphs and add the glyphs to the given figures. Parameters ---------- figures : typing.Figures A dictionary of Bokeh Figure objects. glyphs : typing.Glyphs A dictionary of Bokeh Glyphs objects. sources : typing.Sources A dictionary of Bokeh ColumnDataSource objects. Returns ------- None Adds data bound glyphs to the given figures directly. """ for figure_name, figure_glyphs in glyphs.items(): fig = figures[figure_name] figure_sources = sources[figure_name] for glyph_name, glyphs in figure_glyphs.items(): glyph_source = figure_sources[glyph_name] fig.add_glyph( source_or_glyph=glyph_source, glyph=glyphs["glyph"], hover_glyph=glyphs["hover_glyph"], name=glyphs["glyph"].name, ) def create_annotations(sources: typing.Sources) -> typing.Annotations: """Create any annotations for the figures of the tool. Parameters ---------- source : typing.Sources A dictionary of Bokeh ColumnDataSource objects. Returns ------- typing.Annotations A dictionary of Bokeh Annotation objects. """ palette = plotting_utils.choose_palette(num_colors=1) output = {} for figure_name, figure_sources in sources.items(): output[figure_name] = {} for glyph_name, glyph_source in figure_sources.items(): if glyph_name == "hdi": output[figure_name][glyph_name] = Band( base="base", lower="lower", upper="upper", source=glyph_source, level="underlay", fill_color=palette[0], fill_alpha=0.2, line_width=1.0, line_color="white", name=f"{figure_name}HdiAnnotation", ) elif glyph_name == "labels": output[figure_name][glyph_name] = LabelSet( x="x", y="y", text="text", x_offset="x_offset", y_offset="y_offset", text_align="text_align", source=glyph_source, background_fill_color="white", background_fill_alpha=0.8, name=f"{figure_name}LabelAnnotation", ) return output def add_annotations(figures: typing.Figures, annotations: typing.Annotations) -> None: """Add the given annotations to the given figures of the tool. Parameters ---------- figures : typing.Figures A dictionary of Bokeh Figure objects. annotations : typing.Annotations A dictionary of Bokeh Annotation objects. Returns ------- None Adds annotations directly to the given figures. """ for figure_name, annotation_sources in annotations.items(): fig = figures[figure_name] for _, annotation in annotation_sources.items(): fig.add_layout(annotation) def create_tooltips(rv_name: str, figures: typing.Figures) -> typing.Tooltips: """Create hover tools for the glyphs used in the figures of the tool. 
Parameters ---------- rv_name : str The string representation of the random variable data. figures : typing.Figures A dictionary of Bokeh Figure objects. Returns ------- typing.Tooltips A dictionary of Bokeh HoverTools objects. """ output = {} for figure_name, fig in figures.items(): output[figure_name] = { "distribution": HoverTool( renderers=plotting_utils.filter_renderers( figure=fig, search="DistributionGlyph", glyph_type="GlyphRenderer", substring=True, ), tooltips=[(rv_name, "@x")], ), "stats": HoverTool( renderers=plotting_utils.filter_renderers( figure=fig, search="StatsGlyph", glyph_type="GlyphRenderer", substring=True, ), tooltips=[("", "@text")], ), } return output def add_tooltips(figures: typing.Figures, tooltips: typing.Tooltips) -> None: """Add the given tools to the figures. Parameters ---------- figures : typing.Figures A dictionary of Bokeh Figure objects. tooltips : typing.Tooltips A dictionary of Bokeh HoverTools objects. Returns ------- None Adds the tooltips directly to the given figures. """ for figure_name, figure_tooltips in tooltips.items(): fig = figures[figure_name] for _, tooltip in figure_tooltips.items(): fig.add_tools(tooltip) def create_widgets( rv_name: str, rv_names: List[str], bw_factor: float, bandwidth: float, ) -> typing.Widgets: """Create the widgets used in the tool. Parameters ---------- rv_name : str The string representation of the random variable data. rv_names : List[str] A list of all available random variable names. bw_factor : float Multiplicative factor used when calculating the kernel density estimate. bandwidth : float The bandwidth used to calculate the KDE. Returns ------- typing.Widgets A dictionary of Bokeh widget objects. """ return { "rv_select": Select(value=rv_name, options=rv_names, title="Query"), "bw_factor_slider": Slider( title="Bandwidth factor", start=0.01, end=2.00, value=1.00, step=0.01, ), "bw_div": Div(text=f"Bandwidth: {bw_factor * bandwidth}"), "hdi_slider": Slider(start=1, end=99, step=1, value=89, title="HDI"), } def help_page() -> Div: """Help tab for the tool. Returns ------- Div Bokeh Div widget containing the help tab information. """ text = """ <h2> Highest density interval </h2> <p style="margin-bottom: 10px"> The highest density interval region is not equal tailed like a typical equal tailed interval of 2.5%. Thus it will include the mode(s) of the posterior distribution. </p> <p style="margin-bottom: 10px"> There is nothing particularly specific about having a default HDI of 89%. If fact, the only remarkable thing about defaulting to 89% is that it is the highest prime number that does not exceed the unstable 95% threshold. See the link to McElreath's book below for further discussion. </p> <ul> <li> McElreath R (2020) <b> Statistical Rethinking: A Bayesian Course with Examples in R and Stan 2nd edition. </b> <em>Chapman and Hall/CRC</em> <a href=https://dx.doi.org/10.1201/9780429029608 style="color: blue" target="_blank" > doi: 10.1201/9780429029608 </a>. </li> </ul> """ return Div(text=text, disable_math=False, min_width=PLOT_WIDTH) def create_figure_grid(figures: typing.Figures) -> Row: """Layout the given figures in a grid, and make one toolbar. Parameters ---------- figures : typing.Figures A dictionary of Bokeh Figure objects. Returns ------- Row A Bokeh layout object. 
""" toolbar = plotting_utils.create_toolbar(figures=list(figures.values())) return Row(children=[*list(figures.values()), toolbar], css_classes=["bk-loading"]) def create_view(widgets: typing.Widgets, figures: typing.Figures) -> Tabs: """Create the tool view. Parameters ---------- widgets : typing.Widgets A dictionary of Bokeh widget objects. figures : typing.Figures A dictionary of Bokeh Figure objects. Returns ------- Tabs Bokeh Tabs objects. """ help_panel = Panel(child=help_page(), title="Help", name="helpPanel") fig_child = Column( children=[ create_figure_grid(figures), widgets["bw_factor_slider"], widgets["bw_div"], widgets["hdi_slider"], ], css_classes=["bm-tool-loading", "arcs"], ) fig_child.update_from_json(SIZING) tool_child = Column(children=[widgets["rv_select"], fig_child]) tool_child.update_from_json(SIZING) tool_panel = Panel( child=tool_child, title="Marginal 1D", name="toolPanel", ) tabs = Tabs(tabs=[tool_panel, help_panel]) tabs.update_from_json(SIZING) return tabs
beanmachine-main
src/beanmachine/ppl/diagnostics/tools/marginal1d/utils.py
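A rough sketch of the Python-side wiring these functions provide, mirroring the order used by create_document in tool.py (illustrative; the query name "theta()" is an assumption):

from beanmachine.ppl.diagnostics.tools.marginal1d import utils

sources = utils.create_sources()
figures = utils.create_figures(rv_name="theta()")
glyphs = utils.create_glyphs()
utils.add_glyphs(figures=figures, glyphs=glyphs, sources=sources)
annotations = utils.create_annotations(sources=sources)
utils.add_annotations(figures=figures, annotations=annotations)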
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Marginal 1D diagnostic tool types for a Bean Machine model.""" from typing import Any, Dict, List, Union from beanmachine.ppl.diagnostics.tools import TypedDict from bokeh.models.annotations import Band, LabelSet from bokeh.models.glyphs import Circle, Line from bokeh.models.sources import ColumnDataSource from bokeh.models.tools import HoverTool from bokeh.models.widgets.inputs import Select from bokeh.models.widgets.markups import Div from bokeh.models.widgets.sliders import Slider from bokeh.plotting.figure import Figure # NOTE: These are the types pyre gives us when using `reveal_type(...)` on the outputs # of the methods. StatsAndLabelsData = Dict[str, Dict[str, Any]] HDIData = Dict[str, Any] Data = Dict[Any, Any] Sources = Dict[Any, Any] Figures = Dict[Any, Any] Glyphs = Dict[Any, Any] Annotations = Dict[Any, Any] Tooltips = Dict[Any, Any] Widgets = Dict[str, Union[Div, Select, Slider]] # NOTE: TypedDict objects are for reference only. Due to the way pyre accesses keys in # dictionaries, and how NumPy casts arrays when using tolist(), we are unable to # use them, but they provide semantic information for the different types. We must # ignore a lot of lines due to the issue discussed here # https://pyre-check.org/docs/errors/#13-uninitialized-attribute. class _DistributionData(TypedDict): # pyre-ignore x: List[float] y: List[float] bandwidth: float class _HDIData(TypedDict): # pyre-ignore base: List[float] lower: List[float] upper: List[float] class _StatsData(TypedDict): # pyre-ignore x: List[float] y: List[float] text: List[str] class _LabelsData(TypedDict): # pyre-ignore x: List[float] y: List[float] text: List[str] text_align: List[str] x_offset: List[int] y_offset: List[int] class _GlyphData(TypedDict): # pyre-ignore distribtution: _DistributionData hdi: _HDIData stats: _StatsData labels: _LabelsData class _Data(TypedDict): # pyre-ignore marginal: _GlyphData cumulative: _GlyphData class _Source(TypedDict): # pyre-ignore distribution: ColumnDataSource hdi: ColumnDataSource stats: ColumnDataSource labels: ColumnDataSource class _Sources(TypedDict): # pyre-ignore marginal: _Source cumulative: _Source class _Figures(TypedDict): # pyre-ignore marginal: Figure cumulative: Figure class _DistributionGlyph(TypedDict): # pyre-ignore glyph: Line hover_glyph: Line class _StatsGlyph(TypedDict): # pyre-ignore glyph: Circle hover_glyph: Circle class _FigureGlyphs(TypedDict): # pyre-ignore distribution: _DistributionGlyph stats: _StatsGlyph class _Glyphs(TypedDict): # pyre-ignore marginal: _FigureGlyphs cumulative: _FigureGlyphs class _FigureAnnotations(TypedDict): # pyre-ignore hdi: Band labels: LabelSet class _Annotations(TypedDict): # pyre-ignore marginal: _FigureAnnotations cumulative: _FigureAnnotations class _Tooltip(TypedDict): # pyre-ignore distribution: HoverTool stats: HoverTool class _Tooltips(TypedDict): # pyre-ignore marginal: _Tooltip cumulative: _Tooltip class _Widgets(TypedDict): # pyre-ignore rv_select: Select bw_factor_slider: Slider bw_div: Div hdi_slider: Slider
beanmachine-main
src/beanmachine/ppl/diagnostics/tools/marginal1d/typing.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Marginal 1D diagnostic tool for a Bean Machine model.""" from __future__ import annotations from beanmachine.ppl.diagnostics.tools.marginal1d import utils from beanmachine.ppl.diagnostics.tools.utils.diagnostic_tool_base import ( DiagnosticToolBaseClass, ) from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples from bokeh.models import Model from bokeh.models.callbacks import CustomJS class Marginal1d(DiagnosticToolBaseClass): """ Marginal 1D diagnostic tool. Args: mcs (MonteCarloSamples): The return object from running a Bean Machine model. Attributes: data (Dict[str, List[List[float]]]): JSON serializable representation of the given `mcs` object. rv_names (List[str]): The list of random variables string names for the given model. num_chains (int): The number of chains of the model. num_draws (int): The number of draws of the model for each chain. palette (List[str]): A list of color values used for the glyphs in the figures. The colors are specifically chosen from the Colorblind palette defined in Bokeh. tool_js (str):The JavaScript callbacks needed to render the Bokeh tool independently from a Python server. """ def __init__(self: Marginal1d, mcs: MonteCarloSamples) -> None: super(Marginal1d, self).__init__(mcs) def create_document(self: Marginal1d) -> Model: # Initialize widget values using Python. rv_name = self.rv_names[0] bw_factor = 1.0 bandwidth = 1.0 # NOTE: We are going to use Python and Bokeh to render the tool in the notebook # output cell, however, we WILL NOT use Python to calculate any of the # statistics displayed in the tool. We do this so we can make the BROWSER # run all the calculations based on user interactions. If we did not # employ this strategy, then the initial display a user would receive # would be calculated by Python, and any subsequent updates would be # calculated by JavaScript. The side-effect of having two backends # calculate data could cause the figures to flicker, which would not be a # good end user experience. # # Bokeh 3.0 is implementing an "on load" feature, which would nullify this # requirement, and until that version is released, we have to employ this # work-around. # Create empty Bokeh sources using Python. sources = utils.create_sources() # Create empty figures for the tool using Python. figures = utils.create_figures(rv_name=rv_name) # Create empty glyphs and attach them to the figures using Python. glyphs = utils.create_glyphs() utils.add_glyphs(sources=sources, figures=figures, glyphs=glyphs) # Create empty annotations and attach them to the figures using Python. annotations = utils.create_annotations(sources=sources) utils.add_annotations(figures=figures, annotations=annotations) # Create empty tool tips and attach them to the figures using Python. tooltips = utils.create_tooltips(figures=figures, rv_name=rv_name) utils.add_tooltips(figures=figures, tooltips=tooltips) # Create the widgets for the tool using Python. widgets = utils.create_widgets( rv_names=self.rv_names, rv_name=rv_name, bandwidth=bandwidth, bw_factor=bw_factor, ) # Create the view of the tool and serialize it into HTML using static resources # from Bokeh. Embedding the tool in this manner prevents external CDN calls for # JavaScript resources, and prevents the user from having to know where the # Bokeh server is. 
tool_view = utils.create_view(figures=figures, widgets=widgets) # Create callbacks for the tool using JavaScript. callback_js = f""" const rvName = widgets.rv_select.value; const rvData = data[rvName].flat(); let bw = 0.0; // Remove the CSS classes that dim the tool output on initial load. const toolTab = toolView.tabs[0]; const toolChildren = toolTab.child.children; const dimmedComponent = toolChildren[1]; dimmedComponent.css_classes = []; try {{ bw = marginal1d.update( rvData, rvName, bwFactor, hdiProbability, sources, figures, tooltips, ); }} catch (error) {{ {self.tool_js} bw = marginal1d.update( rvData, rvName, bwFactor, hdiProbability, sources, figures, tooltips, ); }} """ # Each widget requires the following dictionary for the CustomJS method. Notice # that the callback_js object above uses the names defined as keys in the below # object with values defined by the Python objects. callback_arguments = { "data": self.data, "widgets": widgets, "sources": sources, "figures": figures, "tooltips": tooltips, "toolView": tool_view, } # Each widget requires slightly different JS, except for the sliders. rv_select_js = f""" const bwFactor = 1.0; const hdiProbability = 0.89; widgets.bw_factor_slider.value = bwFactor; widgets.hdi_slider.value = 100 * hdiProbability; {callback_js}; widgets.bw_div.text = `Bandwidth: ${{bwFactor * bw}}`; figures.marginal.reset.emit(); """ slider_js = f""" const bwFactor = widgets.bw_factor_slider.value; const hdiProbability = widgets.hdi_slider.value / 100; {callback_js}; widgets.bw_div.text = `Bandwidth: ${{bwFactor * bw}}`; """ rv_select_callback = CustomJS(args=callback_arguments, code=rv_select_js) slider_callback = CustomJS(args=callback_arguments, code=slider_js) # Tell Python to use the JavaScript. widgets["rv_select"].js_on_change("value", rv_select_callback) widgets["bw_factor_slider"].js_on_change("value", slider_callback) widgets["hdi_slider"].js_on_change("value", slider_callback) return tool_view
beanmachine-main
src/beanmachine/ppl/diagnostics/tools/marginal1d/tool.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Plotting utilities for the diagnostic tools.""" from typing import List from bokeh.core.property.nullable import Nullable from bokeh.core.property.primitive import Null from bokeh.core.property.wrappers import PropertyValueList from bokeh.models.layouts import Column, Row from bokeh.models.renderers import GlyphRenderer from bokeh.models.tools import ProxyToolbar, ResetTool, SaveTool, ToolbarBox from bokeh.palettes import Colorblind from bokeh.plotting.figure import Figure def style_figure(figure: Figure) -> None: """ Style the given Bokeh `figure`. Args: figure (Figure): A Bokeh `Figure` object. Returns: None: Styles the given figure without copying. """ figure.grid.grid_line_alpha = 0.3 figure.grid.grid_line_color = "grey" figure.grid.grid_line_width = 0.3 figure.xaxis.minor_tick_line_color = "grey" figure.yaxis.minor_tick_line_color = "grey" def choose_palette(num_colors: int) -> List[str]: """ Determine which palette from Bokeh's Colorblind to use. Args: num_colors (int): The number of colors to use for the palette. Returns: List[str]: A list of colors to be used as the palette for a figure. """ palette_indices = [key for key in Colorblind.keys() if num_colors <= key] if not palette_indices: palette_index = max(Colorblind.keys()) else: palette_index = min(palette_indices) return Colorblind[palette_index] def create_toolbar(figures: List[Figure]) -> ToolbarBox: """ Create a single toolbar for the given list of figures. This method ignores all `HoverTool` entries in the final toolbar object. The rational for ignoring them is to prevent the final toolbar from having too much visual clutter. Args: figures (List[Figure]): A list of Bokeh `Figure` objects that all have their own toolbars that will be merged into one. Returns: ToolbarBox: The merged toolbar. """ toolbars = [] for figure in figures: toolbars.append(figure.toolbar) figure.toolbar_location = Nullable(Null)._default tools = [] for toolbar in toolbars: tools.extend(toolbar.tools) tools = [tool for tool in tools if type(tool).__name__ != "HoverTool"] if len(tools) == 0: tools = [SaveTool(), ResetTool()] return ToolbarBox( toolbar=ProxyToolbar(toolbars=toolbars, tools=tools), toolbar_location="right", ) def create_figure_grid(figures: List[Figure], num_figure_columns: int) -> Row: """ Similar to Bokeh's `grid_plot` method, except we merge toolbars in this method. Args: figures (List[Figure]): A list of Bokeh `Figure` objects. num_figure_columns (int): The number of columns for the grid. Returns: Row: Returns a single Bokeh `Row` object that contains all the given figures. """ toolbar = create_toolbar(figures) figure_rows = [] while len(figures): figs = figures[:num_figure_columns] for i, fig in enumerate(figs): if i != 0: fig.yaxis.axis_label = None figure_rows.append(figs) for fig in figs: figures.pop(figures.index(fig)) for i, figure_row in enumerate(figure_rows): if i != len(figure_rows) - 1: for fig in figure_row: fig.xaxis.axis_label = None figure_layout = [] for i in range(len(figure_rows)): figure_layout.append(Row(children=figure_rows[i])) return Row(children=[Column(children=figure_layout), toolbar]) def filter_renderers( figure: Figure, search: str, glyph_type: str = "GlyphRenderer", substring: bool = False, ) -> List[GlyphRenderer]: """ Find renderers in the given figure using the `search` string. 
Filters renderers from the given figure based on renderer names that match the given search parameters. Args: figure (Figure): A Bokeh `Figure` object. search (str): A string to filter renderer names with. glyph_type (:obj:`str`, optional): The type of renderer to search for in the figure. Default is `GlyphRenderer`. substring (:obj:`bool`, optional): Flag to indicate if the given `search` string should be used as a substring search. Default is `False`. Returns: List[GlyphRenderer]: A list of renderers that match the search parameters. """ output = [] renderers = PropertyValueList(figure.renderers) for renderer in renderers: if renderer.name is not None and type(renderer).__name__ == glyph_type: if substring and search in renderer.name: output.append(renderer) if not substring and renderer.name == search: output.append(renderer) return output
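
# Illustrative usage sketch with a hypothetical empty figure; it only
# exercises the helpers defined above.
if __name__ == "__main__":
    from bokeh.plotting import figure as make_figure

    fig = make_figure(name="marginal")
    style_figure(fig)
    # Palette sized for two chains; choose_palette picks the smallest
    # Colorblind palette that fits the requested number of colors.
    print(choose_palette(num_colors=2))
    # No renderers have been added yet, so the search returns an empty list.
    print(filter_renderers(fig, search="density", substring=True))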
beanmachine-main
src/beanmachine/ppl/diagnostics/tools/utils/plotting_utils.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Accessor definition for extending Bean Machine `MonteCarloSamples` objects. These methods are heavily influenced by the implementations by pandas and xarray. - `pandas`: https://github.com/pandas-dev/pandas/blob/main/pandas/core/accessor.py - `xarray`: https://github.com/pydata/xarray/blob/main/xarray/core/extensions.py """ from __future__ import annotations import contextlib import warnings from typing import Callable from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples class CachedAccessor: """ A descriptor for caching accessors. Args: name (str): Namespace for the accessor. accessor (object): Class that defines the extension methods. Attributes: _name (str): Namespace for the accessor. _accessor (object): Class that defines the extension methods. Raises: RuntimeError: Returned if attempting to overwrite an existing accessor on the object. """ def __init__(self: CachedAccessor, name: str, accessor: object) -> None: self._name = name self._accessor = accessor def __get__(self: CachedAccessor, obj: object, cls: object) -> object: """ Method to retrieve the accessor namespace. Args: obj (object): Object that the accessor is attached to. cls (object): Needed for registering the accessor. Returns: object: The accessor object. """ # Accessing an attribute of the class. if obj is None: return self._accessor try: cache = obj._cache # type: ignore except AttributeError: cache = obj._cache = {} try: return cache[self._name] except KeyError: contextlib.suppress(KeyError) try: accessor_obj = self._accessor(obj) # type: ignore except Exception as error: msg = f"error initializing {self._name!r} accessor." raise RuntimeError(msg) from error cache[self._name] = accessor_obj return accessor_obj # noqa: R504 (unnecessary variable assignment) def _register_accessor(name: str, cls: object) -> Callable: """ Method used for registering an accessor to a given object. Args: name (str): The name for the accessor. cls (object): The object the accessor should be attached to. Returns: Callable: A decorator for creating accessors. Raises: RuntimeError: Returned if attempting to overwrite an existing accessor on the object. """ def decorator(accessor: object) -> object: if hasattr(cls, name): warnings.warn( f"registration of accessor {repr(accessor)} under name " f"{repr(name)} for type {repr(cls)} is overriding a preexisting " f"attribute with the same name.", UserWarning, stacklevel=2, ) setattr(cls, name, CachedAccessor(name, accessor)) return accessor return decorator def register_mcs_accessor(name: str) -> Callable: """ Register an accessor object for `MonteCarloSamples` objects. Args: name (str): The name for the accessor. Returns: Callable: A decorator for creating the `MonteCarloSamples` accessor. Raises: RuntimeError: Returned if attempting to overwrite an existing accessor on the object. 
    Example:
        >>> from __future__ import annotations
        >>> from typing import Dict, List
        >>>
        >>> import beanmachine.ppl as bm
        >>> import numpy as np
        >>> import torch.distributions as dist
        >>> from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
        >>> from beanmachine.ppl.diagnostics.tools.utils import accessor
        >>> from torch import tensor
        >>>
        >>> @bm.random_variable
        >>> def alpha():
        >>>     return dist.Normal(0, 1)
        >>>
        >>> @bm.random_variable
        >>> def beta():
        >>>     return dist.Normal(0, 1)
        >>>
        >>> @accessor.register_mcs_accessor("magic")
        >>> class MagicAccessor:
        >>>     def __init__(self: MagicAccessor, mcs: MonteCarloSamples) -> None:
        >>>         self.mcs = mcs
        >>>     def show_me(self: MagicAccessor) -> Dict[str, List[List[float]]]:
        >>>         # Return a JSON serializable object from a MonteCarloSamples object.
        >>>         return dict(
        >>>             sorted(
        >>>                 {
        >>>                     str(key): value.tolist()
        >>>                     for key, value in self.mcs.items()
        >>>                 }.items(),
        >>>                 key=lambda item: item[0],
        >>>             ),
        >>>         )
        >>>
        >>> chain_results = {
        >>>     beta(): tensor([[4, 3], [2, 1]]),
        >>>     alpha(): tensor([[1, 2], [3, 4]]),
        >>> }
        >>> samples = MonteCarloSamples(chain_results=chain_results)
        >>> samples.magic.show_me()
        {'alpha()': [[1, 2], [3, 4]], 'beta()': [[4, 3], [2, 1]]}
    """
    return _register_accessor(name, MonteCarloSamples)
beanmachine-main
src/beanmachine/ppl/diagnostics/tools/utils/accessor.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Base class for diagnostic tools of a Bean Machine model.""" from __future__ import annotations import re from abc import ABC, abstractmethod from typing import Any, Mapping from beanmachine.ppl.diagnostics.tools import JS_DIST_DIR from beanmachine.ppl.diagnostics.tools.utils import plotting_utils from beanmachine.ppl.diagnostics.tools.utils.model_serializers import serialize_bm from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples from bokeh.embed import file_html, json_item from bokeh.models import Model from bokeh.resources import INLINE class DiagnosticToolBaseClass(ABC): """ Base class for visual diagnostic tools. Args: mcs (MonteCarloSamples): The return object from running a Bean Machine model. Attributes: data (Dict[str, List[List[float]]]): JSON serializable representation of the given `mcs` object. rv_names (List[str]): The list of random variables string names for the given model. num_chains (int): The number of chains of the model. num_draws (int): The number of draws of the model for each chain. palette (List[str]): A list of color values used for the glyphs in the figures. The colors are specifically chosen from the Colorblind palette defined in Bokeh. tool_js (str):The JavaScript callbacks needed to render the Bokeh tool independently from a Python server. """ @abstractmethod def __init__(self: DiagnosticToolBaseClass, mcs: MonteCarloSamples) -> None: self.data = serialize_bm(mcs) self.rv_names = ["Select a random variable..."] + list(self.data.keys()) self.num_chains = mcs.num_chains self.num_draws = mcs.get_num_samples() self.palette = plotting_utils.choose_palette(self.num_chains) self.tool_js = self.load_tool_js() def load_tool_js(self: DiagnosticToolBaseClass) -> str: """ Load the JavaScript for the diagnostic tool. Tools must be built by `yarn` in order for this method to find the appropriate file. If no file is found, then the tools will not function, and an error will be shown to the user. Returns: str: A string containing all the JavaScript necessary to run the tool in a notebook. Raises: FileNotFoundError: Raised if the diagnostic tool has not been built by `yarn` prior to its use. """ name = self.__class__.__name__ name_tokens = re.findall(r"[A-Z][^A-Z]*", name) name = "_".join(name_tokens) path = JS_DIST_DIR.joinpath(f"{name.lower()}.js") with path.open() as f: tool_js = f.read() return tool_js def show(self: DiagnosticToolBaseClass) -> None: """ Show the diagnostic tool in the notebook. This method uses IPython's `display` and `HTML` methods in order to display the diagnostic tool in a notebook. The Bokeh `Model` object returned by the `create_document` method is converted to HTML using Bokeh's `file_html` method. The advantage of encapsulating the tool in this manner is that it allows all the JavaScript needed to render the tool to exist in the output cell of the notebook. Doing so will allow the Bokeh Application to render in Google's Colab or Meta's Bento notebooks, which do not allow calls to third party JavaScript to be loaded and executed. The disadvantage is that it embeds duplicate JavaScript if more than one tool is used in a notebook. 
""" # import Ipython only when we need to render the plot, so that we don't # need to have jupyter notebook as one of our dependencies from IPython.display import display, HTML doc = self.create_document() html = file_html(doc, resources=INLINE, template=self.html_template()) display(HTML(html)) def html_template(self: DiagnosticToolBaseClass) -> str: """ HTML template object used to inject CSS styles for Bokeh Applications. We inject CSS into the output for the diagnostic tools because we need users to interact with the tool before it renders any statistics. The reason for this is due to the lack of a callback between Bokeh and BokehJS for an "on load" event. The necessary callback for an "on load" event is being worked on for the Bokeh 3.0 release. Until Bokeh 3.0 is released, this is a work-around that makes the user interact with the tool before any rendering occurs. Returns: str: Template for injecting CSS in the HTML returned by `create_document`. """ return """ {% block postamble %} <style> .bk.bm-tool-loading { overflow: hidden; } .bk.bm-tool-loading:before { position: absolute; height: 100%; width: 100%; content: ''; z-index: 1000; background-color: rgb(255, 255, 255, 0.75); border-color: lightgray; background-repeat: no-repeat; background-position: center; background-size: auto 50%; border-width: 1px; cursor: progress|; } .bk.bm-tool-loading.arcs:hover:before { content: "Please select a Query from the Select menu above."; font: x-large Arial, sans-serif; color: black; cursor: progress; display: flex; justify-content: center; align-items: center; } </style> {% endblock %} """ @abstractmethod def create_document(self: DiagnosticToolBaseClass) -> Model: """To be implemented by the inheriting class.""" ... def _tool_json(self: DiagnosticToolBaseClass) -> Mapping[Any, Any]: """ Debugging method used primarily when creating a new diagnostic tool. Returns: Dict[Any, Any]: Creates a JSON serializable object using Bokeh's `json_item` method and the output from `create_document`. """ doc = self.create_document() json_data = json_item(doc) return json_data
beanmachine-main
src/beanmachine/ppl/diagnostics/tools/utils/diagnostic_tool_base.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
beanmachine-main
src/beanmachine/ppl/diagnostics/tools/utils/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""Collection of serializers for the diagnostics tool use."""

from typing import Dict, List

from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples


def serialize_bm(samples: MonteCarloSamples) -> Dict[str, List[List[float]]]:
    """
    Convert Bean Machine models to a JSON serializable object.

    Args:
        samples (MonteCarloSamples): Output of a model from Bean Machine.

    Returns:
        Dict[str, List[List[float]]]: The JSON serializable object for use in the
            diagnostics tools.
    """
    rv_identifiers = list(samples.keys())
    reshaped_data = {}
    for rv_identifier in rv_identifiers:
        rv_data = samples[rv_identifier]
        rv_shape = rv_data.shape
        num_rv_chains = rv_shape[0]
        reshaped_data[f"{str(rv_identifier)}"] = []
        for rv_chain in range(num_rv_chains):
            chain_data = rv_data[rv_chain, :]
            chain_shape = chain_data.shape
            if len(chain_shape) > 3 and 1 not in list(chain_shape):
                msg = (
                    "Unable to handle data with dimensionality larger than "
                    "mxnxkx1."
                )
                raise ValueError(msg)
            elif len(chain_shape) == 3 and 1 in list(chain_shape):
                # The enclosing condition guarantees that one of the dimensions
                # is 1, so only the position of that dimension matters here.
                if chain_shape[1] == 1:
                    reshape_dimensions = chain_shape[2]
                else:
                    reshape_dimensions = chain_shape[1]
                for i, reshape_dimension in enumerate(range(reshape_dimensions)):
                    data = rv_data[rv_chain, :, reshape_dimension].reshape(-1)
                    if f"{str(rv_identifier)}[{i}]" not in reshaped_data:
                        reshaped_data[f"{str(rv_identifier)}[{i}]"] = []
                    reshaped_data[f"{str(rv_identifier)}[{i}]"].append(data.tolist())
            elif len(chain_shape) == 1:
                reshaped_data[f"{str(rv_identifier)}"].append(
                    rv_data[rv_chain, :].tolist(),
                )
    model = dict(sorted(reshaped_data.items(), key=lambda item: item[0]))
    return model
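
# Illustrative usage sketch: the random variable below is a toy defined only
# for this example.
if __name__ == "__main__":
    import beanmachine.ppl as bm
    import torch
    import torch.distributions as dist

    @bm.random_variable
    def theta():
        return dist.Normal(0.0, 1.0)

    # Two chains with three draws each for a scalar random variable.
    samples = MonteCarloSamples({theta(): torch.randn(2, 3)})
    # Keys become the string names of the random variables; values are nested
    # lists with one inner list per chain.
    print(serialize_bm(samples))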
beanmachine-main
src/beanmachine/ppl/diagnostics/tools/utils/model_serializers.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from beanmachine.ppl.inference.proposer.single_site_uniform_proposer import ( SingleSiteUniformProposer, ) from beanmachine.ppl.inference.single_site_inference import SingleSiteInference class SingleSiteUniformMetropolisHastings(SingleSiteInference): """ Single site uniform Metropolis-Hastings. This single site algorithm proposes from a uniform distribution (uniform Categorical for discrete variables). """ def __init__(self): super().__init__(SingleSiteUniformProposer)
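
# Illustrative usage sketch: the discrete model below is a toy defined only
# for this example.
if __name__ == "__main__":
    import beanmachine.ppl as bm
    import torch
    import torch.distributions as dist

    @bm.random_variable
    def rain():
        # Discrete support, so the uniform Categorical proposal applies.
        return dist.Bernoulli(0.2)

    @bm.random_variable
    def sprinkler():
        return dist.Bernoulli(0.9 - 0.8 * rain())

    samples = SingleSiteUniformMetropolisHastings().infer(
        queries=[rain()],
        observations={sprinkler(): torch.tensor(1.0)},
        num_samples=100,
        num_chains=1,
    )
    print(samples[rain()].shape)  # (num_chains, num_samples)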
beanmachine-main
src/beanmachine/ppl/inference/single_site_uniform_mh.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import ( SingleSiteAncestralProposer, ) from beanmachine.ppl.inference.single_site_inference import SingleSiteInference class SingleSiteAncestralMetropolisHastings(SingleSiteInference): def __init__(self): super().__init__(SingleSiteAncestralProposer)
beanmachine-main
src/beanmachine/ppl/inference/single_site_ancestral_mh.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Dict, List, Optional, Union

import torch
from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
from beanmachine.ppl.inference.single_site_ancestral_mh import (
    SingleSiteAncestralMetropolisHastings,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world import init_from_prior, RVDict, World
from torch import Tensor
from torch.distributions import Categorical
from tqdm.auto import trange


def _concat_rv_dicts(rvdict: List) -> Dict:
    out_dict = {}
    keys = list(rvdict[0].keys())
    for k in keys:
        t = []
        for x in rvdict:
            t.append(x[k])
        out_dict[k] = torch.cat(t, -1).squeeze(0)
    return out_dict


class Predictive(object):
    """
    Class for the posterior predictive distribution.
    """

    @staticmethod
    def _extract_values_from_world(
        world: World, queries: List[RVIdentifier]
    ) -> Dict[RVIdentifier, Tensor]:
        query_dict = {query: [] for query in queries}
        # Extract samples
        for query in queries:
            raw_val = world.call(query)
            if not isinstance(raw_val, torch.Tensor):
                raise TypeError(
                    "The value returned by a queried function must be a tensor."
                )
            query_dict[query].append(raw_val)
        query_dict = {node: torch.stack(val) for node, val in query_dict.items()}
        return query_dict

    @staticmethod  # noqa: C901
    def simulate(  # noqa: C901
        queries: List[RVIdentifier],
        posterior: Optional[Union[MonteCarloSamples, RVDict]] = None,
        num_samples: Optional[int] = None,
        vectorized: Optional[bool] = False,
        progress_bar: Optional[bool] = True,
    ) -> MonteCarloSamples:
        """
        Generates predictives from a generative model.

        For example::

            obs_queries = [likelihood(i) for i in range(10)]
            posterior = SingleSiteHamiltonianMonteCarlo(10, 0.1).infer(...)
            # generates one sample per world (same shape as `posterior` samples)
            predictives = simulate(obs_queries, posterior=posterior)

        To generate prior predictives::

            queries = [prior(), likelihood()]  # specify the full generative model
            # Monte Carlo samples of shape (num_samples, sample_shape)
            predictives = simulate(queries, num_samples=1000)

        :param queries: list of `random_variable`'s corresponding to the observations.
        :param posterior: Optional `MonteCarloSamples` or `RVDict` of the latent
            variables.
        :param num_samples: Number of prior predictive samples, defaults to 1. Should
            not be specified if `posterior` is specified.
        :returns: `MonteCarloSamples` of the generated predictives.
        """
        assert (
            (posterior is not None) + (num_samples is not None)
        ) == 1, "Only one of posterior or num_samples should be set."
inference = SingleSiteAncestralMetropolisHastings() if posterior is not None: if isinstance(posterior, dict): posterior = MonteCarloSamples([posterior]) obs = dict(posterior) if vectorized: sampler = inference.sampler( queries, obs, num_samples, initialize_fn=init_from_prior ) query_dict = Predictive._extract_values_from_world( next(sampler), queries ) for rvid, rv in query_dict.items(): if rv.dim() > 2: query_dict[rvid] = rv.squeeze(0) post_pred = MonteCarloSamples( query_dict, default_namespace="posterior_predictive", ) post_pred.add_groups(posterior) return post_pred else: # predictives are sequentially sampled preds = [] for c in range(posterior.num_chains): rv_dicts = [] for i in trange( posterior.get_num_samples(), desc="Samples collected", disable=not progress_bar, ): obs = {rv: posterior.get_chain(c)[rv][i] for rv in posterior} sampler = inference.sampler( queries, obs, num_samples, initialize_fn=init_from_prior ) rv_dicts.append( Predictive._extract_values_from_world( next(sampler), queries ) ) preds.append(_concat_rv_dicts(rv_dicts)) post_pred = MonteCarloSamples( preds, default_namespace="posterior_predictive", ) post_pred.add_groups(posterior) return post_pred else: obs = {} predictives = [] for _ in trange( # pyre-fixme[6]: For 1st param expected `int` but got `Optional[int]`. num_samples, desc="Samples collected", disable=not progress_bar, ): sampler = inference.sampler( queries, obs, num_samples, initialize_fn=init_from_prior ) query_dict = Predictive._extract_values_from_world( next(sampler), queries ) predictives.append(query_dict) rv_dict = {} for k in predictives: for rvid, rv in k.items(): if rvid not in rv_dict: rv_dict[rvid] = [] if rv.dim() < 2: rv = rv.unsqueeze(0) rv_dict[rvid].append(rv) for k, v in rv_dict.items(): rv_dict[k] = torch.cat(v, dim=1) prior_pred = MonteCarloSamples( rv_dict, default_namespace="prior_predictive", ) return prior_pred @staticmethod def empirical( queries: List[RVIdentifier], samples: MonteCarloSamples, num_samples: Optional[int] = 1, ) -> MonteCarloSamples: """ Samples from the empirical (marginal) distribution of the queried variables. :param queries: list of `random_variable`'s to be sampled. :param samples: `MonteCarloSamples` of the distribution. :param num_samples: Number of samples to sample (with replacement). Defaults to 1. :returns: `MonteCarloSamples` object containing the sampled random variables. """ rv_dict = {} num_chains = samples.num_chains total_num_samples = samples.get_num_samples() chain_indices = Categorical(torch.ones(num_chains)).sample((num_samples,)) sample_indices = Categorical(torch.ones(total_num_samples)).sample( (num_samples,) ) for q in queries: rv_dict[q] = samples.get_variable(q, include_adapt_steps=False)[ chain_indices, sample_indices ] return MonteCarloSamples([rv_dict]) simulate = Predictive.simulate empirical = Predictive.empirical
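
# Illustrative usage sketch mirroring the docstring examples above; the model
# is a toy defined only for this example.
if __name__ == "__main__":
    import beanmachine.ppl as bm
    import torch.distributions as dist

    @bm.random_variable
    def mu():
        return dist.Normal(0.0, 1.0)

    @bm.random_variable
    def y():
        return dist.Normal(mu(), 1.0)

    # Prior predictive: query the full generative model.
    prior_pred = simulate([mu(), y()], num_samples=100)

    # Posterior predictive: condition on data, then replay the likelihood once
    # per posterior draw.
    posterior = bm.GlobalNoUTurnSampler().infer(
        queries=[mu()],
        observations={y(): torch.tensor(1.5)},
        num_samples=100,
        num_chains=1,
    )
    post_pred = simulate([y()], posterior=posterior)
    print(post_pred[y()].shape)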
beanmachine-main
src/beanmachine/ppl/inference/predictive.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import inspect from collections import defaultdict from typing import ( Callable, cast, Dict, List, Optional, Set, Tuple, TYPE_CHECKING, Union, ) from beanmachine.ppl.experimental.torch_jit_backend import get_backend from beanmachine.ppl.inference.base_inference import BaseInference from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer from beanmachine.ppl.inference.proposer.nuts_proposer import NUTSProposer from beanmachine.ppl.inference.proposer.sequential_proposer import SequentialProposer from beanmachine.ppl.inference.proposer.single_site_uniform_proposer import ( SingleSiteUniformProposer, ) from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.world import World if TYPE_CHECKING: from enum import Enum class EllipsisClass(Enum): Ellipsis = "..." def __iter__(self): pass Ellipsis: EllipsisClass = EllipsisClass.Ellipsis else: EllipsisClass = type(Ellipsis) class _DefaultInference(BaseInference): """ Mixed inference class that handles both discrete and continuous RVs """ def __init__(self, nnc_compile: bool = True): self._disc_proposers = {} self._cont_proposer = None self._continuous_rvs = set() self._jit_backend = get_backend(nnc_compile, False) def get_proposers( self, world: World, target_rvs: Set[RVIdentifier], num_adaptive_sample: int, ) -> List[BaseProposer]: proposers = [] for node in target_rvs: if node not in self._disc_proposers: support = world.get_variable(node).distribution.support if not support.is_discrete: self._continuous_rvs.add(node) continue else: self._disc_proposers[node] = SingleSiteUniformProposer(node) proposers.append(self._disc_proposers[node]) if self._cont_proposer is not None: if len(self._cont_proposer._target_rvs) != len(self._continuous_rvs): raise ValueError( "Graph has changed between iterations. NUTS requires a" " static model." ) proposers.append(self._cont_proposer) else: if len(self._continuous_rvs): continuous_proposer = NUTSProposer( world, self._continuous_rvs, num_adaptive_sample, jit_backend=self._jit_backend, ) self._cont_proposer = continuous_proposer proposers.append(self._cont_proposer) return proposers def _get_rv_family(rv_wrapper: Callable) -> Callable: """A helper function that return the unbounded function for a give random variable wrapper""" if inspect.ismethod(rv_wrapper): # For methods, we'll need to use the unbounded function instead of the # bounded method to determine which proposer to apply return cast(Callable, rv_wrapper.__func__) else: return rv_wrapper def _get_nodes_for_rv_family( rv_families: Union[Callable, Tuple[Callable, ...]], rv_family_to_node: Dict[Callable, Set[RVIdentifier]], ) -> Set[RVIdentifier]: """A helper function that returns a list of nodes that belong to a particular RV family (or a particular tuple of RV families)""" # collect all nodes that belong to rv_families families = {rv_families} if isinstance(rv_families, Callable) else set(rv_families) nodes = set().union(*(rv_family_to_node.get(family, set()) for family in families)) return nodes class CompositionalInference(BaseInference): """ The ``CompositionalInference`` class enables combining multiple inference algorithms and blocking random variables together. By default, continuous variables will be blocked together and use the ``GlobalNoUTurnProposer``. 
Discrete variables will be proposed independently with ``SingleSiteUniformProposer``. To override the default behavior, you can pass an ``inference_dict``. To learn more about Compositional Inference, please see the `Compositional Inference <https://beanmachine.org/docs/compositional_inference/>`_ page on our website. Example 0 (use different inference method for different random variable families):: CompositionalInference({ model.foo: bm.SingleSiteAncestralMetropolisHastings(), model.bar: bm.SingleSiteNewtonianMonteCarlo(), }) Example 1 (override default inference method):: CompositionalInference({...: bm.SingleSiteAncestralMetropolisHastings()}) Example 2 (block inference (jointly propose) ``model.foo`` and ``model.bar``):: CompositionalInference({(model.foo, model.bar): bm.GlobalNoUTurnSampler()}) .. warning:: When using the default inference behavior, graphs (i.e. the number of latent variables) must be static and cannot change between iterations. Args: inference_dict: an optional inference configuration as shown above. nnc_compile: where available, use NNC to compile proposers. """ def __init__( self, inference_dict: Optional[ Dict[ Union[Callable, Tuple[Callable, ...], EllipsisClass], Union[BaseInference, Tuple[BaseInference, ...], EllipsisClass], ] ] = None, nnc_compile: bool = True, ): self.config: Dict[Union[Callable, Tuple[Callable, ...]], BaseInference] = {} # create a set for the RV families that are being covered in the config; this is # useful in get_proposers to determine which RV needs to be handle by the # default inference method self._covered_rv_families = set() default_inference = _DefaultInference(nnc_compile=nnc_compile) if inference_dict is not None: default_inference = inference_dict.pop(Ellipsis, default_inference) assert isinstance(default_inference, BaseInference) # preprocess inference dict for rv_families, inference in inference_dict.items(): # parse key if isinstance(rv_families, Callable): config_key = _get_rv_family(rv_families) self._covered_rv_families.add(config_key) else: # key is a tuple/block of families config_key = tuple(map(_get_rv_family, rv_families)) self._covered_rv_families.update(config_key) # parse value if isinstance(inference, BaseInference): config_val = inference elif inference == Ellipsis: config_val = default_inference else: # value is a tuple of inferences assert isinstance(inference, tuple) # there should be a one to one relationship between key and value assert isinstance(config_key, tuple) and len(config_key) == len( inference ) # convert to an equivalent nested compositional inference config_val = CompositionalInference( { rv_family: algorithm for rv_family, algorithm in zip(config_key, inference) } ) self.config[config_key] = config_val self._default_inference = default_inference def _get_default_num_adaptive_samples(self, num_samples: int) -> int: """Returns the default number of adaptive samples for CompositionalInference, which equals to the maximum number of adaptive samples recommended by each algorithm in the inference config.""" num_adaptive_per_algorithm = [ self._default_inference._get_default_num_adaptive_samples(num_samples) ] for inference in self.config.values(): num_adaptive_per_algorithm.append( inference._get_default_num_adaptive_samples(num_samples) ) return max(num_adaptive_per_algorithm) def get_proposers( self, world: World, target_rvs: Set[RVIdentifier], num_adaptive_sample: int, ) -> List[BaseProposer]: # create a RV family to RVIdentifier lookup map rv_family_to_node = defaultdict(set) for node in 
target_rvs: rv_family_to_node[node.wrapper].add(node) all_proposers = [] for target_families, inference in self.config.items(): nodes = _get_nodes_for_rv_family(target_families, rv_family_to_node) if len(nodes) > 0: proposers = inference.get_proposers(world, nodes, num_adaptive_sample) if isinstance(target_families, tuple): # tuple of RVs == block into a single accept/reject step proposers = [SequentialProposer(proposers)] all_proposers.extend(proposers) # apply default proposers on nodes whose family are not covered by any of the # proposers listed in the config remaining_families = rv_family_to_node.keys() - self._covered_rv_families remaining_nodes = _get_nodes_for_rv_family( tuple(remaining_families), rv_family_to_node ) if len(remaining_nodes) > 0: proposers = self._default_inference.get_proposers( world, remaining_nodes, num_adaptive_sample ) all_proposers.extend(proposers) return all_proposers
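
# Illustrative usage sketch based on the configuration examples in the class
# docstring; the model below is a toy defined only for this example.
if __name__ == "__main__":
    import beanmachine.ppl as bm
    import torch
    import torch.distributions as dist

    @bm.random_variable
    def component():
        return dist.Bernoulli(0.5)

    @bm.random_variable
    def loc():
        return dist.Normal(0.0, 1.0)

    @bm.random_variable
    def y():
        return dist.Normal(loc() + component(), 1.0)

    # The discrete variable uses ancestral MH while the continuous one uses NUTS.
    inference = CompositionalInference(
        {
            component: bm.SingleSiteAncestralMetropolisHastings(),
            loc: bm.GlobalNoUTurnSampler(),
        }
    )
    samples = inference.infer(
        queries=[component(), loc()],
        observations={y(): torch.tensor(0.5)},
        num_samples=200,
        num_chains=1,
    )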
beanmachine-main
src/beanmachine/ppl/inference/compositional_infer.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import copy import warnings from abc import ABCMeta, abstractmethod from functools import partial from typing import List, Optional, Set, Tuple import torch from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer from beanmachine.ppl.inference.sampler import Sampler from beanmachine.ppl.inference.utils import ( _execute_in_new_thread, _verify_queries_and_observations, seed as set_seed, VerboseLevel, ) from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.world import init_to_uniform, InitializeFn, RVDict, World from torch import multiprocessing as mp from tqdm.auto import tqdm from tqdm.notebook import tqdm as notebook_tqdm from typing_extensions import Literal class BaseInference(metaclass=ABCMeta): """ Abstract class all inference methods should inherit from. """ # maximum value of a seed _MAX_SEED_VAL: int = 2**32 - 1 @abstractmethod def get_proposers( self, world: World, target_rvs: Set[RVIdentifier], num_adaptive_sample: int, ) -> List[BaseProposer]: """ Returns the proposer(s) corresponding to every non-observed variable in target_rvs. Should be implemented by the specific inference algorithm. """ raise NotImplementedError def _get_default_num_adaptive_samples(self, num_samples: int) -> int: """ Returns a reasonable default number of adaptive samples for the algorithm. """ return 0 def _single_chain_infer( self, queries: List[RVIdentifier], observations: RVDict, num_samples: int, num_adaptive_samples: int, show_progress_bar: bool, initialize_fn: InitializeFn, max_init_retries: int, chain_id: int, seed: Optional[int] = None, ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]: """ Run a single chain of inference. Return a list of samples (in the same order as the queries) and a list of log likelihood on observations Args: queries: A list of queries. observations: A dictionary of observations. num_samples: Number of samples. num_adaptive_samples: Number of adaptive samples. show_progress_bar: Whether to display the progress bar. initialize_fn: A callable that takes in a distribution and returns a Tensor. max_init_retries: The number of attempts to make to initialize values for an inference before throwing an error. chain_id: The index of the current chain. 
seed: If provided, the seed will be used to initialize the state of the random number generators for the current chain """ if seed is not None: set_seed(seed) # A hack to fix the issue where tqdm doesn't render progress bar correctly in # subprocess in Jupyter notebook (https://github.com/tqdm/tqdm/issues/485) if show_progress_bar and issubclass(tqdm, notebook_tqdm): print(" ", end="", flush=True) sampler = self.sampler( queries, observations, num_samples, num_adaptive_samples, initialize_fn, max_init_retries, ) samples = [[] for _ in queries] log_likelihoods = [[] for _ in observations] # Main inference loop for world in tqdm( sampler, total=num_samples + num_adaptive_samples, desc="Samples collected", disable=not show_progress_bar, position=chain_id, ): for idx, obs in enumerate(observations): log_likelihoods[idx].append(world.log_prob([obs])) # Extract samples for idx, query in enumerate(queries): raw_val = world.call(query) if not isinstance(raw_val, torch.Tensor): raise TypeError( "The value returned by a queried function must be a tensor." ) samples[idx].append(raw_val) samples = [torch.stack(val) for val in samples] log_likelihoods = [torch.stack(val) for val in log_likelihoods] return samples, log_likelihoods def infer( self, queries: List[RVIdentifier], observations: RVDict, num_samples: int, num_chains: int = 4, num_adaptive_samples: Optional[int] = None, show_progress_bar: bool = True, initialize_fn: InitializeFn = init_to_uniform, max_init_retries: int = 100, run_in_parallel: bool = False, mp_context: Optional[Literal["fork", "spawn", "forkserver"]] = None, verbose: Optional[VerboseLevel] = None, ) -> MonteCarloSamples: """ Performs inference and returns a ``MonteCarloSamples`` object with samples from the posterior. Args: queries: List of queries observations: Observations as an RVDict keyed by RVIdentifier num_samples: Number of samples. num_chains: Number of chains to run, defaults to 4. num_adaptive_samples: Number of adaptive samples. If not provided, BM will fall back to algorithm-specific default value based on num_samples. show_progress_bar: Whether to display the progress bar, defaults to True. initialize_fn: A callable that takes in a distribution and returns a Tensor. The default behavior is to sample from Uniform(-2, 2) then biject to the support of the distribution. max_init_retries: The number of attempts to make to initialize values for an inference before throwing an error (default to 100). run_in_parallel: Whether to run multiple chains in parallel (with multiple processes). mp_context: The `multiprocessing context <https://docs.python.org/3.8/library/multiprocessing.html#contexts-and-start-methods>`_ to used for parallel inference. verbose: (Deprecated) Whether to display the progress bar. This option is deprecated, please use ``show_progress_bar`` instead. """ if verbose is not None: warnings.warn( "The `verbose` argument and `VerboseLevel` are " "deprecated and will be removed in the next release of Bean " "Machine. 
Please use `show_progress_bar` instead.", DeprecationWarning, stacklevel=2, # show the caller rather than this line ) show_progress_bar = bool(verbose) _verify_queries_and_observations( queries, observations, observations_must_be_rv=True ) if num_adaptive_samples is None: num_adaptive_samples = self._get_default_num_adaptive_samples(num_samples) single_chain_infer = partial( self._single_chain_infer, queries, observations, num_samples, num_adaptive_samples, show_progress_bar, initialize_fn, max_init_retries, ) if not run_in_parallel: chain_results = map(single_chain_infer, range(num_chains)) else: ctx = mp.get_context(mp_context) # We'd like to explicitly set a different seed for each process to avoid # duplicating the same RNG state for all chains first_seed = torch.randint(self._MAX_SEED_VAL, ()).item() seeds = [ (first_seed + 31 * chain_id) % self._MAX_SEED_VAL for chain_id in range(num_chains) ] # run single chain inference in a new thread in subprocesses to avoid # forking corrupted internal states # (https://github.com/pytorch/pytorch/issues/17199) single_chain_infer = partial(_execute_in_new_thread, single_chain_infer) with ctx.Pool( processes=num_chains, initializer=tqdm.set_lock, initargs=(ctx.Lock(),) ) as p: chain_results = p.starmap(single_chain_infer, enumerate(seeds)) all_samples, all_log_liklihoods = zip(*chain_results) # the hash of RVIdentifier can change when it is being sent to another process, # so we have to rely on the order of the returned list to determine which samples # correspond to which RVIdentifier all_samples = [dict(zip(queries, samples)) for samples in all_samples] # in python the order of keys in a dict is fixed, so we can rely on it all_log_liklihoods = [ dict(zip(observations.keys(), log_likelihoods)) for log_likelihoods in all_log_liklihoods ] return MonteCarloSamples( all_samples, num_adaptive_samples, all_log_liklihoods, observations, ) def sampler( self, queries: List[RVIdentifier], observations: RVDict, num_samples: Optional[int] = None, num_adaptive_samples: Optional[int] = None, initialize_fn: InitializeFn = init_to_uniform, max_init_retries: int = 100, ) -> Sampler: """ Returns a generator that returns a new world (represents a new state of the graph) each time it is iterated. If num_samples is not provided, this method will return an infinite generator. Args: queries: List of queries observations: Observations as an RVDict keyed by RVIdentifier num_samples: Number of samples, defaults to None for an infinite sampler. num_adaptive_samples: Number of adaptive samples. If not provided, BM will fall back to algorithm-specific default value based on num_samples. If num_samples is not provided either, then defaults to 0. initialize_fn: A callable that takes in a distribution and returns a Tensor. The default behavior is to sample from Uniform(-2, 2) then biject to the support of the distribution. max_init_retries: The number of attempts to make to initialize values for an inference before throwing an error (default to 100). 
""" _verify_queries_and_observations( queries, observations, observations_must_be_rv=True ) if num_adaptive_samples is None: if num_samples is None: num_adaptive_samples = 0 else: num_adaptive_samples = self._get_default_num_adaptive_samples( num_samples ) world = World.initialize_world( queries, observations, initialize_fn, max_init_retries, ) # start inference with a copy of self to ensure that multi-chain or multi # inference runs all start with the same pristine state kernel = copy.deepcopy(self) sampler = Sampler(kernel, world, num_samples, num_adaptive_samples) return sampler
beanmachine-main
src/beanmachine/ppl/inference/base_inference.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import List, Set import torch.distributions as dist from beanmachine.ppl.inference.base_inference import BaseInference from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer from beanmachine.ppl.inference.proposer.nmc import ( SingleSiteHalfSpaceNMCProposer, SingleSiteRealSpaceNMCProposer, SingleSiteSimplexSpaceNMCProposer, ) from beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import ( SingleSiteAncestralProposer, ) from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.world import World from beanmachine.ppl.world.utils import BetaDimensionTransform, is_constraint_eq LOGGER = logging.getLogger("beanmachine") class SingleSiteNewtonianMonteCarlo(BaseInference): """ Single site Newtonian Monte Carlo [1]. This algorithm selects a proposer based on the support of the random variable. Valid supports include real, positive real, and simplex. Each site is proposed independently. [1] Arora, Nim, et al. `Newtonian Monte Carlo: single-site MCMC meets second-order gradient methods` Args: real_space_alpha: alpha value for real space as specified in [1], defaults to 10.0 real_space_beta: beta value for real space as specified in [1], defaults to 1.0 """ def __init__( self, real_space_alpha: float = 10.0, real_space_beta: float = 1.0, ): self._proposers = {} self.alpha = real_space_alpha self.beta = real_space_beta def get_proposers( self, world: World, target_rvs: Set[RVIdentifier], num_adaptive_sample: int, ) -> List[BaseProposer]: proposers = [] for node in target_rvs: if node not in self._proposers: self._proposers[node] = self._init_nmc_proposer(node, world) proposers.append(self._proposers[node]) return proposers def _init_nmc_proposer(self, node: RVIdentifier, world: World) -> BaseProposer: """ A helper function that initialize a NMC proposer for the given node. The type of NMC proposer will be chosen based on a node's support. """ distribution = world.get_variable(node).distribution support = distribution.support if is_constraint_eq(support, dist.constraints.real): return SingleSiteRealSpaceNMCProposer(node, self.alpha, self.beta) elif any( is_constraint_eq( support, (dist.constraints.greater_than, dist.constraints.greater_than_eq), ) ): return SingleSiteHalfSpaceNMCProposer(node) elif is_constraint_eq(support, dist.constraints.simplex) or ( isinstance(support, dist.constraints.independent) and (support.base_constraint == dist.constraints.unit_interval) ): return SingleSiteSimplexSpaceNMCProposer(node) elif isinstance(distribution, dist.Beta): return SingleSiteSimplexSpaceNMCProposer( node, transform=BetaDimensionTransform() ) else: LOGGER.warning( f"Node {node} has unsupported constraints. " + "Proposer falls back to SingleSiteAncestralProposer.\n" ) return SingleSiteAncestralProposer(node)
beanmachine-main
src/beanmachine/ppl/inference/single_site_nmc.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from beanmachine.ppl.inference.bmg_inference import BMGInference from beanmachine.ppl.inference.compositional_infer import CompositionalInference from beanmachine.ppl.inference.hmc_inference import ( GlobalHamiltonianMonteCarlo, SingleSiteHamiltonianMonteCarlo, ) from beanmachine.ppl.inference.nuts_inference import ( GlobalNoUTurnSampler, SingleSiteNoUTurnSampler, ) from beanmachine.ppl.inference.predictive import empirical, simulate from beanmachine.ppl.inference.single_site_ancestral_mh import ( SingleSiteAncestralMetropolisHastings, ) from beanmachine.ppl.inference.single_site_nmc import SingleSiteNewtonianMonteCarlo from beanmachine.ppl.inference.single_site_random_walk import SingleSiteRandomWalk from beanmachine.ppl.inference.single_site_uniform_mh import ( SingleSiteUniformMetropolisHastings, ) from beanmachine.ppl.inference.utils import seed, VerboseLevel __all__ = [ "BMGInference", "CompositionalInference", "GlobalHamiltonianMonteCarlo", "GlobalNoUTurnSampler", "SingleSiteAncestralMetropolisHastings", "SingleSiteHamiltonianMonteCarlo", "SingleSiteNewtonianMonteCarlo", "SingleSiteNoUTurnSampler", "SingleSiteRandomWalk", "SingleSiteUniformMetropolisHastings", "VerboseLevel", "empirical", "seed", "simulate", ]
beanmachine-main
src/beanmachine/ppl/inference/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Set from beanmachine.ppl.experimental.torch_jit_backend import get_backend from beanmachine.ppl.inference.base_inference import BaseInference from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer from beanmachine.ppl.inference.proposer.hmc_proposer import HMCProposer from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.world import World class GlobalHamiltonianMonteCarlo(BaseInference): """ Global (multi-site) Hamiltonian Monte Carlo [1] sampler. This global sampler blocks all of the target random_variables in the World together and proposes them jointly. [1] Neal, Radford. `MCMC Using Hamiltonian Dynamics`. Args: trajectory_length (float): Length of single trajectory. initial_step_size (float): Defaults to 1.0. adapt_step_size (bool): Whether to adapt the step size, Defaults to True, adapt_mass_matrix (bool): Whether to adapt the mass matrix. Defaults to True, target_accept_prob (float): Target accept prob. Increasing this value would lead to smaller step size. Defaults to 0.8. nnc_compile: If True, NNC compiler will be used to accelerate the inference. experimental_inductor_compile: If True, TorchInductor will be used to accelerate the inference. """ def __init__( self, trajectory_length: float, initial_step_size: float = 1.0, adapt_step_size: bool = True, adapt_mass_matrix: bool = True, full_mass_matrix: bool = False, target_accept_prob: float = 0.8, nnc_compile: bool = False, experimental_inductor_compile: bool = False, ): self.trajectory_length = trajectory_length self.initial_step_size = initial_step_size self.adapt_step_size = adapt_step_size self.adapt_mass_matrix = adapt_mass_matrix self.full_mass_matrix = full_mass_matrix self.target_accept_prob = target_accept_prob self.jit_backend = get_backend(nnc_compile, experimental_inductor_compile) self._proposer = None def _get_default_num_adaptive_samples(self, num_samples: int) -> int: return num_samples // 2 def get_proposers( self, world: World, target_rvs: Set[RVIdentifier], num_adaptive_sample: int, ) -> List[BaseProposer]: if self._proposer is None: self._proposer = HMCProposer( world, target_rvs, num_adaptive_sample, self.trajectory_length, self.initial_step_size, self.adapt_step_size, self.adapt_mass_matrix, self.full_mass_matrix, self.target_accept_prob, self.jit_backend, ) return [self._proposer] class SingleSiteHamiltonianMonteCarlo(BaseInference): """ Single site Hamiltonian Monte Carlo [1] sampler. During inference, each random variable is proposed through its own leapfrog trajectory while fixing the rest of World as constant. [1] Neal, Radford. `MCMC Using Hamiltonian Dynamics`. Args: trajectory_length (float): Length of single trajectory. initial_step_size (float): Defaults to 1.0. adapt_step_size (bool): Whether to adapt the step size, Defaults to True, adapt_mass_matrix (bool): Whether to adapt the mass matrix. Defaults to True, target_accept_prob (float): Target accept prob. Increasing this value would lead to smaller step size. Defaults to 0.8. nnc_compile: If True, NNC compiler will be used to accelerate the inference. experimental_inductor_compile: If True, TorchInductor will be used to accelerate the inference. 
""" def __init__( self, trajectory_length: float, initial_step_size: float = 1.0, adapt_step_size: bool = True, adapt_mass_matrix: bool = True, full_mass_matrix: bool = False, target_accept_prob: float = 0.8, nnc_compile: bool = True, experimental_inductor_compile: bool = False, ): self.trajectory_length = trajectory_length self.initial_step_size = initial_step_size self.adapt_step_size = adapt_step_size self.adapt_mass_matrix = adapt_mass_matrix self.full_mass_matrix = full_mass_matrix self.target_accept_prob = target_accept_prob self.jit_backend = get_backend(nnc_compile, experimental_inductor_compile) self._proposers = {} def _get_default_num_adaptive_samples(self, num_samples: int) -> int: return num_samples // 2 def get_proposers( self, world: World, target_rvs: Set[RVIdentifier], num_adaptive_sample: int, ) -> List[BaseProposer]: proposers = [] for node in target_rvs: if node not in self._proposers: self._proposers[node] = HMCProposer( world, {node}, num_adaptive_sample, self.trajectory_length, self.initial_step_size, self.adapt_step_size, self.adapt_mass_matrix, self.full_mass_matrix, self.target_accept_prob, self.jit_backend, ) proposers.append(self._proposers[node]) return proposers
beanmachine-main
src/beanmachine/ppl/inference/hmc_inference.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Set, Type from beanmachine.ppl.inference.base_inference import BaseInference from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer from beanmachine.ppl.inference.proposer.base_single_site_mh_proposer import ( BaseSingleSiteMHProposer, ) from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.world import World class SingleSiteInference(BaseInference): """ Base class for single site inference algorithms. Args: proposer_class: Class of proposer to initialize with """ def __init__(self, proposer_class: Type[BaseSingleSiteMHProposer], **kwargs): self.proposer_class = proposer_class self.inference_args = kwargs self._proposers = {} def get_proposers( self, world: World, target_rvs: Set[RVIdentifier], num_adaptive_sample: int, ) -> List[BaseProposer]: proposers = [] for node in target_rvs: if node not in self._proposers: self._proposers[node] = self.proposer_class( # pyre-ignore [45] node, **self.inference_args ) proposers.append(self._proposers[node]) return proposers
beanmachine-main
src/beanmachine/ppl/inference/single_site_inference.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Set from beanmachine.ppl.experimental.torch_jit_backend import get_backend from beanmachine.ppl.inference.base_inference import BaseInference from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer from beanmachine.ppl.inference.proposer.nuts_proposer import NUTSProposer from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.world import World class GlobalNoUTurnSampler(BaseInference): """ Global No U-turn sampler [1]. This sampler blocks multiple variables together in the World and samples them jointly. This sampler adaptively sets the hyperparameters of the HMC kernel. [1] Hoffman and Gelman. `The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo`. [2] Betancourt, Michael. `A Conceptual Introduction to Hamiltonian Monte Carlo`. Args: max_tree_depth (int): Maximum tree depth, defaults to 10. max_delta_energy (float): Maximum delta energy (for numerical stability), defaults to 1000. initial_step_size (float): Defaults to 1.0. adapt_step_size (bool): Whether to adapt step size with Dual averaging as suggested in [1], defaults to True. adapt_mass_matrix (bool) Whether to adapt mass matrix using Welford Scheme, defaults to True. multinomial_sampling (bool): Whether to use multinomial sampling as in [2], defaults to True. target_accept_prob (float): Target accept probability. Increasing this would lead to smaller step size. Defaults to 0.8. nnc_compile: If True, NNC compiler will be used to accelerate the inference. experimental_inductor_compile: If True, TorchInductor will be used to accelerate the inference. """ def __init__( self, max_tree_depth: int = 10, max_delta_energy: float = 1000.0, initial_step_size: float = 1.0, adapt_step_size: bool = True, adapt_mass_matrix: bool = True, full_mass_matrix: bool = False, multinomial_sampling: bool = True, target_accept_prob: float = 0.8, nnc_compile: bool = True, experimental_inductor_compile: bool = False, ): self.max_tree_depth = max_tree_depth self.max_delta_energy = max_delta_energy self.initial_step_size = initial_step_size self.adapt_step_size = adapt_step_size self.adapt_mass_matrix = adapt_mass_matrix self.full_mass_matrix = full_mass_matrix self.multinomial_sampling = multinomial_sampling self.target_accept_prob = target_accept_prob self.jit_backend = get_backend(nnc_compile, experimental_inductor_compile) self._proposer = None def _get_default_num_adaptive_samples(self, num_samples: int) -> int: return num_samples // 2 def get_proposers( self, world: World, target_rvs: Set[RVIdentifier], num_adaptive_sample: int, ) -> List[BaseProposer]: if self._proposer is None: self._proposer = NUTSProposer( world, target_rvs, num_adaptive_sample, self.max_tree_depth, self.max_delta_energy, self.initial_step_size, self.adapt_step_size, self.adapt_mass_matrix, self.full_mass_matrix, self.multinomial_sampling, self.target_accept_prob, self.jit_backend, ) return [self._proposer] class SingleSiteNoUTurnSampler(BaseInference): """ Single site No U-turn sampler [1]. This sampler proposes value for each random variable in the World one at a time. This sampler adaptively sets the hyperparameters of the HMC kernel. [1] Hoffman and Gelman. `The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo`. [2] Betancourt, Michael. 
`A Conceptual Introduction to Hamiltonian Monte Carlo`. Args: max_tree_depth (int): Maximum tree depth, defaults to 10. max_delta_energy (float): Maximum delta energy (for numerical stability), defaults to 1000. initial_step_size (float): Defaults to 1.0. adapt_step_size (bool): Whether to adapt step size with Dual averaging as suggested in [1], defaults to True. adapt_mass_matrix (bool) Whether to adapt mass matrix using Welford Scheme, defaults to True. multinomial_sampling (bool): Whether to use multinomial sampling as in [2], defaults to True. target_accept_prob (float): Target accept probability. Increasing this would lead to smaller step size. Defaults to 0.8. nnc_compile: If True, NNC compiler will be used to accelerate the inference. experimental_inductor_compile: If True, TorchInductor will be used to accelerate the inference. """ def __init__( self, max_tree_depth: int = 10, max_delta_energy: float = 1000.0, initial_step_size: float = 1.0, adapt_step_size: bool = True, adapt_mass_matrix: bool = True, full_mass_matrix: bool = False, multinomial_sampling: bool = True, target_accept_prob: float = 0.8, nnc_compile: bool = False, experimental_inductor_compile: bool = False, ): self.max_tree_depth = max_tree_depth self.max_delta_energy = max_delta_energy self.initial_step_size = initial_step_size self.adapt_step_size = adapt_step_size self.adapt_mass_matrix = adapt_mass_matrix self.full_mass_matrix = full_mass_matrix self.multinomial_sampling = multinomial_sampling self.target_accept_prob = target_accept_prob self.jit_backend = get_backend(nnc_compile, experimental_inductor_compile) self._proposers = {} def _get_default_num_adaptive_samples(self, num_samples: int) -> int: return num_samples // 2 def get_proposers( self, world: World, target_rvs: Set[RVIdentifier], num_adaptive_sample: int, ) -> List[BaseProposer]: proposers = [] for node in target_rvs: if node not in self._proposers: self._proposers[node] = NUTSProposer( world, {node}, num_adaptive_sample, self.max_tree_depth, self.max_delta_energy, self.initial_step_size, self.adapt_step_size, self.adapt_mass_matrix, self.full_mass_matrix, self.multinomial_sampling, self.target_accept_prob, self.jit_backend, ) proposers.append(self._proposers[node]) return proposers
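
# Illustrative usage sketch: the hierarchical model below is a toy defined
# only for this example.
if __name__ == "__main__":
    import beanmachine.ppl as bm
    import torch
    import torch.distributions as dist

    @bm.random_variable
    def mu():
        return dist.Normal(0.0, 1.0)

    @bm.random_variable
    def sigma():
        return dist.Gamma(1.0, 1.0)

    @bm.random_variable
    def y(i):
        return dist.Normal(mu(), sigma())

    observations = {y(i): torch.tensor(v) for i, v in enumerate([0.9, 1.2, 1.0])}
    # mu and sigma are blocked together; step size and mass matrix adapt over
    # the first half of the samples by default.
    samples = GlobalNoUTurnSampler(nnc_compile=False).infer(
        queries=[mu(), sigma()],
        observations=observations,
        num_samples=200,
        num_chains=2,
    )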
beanmachine-main
src/beanmachine/ppl/inference/nuts_inference.py
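# Illustrative usage sketch for GlobalNoUTurnSampler on a hypothetical
# normal-normal model; only the sampler API comes from the module above, and
# nnc_compile is turned off so the sketch has no compiler dependency.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist


@bm.random_variable
def mu():
    return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))


@bm.random_variable
def y(i):
    return dist.Normal(mu(), torch.tensor(1.0))


observations = {y(i): torch.tensor(1.0) for i in range(5)}
samples = bm.GlobalNoUTurnSampler(max_tree_depth=10, nnc_compile=False).infer(
    queries=[mu()],
    observations=observations,
    num_samples=500,
    num_chains=2,
    num_adaptive_samples=200,
)
print(samples[mu()].shape)  # (num_chains, num_samples): adaptive draws excluded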
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Dict, Iterator, List, Mapping, NamedTuple, Optional, Union import arviz as az import torch import xarray as xr from beanmachine.ppl.inference.utils import detach_samples, merge_dicts from beanmachine.ppl.model.rv_identifier import RVIdentifier RVDict = Dict[RVIdentifier, torch.Tensor] class Samples(NamedTuple): samples: RVDict adaptive_samples: RVDict class MonteCarloSamples(Mapping[RVIdentifier, torch.Tensor]): """ Represents a view of the data representing the results of infer If no chain is specified, the data across all chains is accessible If a chain is specified, only the data from the chain will be accessible """ def __init__( self, chain_results: Union[List[RVDict], RVDict], num_adaptive_samples: int = 0, logll_results: Optional[Union[List[RVDict], RVDict]] = None, observations: Optional[RVDict] = None, stack_not_cat: bool = True, default_namespace: str = "posterior", ): self.namespaces = {} self.default_namespace = default_namespace if default_namespace not in self.namespaces: self.namespaces[default_namespace] = {} if isinstance(chain_results, list): self.num_chains = len(chain_results) chain_results = merge_dicts(chain_results, 0, stack_not_cat) else: self.num_chains = next(iter(chain_results.values())).shape[0] self.num_adaptive_samples = num_adaptive_samples self.namespaces[default_namespace] = Samples({}, {}) for rv, val in chain_results.items(): self.adaptive_samples[rv] = val[:, :num_adaptive_samples] self.samples[rv] = val[:, num_adaptive_samples:] if logll_results is not None: if isinstance(logll_results, list): logll = merge_dicts(logll_results, 0, stack_not_cat) else: logll = logll_results self.log_likelihoods = {} self.adaptive_log_likelihoods = {} for rv, val in logll.items(): self.adaptive_log_likelihoods[rv] = val[:, :num_adaptive_samples] self.log_likelihoods[rv] = val[:, num_adaptive_samples:] else: self.log_likelihoods = None self.adaptive_log_likelihoods = None self.observations = observations # single_chain_view is only set when self.get_chain is called self.single_chain_view = False @property def samples(self): return self.namespaces[self.default_namespace].samples @property def adaptive_samples(self): return self.namespaces[self.default_namespace].adaptive_samples def __getitem__(self, rv: RVIdentifier) -> torch.Tensor: """ :param rv: random variable to view values of :results: samples drawn during inference for the specified variable """ return self.get_variable(rv, include_adapt_steps=False) def __iter__(self) -> Iterator[RVIdentifier]: return iter(self.samples) def __len__(self) -> int: return len(self.samples) def __str__(self) -> str: return str(self.samples) def get_chain(self, chain: int = 0) -> "MonteCarloSamples": """ Return a MonteCarloSamples with restricted view to a specified chain :param chain: specific chainto view. 
:returns: view of the data restricted to specified chain """ if self.single_chain_view: raise ValueError( "The current MonteCarloSamples object has already been" " restricted to a single chain" ) elif chain < 0 or chain >= self.num_chains: raise IndexError("Please specify a valid chain") samples = {rv: self.get_variable(rv, True)[[chain]] for rv in self} if self.log_likelihoods is None: logll = None else: logll = { rv: self.get_log_likelihoods(rv, True)[[chain]] for rv in self.log_likelihoods } new_mcs = MonteCarloSamples( chain_results=samples, num_adaptive_samples=self.num_adaptive_samples, logll_results=logll, observations=self.observations, default_namespace=self.default_namespace, ) new_mcs.single_chain_view = True return new_mcs def get_variable( self, rv: RVIdentifier, include_adapt_steps: bool = False, thinning: int = 1, namespace: Optional[str] = None, ) -> torch.Tensor: """ Let C be the number of chains, S be the number of samples If include_adapt_steps, S' = S. Else, S' = S - num_adaptive_samples. if no chain specified: samples[var] returns a Tensor of (C, S', (shape of Var)) if a chain is specified: samples[var] returns a Tensor of (S', (shape of Var)) :param rv: random variable to see samples :param include_adapt_steps: Indicates whether the beginning of the chain should be included with the healthy samples. :returns: samples drawn during inference for the specified variable """ if not isinstance(rv, RVIdentifier): raise TypeError( "The key is required to be a random variable " + f"but is of type {type(rv).__name__}." ) if namespace is None: namespace = self.default_namespace samples = self.namespaces[namespace].samples[rv] if include_adapt_steps: samples = torch.cat( [self.namespaces[namespace].adaptive_samples[rv], samples], dim=1, ) if thinning > 1: samples = samples[:, ::thinning] if self.single_chain_view: samples = samples.squeeze(0) return samples def get_log_likelihoods( self, rv: RVIdentifier, include_adapt_steps: bool = False, ) -> torch.Tensor: """ :returns: log_likelihoods computed during inference for the specified variable """ if not isinstance(rv, RVIdentifier): raise TypeError( "The key is required to be a random variable " + f"but is of type {type(rv).__name__}." ) logll = self.log_likelihoods[rv] if include_adapt_steps: logll = torch.cat([self.adaptive_log_likelihoods[rv], logll], dim=1) if self.single_chain_view: logll = logll.squeeze(0) return logll def get( self, rv: RVIdentifier, default: Any = None, chain: Optional[int] = None, include_adapt_steps: bool = False, thinning: int = 1, ): """ Return the value of the random variable if rv is in the dictionary, otherwise return the default value. This method is analogous to Python's dict.get(). The chain and include_adapt_steps parameters serve the same purpose as in get_chain and get_variable. """ if rv not in self.samples: return default if chain is None: samples = self else: samples = self.get_chain(chain) return samples.get_variable(rv, include_adapt_steps, thinning) def get_num_samples(self, include_adapt_steps: bool = False) -> int: """ :returns: the number of samples run during inference """ num_samples = next(iter(self.samples.values())).shape[1] if include_adapt_steps: return num_samples + self.num_adaptive_samples return num_samples def to_xarray(self, include_adapt_steps: bool = False) -> xr.Dataset: """ Return an xarray.Dataset from MonteCarloSamples. 
""" inference_data = self.to_inference_data(include_adapt_steps) if not include_adapt_steps: return inference_data["posterior"] else: return xr.concat( [inference_data["warmup_posterior"], inference_data["posterior"]], dim="draw", ) def add_groups(self, mcs: "MonteCarloSamples"): if self.observations is None: self.observations = mcs.observations if self.log_likelihoods is None: self.log_likelihoods = mcs.log_likelihoods if self.adaptive_log_likelihoods is None: self.adaptive_log_likelihoods = mcs.adaptive_log_likelihoods for n in mcs.namespaces: if n not in self.namespaces: self.namespaces[n] = mcs.namespaces[n] def to_inference_data(self, include_adapt_steps: bool = False) -> az.InferenceData: """ Return an az.InferenceData from MonteCarloSamples. """ if "posterior" in self.namespaces: posterior = detach_samples(self.namespaces["posterior"].samples) if self.num_adaptive_samples > 0: warmup_posterior = detach_samples( self.namespaces["posterior"].adaptive_samples ) else: warmup_posterior = None else: posterior = None warmup_posterior = None if self.num_adaptive_samples > 0: warmup_log_likelihood = self.adaptive_log_likelihoods if warmup_log_likelihood is not None: warmup_log_likelihood = detach_samples(warmup_log_likelihood) else: warmup_log_likelihood = None if "posterior_predictive" in self.namespaces: posterior_predictive = detach_samples( self.namespaces["posterior_predictive"].samples ) if self.num_adaptive_samples > 0: warmup_posterior_predictive = detach_samples( self.namespaces["posterior"].adaptive_samples ) else: warmup_posterior_predictive = None else: posterior_predictive = None warmup_posterior_predictive = None if "prior_predictive" in self.namespaces: prior_predictive = detach_samples( self.namespaces["prior_predictive"].samples ) else: prior_predictive = None if self.log_likelihoods is not None: log_likelihoods = detach_samples(self.log_likelihoods) else: log_likelihoods = None if self.observations is not None: observed_data = detach_samples(self.observations) else: observed_data = None return az.from_dict( posterior=posterior, warmup_posterior=warmup_posterior, posterior_predictive=posterior_predictive, warmup_posterior_predictive=warmup_posterior_predictive, prior_predictive=prior_predictive, save_warmup=include_adapt_steps, warmup_log_likelihood=warmup_log_likelihood, log_likelihood=log_likelihoods, observed_data=observed_data, )
beanmachine-main
src/beanmachine/ppl/inference/monte_carlo_samples.py
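# Illustrative sketch of the MonteCarloSamples accessors, built directly from
# hypothetical per-chain tensors rather than a real inference run.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples


@bm.random_variable
def theta():
    return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))


chains = [{theta(): torch.randn(100)} for _ in range(2)]  # 2 chains x 100 draws
samples = MonteCarloSamples(chains, num_adaptive_samples=10)
print(samples[theta()].shape)               # (2, 90): adaptive draws excluded
print(samples.get_chain(0)[theta()].shape)  # (90,): single-chain view
print(samples.get_num_samples(include_adapt_steps=True))  # 100
idata = samples.to_inference_data()         # arviz.InferenceData for diagnostics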
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import random from concurrent.futures import ThreadPoolExecutor from enum import Enum from typing import Any, Callable, Dict, List import numpy as np import numpy.random import torch from beanmachine.ppl.model.rv_identifier import RVIdentifier RVDict = Dict[RVIdentifier, torch.Tensor] # Detect and report if a user fails to meet the inference contract. def _verify_queries(queries: List[RVIdentifier]) -> None: if not isinstance(queries, list): t = type(queries).__name__ raise TypeError( f"Parameter 'queries' is required to be a list but is of type {t}." ) for query in queries: if not isinstance(query, RVIdentifier): t = type(query).__name__ raise TypeError( f"A query is required to be a random variable but is of type {t}." ) for arg in query.arguments: if isinstance(arg, RVIdentifier): raise TypeError( "The arguments to a query must not be random variables." ) def _verify_observations( observations: Dict[RVIdentifier, torch.Tensor], must_be_rv: bool ) -> None: if not isinstance(observations, dict): t = type(observations).__name__ raise TypeError( f"Parameter 'observations' is required to be a dictionary but is of type {t}." ) for rv, value in observations.items(): if not isinstance(rv, RVIdentifier): t = type(rv).__name__ raise TypeError( f"An observation is required to be a random variable but is of type {t}." ) if not isinstance(value, torch.Tensor): t = type(value).__name__ raise TypeError( f"An observed value is required to be a tensor but is of type {t}." ) if must_be_rv and rv.is_functional: raise TypeError( "An observation must observe a random_variable, not a functional." ) for arg in rv.arguments: if isinstance(arg, RVIdentifier): raise TypeError( "The arguments to an observation must not be random variables." ) def _verify_queries_and_observations( queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor], observations_must_be_rv: bool, ) -> None: _verify_queries(queries) _verify_observations(observations, observations_must_be_rv) class VerboseLevel(Enum): """ Enum class which is used to set how much output is printed during inference. LOAD_BAR enables tqdm for full inference loop. """ OFF = 0 LOAD_BAR = 1 def safe_log_prob_sum(distrib, value: torch.Tensor) -> torch.Tensor: "Computes log_prob, converting out of support exceptions to -Infinity." try: return distrib.log_prob(value).sum() except (RuntimeError, ValueError) as e: if not distrib.support.check(value).all(): return torch.tensor(float("-Inf")).to(value.device) else: raise e def merge_dicts( dicts: List[RVDict], dim: int = 0, stack_not_cat: bool = True ) -> RVDict: """ A helper function that merge multiple dicts of samples into a single dictionary, stacking across a new dimension """ rv_keys = set().union(*(rv_dict.keys() for rv_dict in dicts)) for idx, d in enumerate(dicts): if not rv_keys.issubset(d.keys()): raise ValueError(f"{rv_keys - d.keys()} are missing in dict {idx}") if stack_not_cat: return {rv: torch.stack([d[rv] for d in dicts], dim=dim) for rv in rv_keys} else: return {rv: torch.cat([d[rv] for d in dicts], dim=dim) for rv in rv_keys} def seed(seed: int) -> None: torch.manual_seed(seed) random.seed(seed) numpy.random.seed(seed) def _execute_in_new_thread(f: Callable, *args, **kwargs) -> Any: """A helper function to execute the given function in a new thread. 
This is used to resolve the deadlock issue with fork-based multiprocessing (see this PyTorch issue for details <https://github.com/pytorch/pytorch/issues/17199#issuecomment-833226969>_)""" with ThreadPoolExecutor() as executor: return executor.submit(f, *args, **kwargs).result() def detach_samples( samples: Dict[RVIdentifier, torch.Tensor], ) -> Dict[RVIdentifier, np.ndarray]: """Detach pytorch tensors. Args: samples (Dict[RVIdentifier, torch.Tensor]): Dictionary of RVIdentifiers with original torch tensors. Returns: Dict[RVIdentifier, np.ndarray]: Dictionary of RVIdentifiers with converted NumPy arrays. """ return {key: value.detach().numpy() for key, value in samples.items()}
beanmachine-main
src/beanmachine/ppl/inference/utils.py
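# Illustrative sketch of the helpers above: `seed` fixes the torch, random and
# numpy generators, and `merge_dicts` combines hypothetical per-chain sample
# dicts either by stacking a new chain dimension or by concatenation.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.utils import merge_dicts, seed


@bm.random_variable
def w():
    return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))


seed(17)
chain_a = {w(): torch.randn(50)}
chain_b = {w(): torch.randn(50)}
stacked = merge_dicts([chain_a, chain_b], dim=0, stack_not_cat=True)
print(stacked[w()].shape)   # (2, 50): new leading chain dimension
appended = merge_dicts([chain_a, chain_b], dim=0, stack_not_cat=False)
print(appended[w()].shape)  # (100,): chains concatenated along dim 0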
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import annotations

import random
import warnings
from types import TracebackType
from typing import Generator, NoReturn, Optional, Type, TYPE_CHECKING

import torch

if TYPE_CHECKING:
    from beanmachine.ppl.inference.base_inference import BaseInference

from beanmachine.ppl.world import World


class Sampler(Generator[World, Optional[World], None]):
    """
    Samplers are generators of Worlds that generate samples from the joint.
    They are used to generate Monte Carlo samples during MCMC inference.
    At each iteration, the proposer(s) propose values for the random variables,
    which are then accepted according to the MH ratio. The next world is then
    returned.

    Args:
        kernel (BaseInference): Inference class to get proposers from.
        initial_world (World): Optional initial world to initialize from.
        num_samples (int, Optional): Number of samples. If none is specified,
            num_samples = inf.
        num_adaptive_samples (int, Optional): Number of adaptive samples,
            defaults to 0.
    """

    def __init__(
        self,
        kernel: BaseInference,
        initial_world: World,
        num_samples: Optional[int] = None,
        num_adaptive_samples: int = 0,
    ):
        self.kernel = kernel
        self.world = initial_world
        self._num_samples_remaining = (
            float("inf") if num_samples is None else num_samples
        )
        self._num_samples_remaining += num_adaptive_samples
        self._num_adaptive_sample_remaining = num_adaptive_samples

    def send(self, world: Optional[World] = None) -> World:
        """
        At each iteration, the following is executed:

        1. Shuffle all the proposers in the world.
        2. For each proposer, propose a world and accept/reject it based on
           MH ratio.
        3. Run adaptation method if applicable.
        4. Update the new current world as `self.world`.

        Args:
            world: Optional World to use to propose. If none is provided,
                `self.world` is used.
        """
        if world is None:
            world = self.world

        if self._num_samples_remaining <= 0:
            raise StopIteration

        proposers = self.kernel.get_proposers(
            world, world.latent_nodes, self._num_adaptive_sample_remaining
        )
        random.shuffle(proposers)

        for proposer in proposers:
            try:
                new_world, accept_log_prob = proposer.propose(world)
                accept_log_prob = accept_log_prob.clamp(max=0.0)
                accepted = torch.rand_like(accept_log_prob).log() < accept_log_prob
                if accepted:
                    world = new_world
            except RuntimeError as e:
                if "singular U" in str(e) or "input is not positive-definite" in str(e):
                    # since it's normal to run into cholesky error during GP, instead of
                    # throwing an error, we simply skip current proposer (which is
                    # equivalent to a rejection) and will retry in the next iteration
                    warnings.warn(f"Proposal rejected: {e}", RuntimeWarning)
                    accepted = False
                    accept_log_prob = -torch.inf
                else:
                    raise e

            if self._num_adaptive_sample_remaining > 0:
                proposer.do_adaptation(
                    world=world, accept_log_prob=accept_log_prob, is_accepted=accepted
                )
                if self._num_adaptive_sample_remaining == 1:
                    # we just reached the end of the adaptation period
                    proposer.finish_adaptation()

        # update attributes at last, so that exceptions during inference won't leave
        # self in an invalid state
        self.world = world
        if self._num_adaptive_sample_remaining > 0:
            self._num_adaptive_sample_remaining -= 1
        self._num_samples_remaining -= 1
        return self.world

    def throw(
        self,
        typ: Type[BaseException],
        val: Optional[BaseException] = None,
        tb: Optional[TracebackType] = None,
    ) -> NoReturn:
        """Use the default error handling behavior (throw Exception as-is)"""
        # pyre-fixme[7]: Function declared non-returnable, but got `None`.
        super().throw(typ, val, tb)
beanmachine-main
src/beanmachine/ppl/inference/sampler.py
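# Illustrative sketch of driving inference one sweep at a time with Sampler.
# Assumptions: World.initialize_world(queries, observations) builds the initial
# world (as elsewhere in this package) and a single-site ancestral
# Metropolis-Hastings kernel supplies the proposers; the model is hypothetical.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.sampler import Sampler
from beanmachine.ppl.world import World


@bm.random_variable
def rate():
    return dist.Gamma(torch.tensor(2.0), torch.tensor(2.0))


@bm.random_variable
def count(i):
    return dist.Poisson(rate())


observations = {count(i): torch.tensor(3.0) for i in range(4)}
initial_world = World.initialize_world([rate()], observations)
kernel = bm.SingleSiteAncestralMetropolisHastings()
sampler = Sampler(kernel, initial_world, num_samples=100, num_adaptive_samples=0)
draws = torch.stack([world[rate()] for world in sampler])
print(draws.shape)  # (100,): one value of rate() per sweep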
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """An inference engine which uses Bean Machine Graph to make inferences on Bean Machine models.""" from typing import Dict, List, Optional, Set, Tuple import beanmachine.ppl.compiler.performance_report as pr import beanmachine.ppl.compiler.profiler as prof import graphviz import torch from beanmachine.graph import Graph, InferConfig, InferenceType from beanmachine.ppl.compiler.bm_graph_builder import rv_to_query from beanmachine.ppl.compiler.fix_problems import default_skip_optimizations from beanmachine.ppl.compiler.gen_bm_python import to_bm_python from beanmachine.ppl.compiler.gen_bmg_cpp import to_bmg_cpp from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph from beanmachine.ppl.compiler.gen_bmg_python import to_bmg_python from beanmachine.ppl.compiler.gen_dot import to_dot from beanmachine.ppl.compiler.gen_mini import to_mini from beanmachine.ppl.compiler.performance_report import PerformanceReport from beanmachine.ppl.compiler.runtime import BMGRuntime from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples from beanmachine.ppl.inference.utils import _verify_queries_and_observations from beanmachine.ppl.model.rv_identifier import RVIdentifier # TODO[Walid]: At some point, to facilitate checking the idea that this works pretty # much like any other BM inference, we should probably make this class a subclass of # AbstractMCInference. class BMGInference: """ Interface to Bean Machine Graph (BMG) Inference, an experimental framework for high-performance implementations of inference algorithms. Internally, BMGInference consists of a compiler and C++ runtime implementations of various inference algorithms. Currently, only Newtonian Monte Carlo (NMC) inference is supported, and is the algorithm used by default. Please note that this is a highly experimental implementation under active development, and that the subset of Bean Machine model is limited. Limitations include that the runtime graph should be static (meaning, it does not change during inference), and that the types of primitive distributions supported is currently limited. """ _fix_observe_true: bool = False _pd: Optional[prof.ProfilerData] = None def __init__(self): pass def _begin(self, s: str) -> None: pd = self._pd if pd is not None: pd.begin(s) def _finish(self, s: str) -> None: pd = self._pd if pd is not None: pd.finish(s) def _accumulate_graph( self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor], ) -> BMGRuntime: _verify_queries_and_observations(queries, observations, True) rt = BMGRuntime() rt._pd = self._pd bmg = rt.accumulate_graph(queries, observations) # TODO: Figure out a better way to pass this flag around bmg._fix_observe_true = self._fix_observe_true return rt def _transpose_samples(self, raw): self._begin(prof.transpose_samples) samples = [] num_samples = len(raw) bmg_query_count = len(raw[0]) # Suppose we have two queries and three samples; # the shape we get from BMG is: # # [ # [s00, s01], # [s10, s11], # [s20, s21] # ] # # That is, each entry in the list has values from both queries. 
# But what we need in the final dictionary is: # # { # RV0: tensor([[s00, s10, s20]]), # RV1: tensor([[s01, s11, s21]]) # } transposed = [torch.tensor([x]) for x in zip(*raw)] assert len(transposed) == bmg_query_count assert len(transposed[0]) == 1 assert len(transposed[0][0]) == num_samples # We now have # # [ # tensor([[s00, s10, s20]]), # tensor([[s01, s11, s21]]) # ] # # which looks like what we need. But we have an additional problem: # if the the sample is a matrix then it is in columns but we need it in rows. # # If an element of transposed is (1 x num_samples x rows x 1) then we # will just reshape it to (1 x num_samples x rows). # # If it is (1 x num_samples x rows x columns) for columns > 1 then # we transpose it to (1 x num_samples x columns x rows) # # If it is any other shape we leave it alone. for i in range(len(transposed)): t = transposed[i] if len(t.shape) == 4: if t.shape[3] == 1: assert t.shape[0] == 1 assert t.shape[1] == num_samples samples.append(t.reshape(1, num_samples, t.shape[2])) else: samples.append(t.transpose(2, 3)) else: samples.append(t) assert len(samples) == bmg_query_count assert len(samples[0]) == 1 assert len(samples[0][0]) == num_samples self._finish(prof.transpose_samples) return samples def _build_mcsamples( self, rv_to_query, samples, query_to_query_id, num_samples: int, num_chains: int, num_adaptive_samples: int, ) -> MonteCarloSamples: self._begin(prof.build_mcsamples) assert len(samples) == num_chains results = [] for chain_num in range(num_chains): result: Dict[RVIdentifier, torch.Tensor] = {} for (rv, query) in rv_to_query.items(): query_id = query_to_query_id[query] result[rv] = samples[chain_num][query_id] results.append(result) # MonteCarloSamples almost provides just what we need here, # but it requires the input to be of a different type in the # cases of num_chains==1 and !=1 respectively. Furthermore, # we had to tweak it to support the right operator for merging # saumple values when num_chains!=1. if num_chains == 1: mcsamples = MonteCarloSamples( results[0], num_adaptive_samples, stack_not_cat=True ) else: mcsamples = MonteCarloSamples( results, num_adaptive_samples, stack_not_cat=False ) self._finish(prof.build_mcsamples) return mcsamples def _infer( self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor], num_samples: int, num_chains: int = 1, num_adaptive_samples: int = 0, inference_type: InferenceType = InferenceType.NMC, produce_report: bool = True, skip_optimizations: Set[str] = default_skip_optimizations, ) -> Tuple[MonteCarloSamples, PerformanceReport]: if produce_report: self._pd = prof.ProfilerData() rt = self._accumulate_graph(queries, observations) bmg = rt._bmg report = pr.PerformanceReport() self._begin(prof.infer) generated_graph = to_bmg_graph(bmg, skip_optimizations) g = generated_graph.graph query_to_query_id = generated_graph.query_to_query_id samples = [] # BMG requires that we have at least one query. if len(query_to_query_id) != 0: g.collect_performance_data(produce_report) self._begin(prof.graph_infer) default_config = InferConfig() default_config.num_warmup = num_adaptive_samples num_adaptive_samples = 0 # TODO[Walid]: In the following we were previously silently using the default seed # specified in pybindings.cpp (and not passing the local one in). In the current # code we are explicitly passing in the same default value used in that file (5123401). # We really need a way to defer to the value defined in pybindings.py here. 
try: raw = g.infer( num_samples, inference_type, 5123401, num_chains, default_config ) except RuntimeError as e: raise RuntimeError( "Error during BMG inference\n" + "Note: the runtime error from BMG may not be interpretable.\n" ) from e self._finish(prof.graph_infer) if produce_report: self._begin(prof.deserialize_perf_report) js = g.performance_report() report = pr.json_to_perf_report(js) self._finish(prof.deserialize_perf_report) assert len(raw) == num_chains assert all([len(r) == num_samples for r in raw]) samples = [self._transpose_samples(r) for r in raw] # TODO: Make _rv_to_query public. Add it to BMGraphBuilder? mcsamples = self._build_mcsamples( rv_to_query(generated_graph.bmg), samples, query_to_query_id, num_samples, num_chains, num_adaptive_samples, ) self._finish(prof.infer) if produce_report: report.profiler_report = self._pd.to_report() # pyre-ignore return mcsamples, report def infer( self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor], num_samples: int, num_chains: int = 4, num_adaptive_samples: int = 0, inference_type: InferenceType = InferenceType.NMC, skip_optimizations: Set[str] = default_skip_optimizations, ) -> MonteCarloSamples: """ Perform inference by (runtime) compilation of Python source code associated with its parameters, constructing a BMG graph, and then calling the BMG implementation of a particular inference method on this graph. Args: queries: queried random variables observations: observations dict num_samples: number of samples in each chain num_chains: number of chains generated num_adaptive_samples: number of burn in samples to discard inference_type: inference method, currently only NMC is supported skip_optimizations: list of optimization to disable in this call Returns: MonteCarloSamples: The requested samples """ # TODO: Add verbose level # TODO: Add logging samples, _ = self._infer( queries, observations, num_samples, num_chains, num_adaptive_samples, inference_type, False, skip_optimizations, ) return samples def to_dot( self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor], after_transform: bool = True, label_edges: bool = False, skip_optimizations: Set[str] = default_skip_optimizations, ) -> str: """Produce a string containing a program in the GraphViz DOT language representing the graph deduced from the model.""" node_types = False node_sizes = False edge_requirements = False bmg = self._accumulate_graph(queries, observations)._bmg return to_dot( bmg, node_types, node_sizes, edge_requirements, after_transform, label_edges, skip_optimizations, ) def _to_mini( self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor], indent=None, ) -> str: """Internal test method for Neal's MiniBMG prototype.""" bmg = self._accumulate_graph(queries, observations)._bmg return to_mini(bmg, indent) def to_graphviz( self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor], after_transform: bool = True, label_edges: bool = False, skip_optimizations: Set[str] = default_skip_optimizations, ) -> graphviz.Source: """Small wrapper to generate an actual graphviz object""" s = self.to_dot( queries, observations, after_transform, label_edges, skip_optimizations ) return graphviz.Source(s) def to_cpp( self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor], ) -> str: """Produce a string containing a C++ program fragment which produces the graph deduced from the model.""" bmg = self._accumulate_graph(queries, observations)._bmg return 
to_bmg_cpp(bmg).code def to_python( self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor], ) -> str: """Produce a string containing a Python program fragment which produces the graph deduced from the model.""" bmg = self._accumulate_graph(queries, observations)._bmg return to_bmg_python(bmg).code def to_bm_python( self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor], ) -> str: """Produce a string containing a BM Python program from the graph.""" bmg = self._accumulate_graph(queries, observations)._bmg return to_bm_python(bmg) def to_graph( self, queries: List[RVIdentifier], observations: Dict[RVIdentifier, torch.Tensor], ) -> Tuple[Graph, Dict[RVIdentifier, int]]: """Produce a BMG graph and a map from queried RVIdentifiers to the corresponding indices of the inference results.""" rt = self._accumulate_graph(queries, observations) bmg = rt._bmg generated_graph = to_bmg_graph(bmg) g = generated_graph.graph query_to_query_id = generated_graph.query_to_query_id rv_to_query_map = rv_to_query(generated_graph.bmg) rv_to_query_id = {rv: query_to_query_id[rv_to_query_map[rv]] for rv in queries} return g, rv_to_query_id
beanmachine-main
src/beanmachine/ppl/inference/bmg_inference.py
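# Illustrative sketch of the BMGInference front end on a hypothetical
# Beta-Bernoulli model; NMC is the default (and currently only) inference type.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.bmg_inference import BMGInference


@bm.random_variable
def theta():
    return dist.Beta(2.0, 2.0)


@bm.random_variable
def flip(i):
    return dist.Bernoulli(theta())


observations = {flip(i): torch.tensor(1.0) for i in range(3)}
queries = [theta()]
samples = BMGInference().infer(queries, observations, num_samples=200, num_chains=1)
print(samples[theta()].shape)  # (1, 200)
print(BMGInference().to_dot(queries, observations))  # GraphViz DOT source of the graph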
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import List, Set from beanmachine.ppl.inference.base_inference import BaseInference from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer from beanmachine.ppl.inference.proposer.single_site_random_walk_proposer import ( SingleSiteRandomWalkProposer, ) from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.world import World class SingleSiteRandomWalk(BaseInference): """ Single Site random walk Metropolis-Hastings. This single site algorithm uses a Normal distribution proposer. Args: step_size: Step size, defaults to 1.0 """ def __init__(self, step_size: float = 1.0): self.step_size = step_size self._proposers = {} def get_proposers( self, world: World, target_rvs: Set[RVIdentifier], num_adaptive_sample: int, ) -> List[BaseProposer]: proposers = [] for node in target_rvs: if node not in self._proposers: self._proposers[node] = SingleSiteRandomWalkProposer( node, self.step_size ) proposers.append(self._proposers[node]) return proposers
beanmachine-main
src/beanmachine/ppl/inference/single_site_random_walk.py
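# Illustrative sketch of SingleSiteRandomWalk on a hypothetical location model;
# step_size is the scale of the Normal proposal around the current value.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist


@bm.random_variable
def loc():
    return dist.Normal(torch.tensor(0.0), torch.tensor(10.0))


@bm.random_variable
def obs(i):
    return dist.Normal(loc(), torch.tensor(1.0))


observations = {obs(i): torch.tensor(2.0) for i in range(10)}
samples = bm.SingleSiteRandomWalk(step_size=0.5).infer(
    [loc()], observations, num_samples=1000, num_chains=1
)
print(samples[loc()].mean())  # should land near the observed value of 2.0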
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from abc import ABCMeta, abstractmethod from typing import Iterable import torch from beanmachine import ppl as bm from beanmachine.ppl.distributions.delta import Delta from beanmachine.ppl.inference.vi.variational_infer import VariationalInfer from beanmachine.ppl.inference.vi.variational_world import VariationalWorld from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.world import RVDict from torch import distributions as dist from torch.distributions.constraint_registry import biject_to, transform_to from torch.nn.functional import softplus class AutoGuideVI(VariationalInfer, metaclass=ABCMeta): """VI with guide distributions automatically generated.""" def __init__( self, queries: Iterable[RVIdentifier], observations: RVDict, **kwargs, ): queries_to_guides = {} # runs all queries to discover their dimensions world = VariationalWorld( observations=observations, params={}, queries_to_guides=queries_to_guides, ) # automatically instantiate `queries_to_guides` for query in queries: world.call(query) if query.is_random_variable: distrib = world.get_variable(query).distribution queries_to_guides[query] = self.get_guide(query, distrib) super().__init__( queries_to_guides=queries_to_guides, observations=observations, **kwargs, ) @staticmethod @abstractmethod def get_guide(query, distrib) -> RVIdentifier: pass class ADVI(AutoGuideVI): """Automatic Differentiation Variational Inference (ADVI). ADVI automates construction of guides by initializing variational distributions as Gaussians and possibly bijecting them so the supports match. See https://arxiv.org/abs/1506.03431. """ @staticmethod def get_guide(query, distrib): @bm.param def param_loc(): return ( torch.rand_like(biject_to(distrib.support).inv(distrib.sample())) * 4.0 - 2.0 ) @bm.param def param_scale(): return ( 0.01 + torch.rand_like(biject_to(distrib.support).inv(distrib.sample())) * 4.0 - 2.0 ) def f(): loc = param_loc() scale = softplus(param_scale()) q = dist.Normal(loc, scale) if distrib.support != dist.constraints.real: if distrib.support == dist.constraints.positive: # override exp transform with softplus q = dist.TransformedDistribution( q, [dist.transforms.SoftplusTransform()] ) else: q = dist.TransformedDistribution(q, [biject_to(distrib.support)]) return q f.__name__ = "guide_" + str(query) return bm.random_variable(f)() class MAP(AutoGuideVI): """Maximum A Posteriori (MAP) Inference. Uses ``Delta`` distributions to perform a point estimate of the posterior mode. """ @staticmethod def get_guide(query, distrib): @bm.param def param_loc(): # TODO: use event shape return ( torch.rand_like(transform_to(distrib.support).inv(distrib.sample())) * 4.0 - 2.0 ) def f(): loc = param_loc() if distrib.support != dist.constraints.real: if distrib.support == dist.constraints.positive: loc = dist.transforms.SoftplusTransform()(loc) else: loc = transform_to(distrib.support)(loc) q = Delta(loc) return q f.__name__ = "guide_" + str(query) return bm.random_variable(f)()
beanmachine-main
src/beanmachine/ppl/inference/vi/autoguide.py
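# Illustrative sketch of the auto-guides above on a hypothetical scale model:
# ADVI fits a (softplus-transformed) Normal guide for the positive latent,
# while MAP fits a Delta point estimate; both return a fitted VariationalWorld.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.vi import ADVI, MAP


@bm.random_variable
def sigma():
    return dist.Gamma(torch.tensor(2.0), torch.tensor(2.0))


@bm.random_variable
def x(i):
    return dist.Normal(torch.tensor(0.0), sigma())


observations = {x(i): torch.tensor(0.5) for i in range(20)}

advi_world = ADVI([sigma()], observations).infer(num_steps=500)
print(advi_world.get_guide_distribution(sigma()).sample())

map_world = MAP([sigma()], observations).infer(num_steps=500)
print(map_world.get_guide_distribution(sigma()).sample())  # the point estimate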
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .autoguide import ADVI, MAP from .variational_infer import VariationalInfer __all__ = [ "ADVI", "MAP", "VariationalInfer", ]
beanmachine-main
src/beanmachine/ppl/inference/vi/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations from typing import Mapping, MutableMapping, Optional import torch import torch.distributions as dist from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.world import init_from_prior, World from beanmachine.ppl.world.initialize_fn import InitializeFn from beanmachine.ppl.world.world import RVDict class VariationalWorld(World): """A World which also contains (variational) parameters.""" def __init__( self, observations: Optional[RVDict] = None, initialize_fn: InitializeFn = init_from_prior, params: Optional[MutableMapping[RVIdentifier, torch.Tensor]] = None, queries_to_guides: Optional[Mapping[RVIdentifier, RVIdentifier]] = None, ) -> None: self._params = params or {} self._queries_to_guides = queries_to_guides or {} super().__init__(observations, initialize_fn) def copy(self): world_copy = VariationalWorld( observations=self.observations.copy(), initialize_fn=self._initialize_fn, params=self._params.copy(), queries_to_guides=self._queries_to_guides.copy(), ) world_copy._variables = self._variables.copy() return world_copy # TODO: distinguish params vs random_variables at the type-level def get_param(self, param: RVIdentifier) -> torch.Tensor: """Gets a parameter or initializes it if not found.""" if param not in self._params: init_value = param.function(*param.arguments) assert isinstance(init_value, torch.Tensor) self._params[param] = init_value self._params[param].requires_grad = True return self._params[param] def set_params(self, params: MutableMapping[RVIdentifier, torch.Tensor]): """Sets the parameters in this World to specified values.""" self._params = params def get_guide_distribution(self, rv: RVIdentifier) -> dist.Distribution: guide_rv = self._queries_to_guides[rv] return self.get_variable(guide_rv).distribution def update_graph(self, node: RVIdentifier) -> torch.Tensor: """ Initialize a new node using its guide if available and the prior otherwise. Args: node (RVIdentifier): RVIdentifier of node to update in the graph. Returns: The value of the node stored in world (in original space). """ if node in self._queries_to_guides: node = self._queries_to_guides[node] return super().update_graph(node)
beanmachine-main
src/beanmachine/ppl/inference/vi/variational_world.py
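# Illustrative sketch of the parameter store above. It assumes that calling a
# @bm.param function outside an inference context returns its RVIdentifier,
# mirroring @bm.random_variable; get_param then lazily materializes the tensor
# and marks it as requiring gradients.
import beanmachine.ppl as bm
import torch
from beanmachine.ppl.inference.vi.variational_world import VariationalWorld


@bm.param
def phi():
    return torch.zeros(2)


world = VariationalWorld(observations={}, params={})
value = world.get_param(phi())
print(value, value.requires_grad)        # tensor([0., 0.]) True
world.set_params({phi(): torch.ones(2)})
print(world.get_param(phi()))            # tensor([1., 1.])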
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. "Gradient estimators of f-divergences." from typing import Callable, Mapping import torch from beanmachine.ppl.inference.vi.variational_world import VariationalWorld from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.world import RVDict, World _CPU_DEVICE = torch.device("cpu") DiscrepancyFn = Callable[[torch.Tensor], torch.Tensor] # NOTE: right now it is either all reparameterizable # or all score function gradient estimators. We should # be able to support both depending on the guide used. def monte_carlo_approximate_reparam( observations: RVDict, num_samples: int, discrepancy_fn: DiscrepancyFn, params: Mapping[RVIdentifier, torch.Tensor], queries_to_guides: Mapping[RVIdentifier, RVIdentifier], subsample_factor: float = 1.0, device: torch.device = _CPU_DEVICE, ) -> torch.Tensor: """The pathwise derivative / reparameterization trick (https://arxiv.org/abs/1312.6114) gradient estimator.""" loss = torch.zeros(1).to(device) for _ in range(num_samples): variational_world = VariationalWorld.initialize_world( queries=queries_to_guides.values(), observations=observations, initialize_fn=lambda d: d.rsample(), params=params, queries_to_guides=queries_to_guides, ) world = World.initialize_world( queries=[], observations={ **{ query: variational_world[guide] for query, guide in queries_to_guides.items() }, **observations, }, ) # form log density ratio logu = logp - logq # We want to avoid using world.latent_nodes/world.observations. # The preceding World.initialize_world puts everything into observations, which results in latent_nodes being empty. # That results in everything being scaled by the scaling factor (we don't want that) logu = ( world.log_prob(queries_to_guides.keys()) + (1.0 / subsample_factor) * world.log_prob(observations.keys()) - variational_world.log_prob(queries_to_guides.values()) ) loss += discrepancy_fn(logu) # reparameterized estimator return loss / num_samples def monte_carlo_approximate_sf( observations: RVDict, num_samples: int, discrepancy_fn: DiscrepancyFn, params: Mapping[RVIdentifier, torch.Tensor], queries_to_guides: Mapping[RVIdentifier, RVIdentifier], subsample_factor: float = 1, device: torch.device = _CPU_DEVICE, ) -> torch.Tensor: """The score function / log derivative trick surrogate loss (https://arxiv.org/pdf/1506.05254) gradient estimator.""" loss = torch.zeros(1).to(device) for _ in range(num_samples): variational_world = VariationalWorld.initialize_world( queries=queries_to_guides.values(), observations=observations, initialize_fn=lambda d: d.sample(), params=params, queries_to_guides=queries_to_guides, ) world = World.initialize_world( queries=[], observations={ **{ query: variational_world[guide] for query, guide in queries_to_guides.items() }, **observations, }, ) # form log density ratio logu = logp - logq # We want to avoid using world.latent_nodes/world.observations. # The preceding World.initialize_world puts everything into observations, which results in latent_nodes being empty. 
# That results in everything being scaled by the scaling factor (we don't want that) logq = variational_world.log_prob(queries_to_guides.values()) logu = ( world.log_prob(queries_to_guides.keys()) + (1.0 / subsample_factor) * world.log_prob(observations.keys()) - logq ) # score function estimator surrogate loss loss += discrepancy_fn(logu).detach().clone() * logq + discrepancy_fn(logu) return loss / num_samples
beanmachine-main
src/beanmachine/ppl/inference/vi/gradient_estimator.py
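# Illustrative sketch showing how either estimator above is selected: both are
# passed to VariationalInfer (here via the ADVI convenience wrapper) through
# the mc_approx argument. The toy model is hypothetical; extra Monte Carlo
# samples are used because the score-function estimator has higher variance.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.vi import ADVI
from beanmachine.ppl.inference.vi.gradient_estimator import (
    monte_carlo_approximate_sf,
)


@bm.random_variable
def z():
    return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))


@bm.random_variable
def data(i):
    return dist.Normal(z(), torch.tensor(1.0))


observations = {data(i): torch.tensor(1.0) for i in range(5)}
world = ADVI([z()], observations).infer(
    num_steps=200,
    num_samples=10,
    mc_approx=monte_carlo_approximate_sf,
)
print(world.get_guide_distribution(z()).sample())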
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import annotations

import logging
from typing import Callable, Dict, Optional

import torch
import torch.optim as optim
from beanmachine.ppl.inference.vi.discrepancy import kl_reverse
from beanmachine.ppl.inference.vi.gradient_estimator import (
    monte_carlo_approximate_reparam,
)
from beanmachine.ppl.inference.vi.variational_world import VariationalWorld
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.world.world import RVDict

from tqdm.auto import tqdm

_CPU_DEVICE = torch.device("cpu")


class VariationalInfer:
    def __init__(
        self,
        queries_to_guides: Dict[RVIdentifier, RVIdentifier],
        observations: RVDict,
        optimizer: Callable[
            [torch.Tensor], optim.Optimizer
        ] = lambda params: optim.Adam(params, lr=1e-2),
        device: torch.device = _CPU_DEVICE,
    ):
        """
        Performs variational inference using reparameterizable guides.

        Args:
            queries_to_guides: Pairing between random variables and their variational
                guide/surrogate
            observations: Observations as an RVDict keyed by RVIdentifier
            optimizer: A function returning a ``torch.Optimizer`` to use for
                optimizing variational parameters.
            device: a ``torch.device`` to use for pytorch tensors
        """
        super().__init__()
        self.observations = observations
        self.queries_to_guides = queries_to_guides

        # runs all guides to reify `param`s for `optimizer`
        # NOTE: assumes `params` is static and same across all worlds, consider MultiOptimizer (see Pyro)
        # TODO: what happens if not all the params are encountered
        # in this execution pass, eg an if/else, consider MultiOptimizer
        world = VariationalWorld(
            observations=observations,
            params={},
            queries_to_guides=queries_to_guides,
        )
        for guide in queries_to_guides.values():
            world.call(guide)
        self.params = world._params
        self._optimizer = optimizer(self.params.values())
        self._device = device

    def infer(
        self,
        num_steps: int,
        num_samples: int = 1,
        discrepancy_fn=kl_reverse,
        mc_approx=monte_carlo_approximate_reparam,  # TODO: support both reparam and SF in same guide
        step_callback: Optional[
            Callable[[int, torch.Tensor, VariationalInfer], None]
        ] = None,
        subsample_factor: float = 1,
    ) -> VariationalWorld:
        """
        Perform variational inference.

        Args:
            num_steps: number of optimizer steps
            num_samples: number of samples per Monte-Carlo gradient estimate of
                E[f(logp - logq)]
            discrepancy_fn: discrepancy function f, use ``kl_reverse`` to minimize
                negative ELBO
            mc_approx: Monte-Carlo gradient estimator to use
            step_callback: callback function invoked each optimizer step
            subsample_factor: subsampling factor used for subsampling, helps scale
                the observations to avoid overshrinking towards the prior

        Returns:
            VariationalWorld: A world with variational guide distributions
            initialized with optimized parameters
        """
        assert subsample_factor > 0 and subsample_factor <= 1
        for it in tqdm(range(num_steps)):
            loss = self.step(num_samples, discrepancy_fn, mc_approx, subsample_factor)
            if step_callback:
                step_callback(it, loss, self)
        return self.initialize_world()

    def step(
        self,
        num_samples: int = 1,
        discrepancy_fn=kl_reverse,
        mc_approx=monte_carlo_approximate_reparam,  # TODO: support both reparam and SF in same guide
        subsample_factor: float = 1,
    ) -> torch.Tensor:
        """
        Perform one step of variational inference.

        Args:
            num_samples: number of samples per Monte-Carlo gradient estimate of
                E[f(logp - logq)]
            discrepancy_fn: discrepancy function f, use ``kl_reverse`` to minimize
                negative ELBO
            mc_approx: Monte-Carlo gradient estimator to use
            subsample_factor: subsampling factor used for subsampling, helps scale
                the observations to avoid overshrinking towards the prior

        Returns:
            torch.Tensor: the loss value (before the step)
        """
        self._optimizer.zero_grad()
        loss = mc_approx(
            self.observations,
            num_samples,
            discrepancy_fn,
            self.params,
            self.queries_to_guides,
            subsample_factor=subsample_factor,
            device=self._device,
        )
        if not torch.isnan(loss) and not torch.isinf(loss):
            loss.backward()
            self._optimizer.step()
        else:
            logging.warning("Encountered NaN/inf loss, skipping step.")
        return loss

    def initialize_world(self) -> VariationalWorld:
        """
        Initializes a `VariationalWorld` using samples from guide distributions
        evaluated at the current parameter values.

        Returns:
            VariationalWorld: a `World` where guide samples and distributions
            have replaced their corresponding queries
        """
        return VariationalWorld.initialize_world(
            queries=self.queries_to_guides.values(),
            observations=self.observations,
            params=self.params,
            queries_to_guides=self.queries_to_guides,
            initialize_fn=lambda d: d.sample(),
        )
beanmachine-main
src/beanmachine/ppl/inference/vi/variational_infer.py
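# Illustrative sketch of VariationalInfer with a hand-written guide instead of
# an auto-generated one. The model, guide and parameter names are hypothetical;
# the @bm.param tensors are the quantities optimized by the default Adam optimizer.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.vi import VariationalInfer


@bm.random_variable
def mu():
    return dist.Normal(torch.tensor(0.0), torch.tensor(10.0))


@bm.random_variable
def y(i):
    return dist.Normal(mu(), torch.tensor(1.0))


@bm.param
def guide_loc():
    return torch.tensor(0.0)


@bm.param
def guide_log_scale():
    return torch.tensor(0.0)


@bm.random_variable
def mu_guide():
    return dist.Normal(guide_loc(), guide_log_scale().exp())


observations = {y(i): torch.tensor(3.0) for i in range(10)}
vi = VariationalInfer(queries_to_guides={mu(): mu_guide()}, observations=observations)
fitted = vi.infer(num_steps=1000)
print(fitted.get_guide_distribution(mu()).mean)  # should move toward ~3.0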
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. "Csiszar f-functions in log-space." import torch def kl_reverse(logu: torch.Tensor) -> torch.Tensor: """ Log-space Csiszar function for reverse KL-divergence D_f(p,q) = KL(q||p). Also known as the exclusive KL-divergence and negative ELBO, minimizing results in zero-forcing / mode-seeking behavior. Args: logu (torch.Tensor): ``p.log_prob``s evaluated at samples from q. """ return -logu def kl_forward(logu: torch.Tensor) -> torch.Tensor: """ Log-space Csiszar function for forward KL-divergence D_f(p,q) = KL(p||q). Also known as the inclusive KL-divergence, minimizing results in zero-avoiding / mass-covering behavior. Args: logu (torch.Tensor): ``p.log_prob``s evaluated at samples from q. """ return torch.exp(logu) * logu
beanmachine-main
src/beanmachine/ppl/inference/vi/discrepancy.py
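# Illustrative numerical check of the two Csiszar functions above: for z ~ q
# and logu = log p(z) - log q(z), averaging kl_reverse(logu) under q estimates
# KL(q||p) while averaging kl_forward(logu) estimates KL(p||q). For the two
# nearby Normals below, both divergences are analytically 0.1**2 / 2 = 0.005.
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.vi.discrepancy import kl_forward, kl_reverse

p = dist.Normal(0.0, 1.0)
q = dist.Normal(0.1, 1.0)
z = q.sample((100000,))
logu = p.log_prob(z) - q.log_prob(z)
print(kl_reverse(logu).mean())  # roughly 0.005, a Monte Carlo estimate of KL(q||p)
print(kl_forward(logu).mean())  # roughly 0.005, a Monte Carlo estimate of KL(p||q)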
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import NamedTuple, Set, Tuple import torch from beanmachine.ppl.experimental.torch_jit_backend import jit_compile, TorchJITBackend from beanmachine.ppl.inference.proposer.hmc_proposer import HMCProposer from beanmachine.ppl.model.rv_identifier import RVIdentifier from beanmachine.ppl.world import World class _TreeNode(NamedTuple): positions: torch.Tensor momentums: torch.Tensor pe_grad: torch.Tensor class _Tree(NamedTuple): left: _TreeNode right: _TreeNode proposal: torch.Tensor pe: torch.Tensor pe_grad: torch.Tensor log_weight: torch.Tensor sum_momentums: torch.Tensor sum_accept_prob: torch.Tensor num_proposals: torch.Tensor turned_or_diverged: torch.Tensor class _TreeArgs(NamedTuple): log_slice: torch.Tensor direction: torch.Tensor step_size: torch.Tensor initial_energy: torch.Tensor mass_inv: torch.Tensor class NUTSProposer(HMCProposer): """ The No-U-Turn Sampler (NUTS) as described in [1]. Unlike vanilla HMC, it does not require users to specify a trajectory length. The current implementation roughly follows Algorithm 6 of [1]. If multinomial_sampling is True, then the next state will be drawn from a multinomial distribution (weighted by acceptance probability, as introduced in Appendix 2 of [2]) instead of drawn uniformly. Reference: [1] Matthew Hoffman and Andrew Gelman. "The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo" (2014). https://arxiv.org/abs/1111.4246 [2] Michael Betancourt. "A Conceptual Introduction to Hamiltonian Monte Carlo" (2017). https://arxiv.org/abs/1701.02434 Args: initial_world: Initial world to propose from. target_rvs: Set of RVIdentifiers to indicate which variables to propose. num_adaptive_samples: Number of adaptive samples to run. max_tree_depth: Maximum tree depth, defaults to 10. max_delta_energy: Maximum delta energy (for numerical stability), defaults to 1000. initial_step_size: Defaults to 1.0. adapt_step_size: Whether to adapt step size with Dual averaging as suggested in [1], defaults to True. adapt_mass_matrix: Whether to adapt mass matrix using Welford Scheme, defaults to True. multinomial_sampling: Whether to use multinomial sampling as in [2], defaults to True. target_accept_prob: Target accept probability. Increasing this would lead to smaller step size. Defaults to 0.8. nnc_compile: If True, NNC compiler will be used to accelerate the inference. 
""" def __init__( self, initial_world: World, target_rvs: Set[RVIdentifier], num_adaptive_sample: int, max_tree_depth: int = 10, max_delta_energy: float = 1000.0, initial_step_size: float = 1.0, adapt_step_size: bool = True, adapt_mass_matrix: bool = True, full_mass_matrix: bool = False, multinomial_sampling: bool = True, target_accept_prob: float = 0.8, jit_backend: TorchJITBackend = TorchJITBackend.NNC, ): # note that trajectory_length is not used in NUTS super().__init__( initial_world, target_rvs, num_adaptive_sample, trajectory_length=0.0, initial_step_size=initial_step_size, adapt_step_size=adapt_step_size, adapt_mass_matrix=adapt_mass_matrix, full_mass_matrix=full_mass_matrix, target_accept_prob=target_accept_prob, jit_backend=TorchJITBackend.NONE, # we will use NNC at NUTS level, not at HMC level ) self._max_tree_depth = max_tree_depth self._max_delta_energy = max_delta_energy self._multinomial_sampling = multinomial_sampling # pyre-ignore[8] self._build_tree_base_case = jit_compile( self._build_tree_base_case, jit_backend ) def _is_u_turning( self, mass_inv: torch.Tensor, left_momentums: torch.Tensor, right_momentums: torch.Tensor, sum_momentums: torch.Tensor, ) -> torch.Tensor: """The generalized U-turn condition, as described in [2] Appendix 4.2""" rho = self._scale_r(sum_momentums, mass_inv) return (torch.dot(left_momentums, rho) <= 0) or ( torch.dot(right_momentums, rho) <= 0 ) def _build_tree_base_case(self, root: _TreeNode, args: _TreeArgs) -> _Tree: """Base case of the recursive tree building algorithm: take a single leapfrog step in the specified direction and return a subtree.""" positions, momentums, pe, pe_grad = self._leapfrog_step( root.positions, root.momentums, args.step_size * args.direction, args.mass_inv, root.pe_grad, ) new_energy = torch.nan_to_num( self._hamiltonian(positions, momentums, args.mass_inv, pe), float("inf"), ) # initial_energy == -L(\theta^{m-1}) + 1/2 r_0^2 in Algorithm 6 of [1] delta_energy = new_energy - args.initial_energy if self._multinomial_sampling: log_weight = -delta_energy else: # slice sampling as introduced in the original NUTS paper [1] log_weight = (args.log_slice <= -new_energy).log() tree_node = _TreeNode(positions=positions, momentums=momentums, pe_grad=pe_grad) return _Tree( left=tree_node, right=tree_node, proposal=positions, pe=pe, pe_grad=pe_grad, log_weight=log_weight, sum_momentums=momentums, sum_accept_prob=torch.clamp(torch.exp(-delta_energy), max=1.0), num_proposals=torch.tensor(1), turned_or_diverged=args.log_slice >= self._max_delta_energy - new_energy, ) def _build_tree(self, root: _TreeNode, tree_depth: int, args: _TreeArgs) -> _Tree: """Build the binary tree by recursively build the left and right subtrees and combine the two.""" if tree_depth == 0: return self._build_tree_base_case(root, args) # build the first half of the tree sub_tree = self._build_tree(root, tree_depth - 1, args) if sub_tree.turned_or_diverged: return sub_tree # build the other half of the tree other_sub_tree = self._build_tree( root=sub_tree.left if args.direction == -1 else sub_tree.right, tree_depth=tree_depth - 1, args=args, ) return self._combine_tree( sub_tree, other_sub_tree, args.direction, args.mass_inv, biased=False ) def _combine_tree( self, old_tree: _Tree, new_tree: _Tree, direction: torch.Tensor, mass_inv: torch.Tensor, biased: bool, ) -> _Tree: """Combine the old tree and the new tree into a single (large) tree. The new tree will be add to the left of the old tree if direction is -1, otherwise it will be add to the right. 
If biased is True, then we will prefer choosing from new tree (which is away from the starting location) than old tree when sampling the next state from the trajectory. This function assumes old_tree is not turned or diverged.""" # if old tree hsa turned or diverged, then we shouldn't build the new tree in # the first place assert not old_tree.turned_or_diverged # log of the sum of the weights from both trees log_weight = torch.logaddexp(old_tree.log_weight, new_tree.log_weight) if new_tree.turned_or_diverged: selected_subtree = old_tree else: # progressively sample from the trajectory if biased: # biased progressive sampling (Appendix 3.2 of [2]) log_tree_prob = new_tree.log_weight - old_tree.log_weight else: # uniform progressive sampling (Appendix 3.1 of [2]) log_tree_prob = new_tree.log_weight - log_weight if torch.rand_like(log_tree_prob).log() < log_tree_prob: selected_subtree = new_tree else: selected_subtree = old_tree if direction == -1: left_tree, right_tree = new_tree, old_tree else: left_tree, right_tree = old_tree, new_tree sum_momentums = left_tree.sum_momentums + right_tree.sum_momentums turned_or_diverged = new_tree.turned_or_diverged or self._is_u_turning( mass_inv, left_tree.left.momentums, right_tree.right.momentums, sum_momentums, ) # More robust U-turn condition # https://discourse.mc-stan.org/t/nuts-misses-u-turns-runs-in-circles-until-max-treedepth/9727 if not turned_or_diverged and right_tree.num_proposals > 1: extended_sum_momentums = left_tree.sum_momentums + right_tree.left.momentums turned_or_diverged = self._is_u_turning( mass_inv, left_tree.left.momentums, right_tree.left.momentums, extended_sum_momentums, ) if not turned_or_diverged and left_tree.num_proposals > 1: extended_sum_momentums = ( right_tree.sum_momentums + left_tree.right.momentums ) turned_or_diverged = self._is_u_turning( mass_inv, left_tree.right.momentums, right_tree.right.momentums, extended_sum_momentums, ) return _Tree( left=left_tree.left, right=right_tree.right, proposal=selected_subtree.proposal, pe=selected_subtree.pe, pe_grad=selected_subtree.pe_grad, log_weight=log_weight, sum_momentums=sum_momentums, sum_accept_prob=old_tree.sum_accept_prob + new_tree.sum_accept_prob, num_proposals=old_tree.num_proposals + new_tree.num_proposals, turned_or_diverged=turned_or_diverged, ) def propose(self, world: World) -> Tuple[World, torch.Tensor]: if world is not self.world: # re-compute cached values since world was modified by other sources self.world = world self._positions = self._dict2vec.to_vec( self._to_unconstrained({node: world[node] for node in self._target_rvs}) ) self._pe, self._pe_grad = self._potential_grads(self._positions) momentums = self._initialize_momentums(self._positions) current_energy = self._hamiltonian( self._positions, momentums, self._mass_inv, self._pe ) if self._multinomial_sampling: # log slice is only used to check the divergence log_slice = -current_energy else: # this is a more stable way to sample from log(Uniform(0, exp(-current_energy))) log_slice = torch.log1p(-torch.rand_like(current_energy)) - current_energy tree_node = _TreeNode(self._positions, momentums, self._pe_grad) tree = _Tree( left=tree_node, right=tree_node, proposal=self._positions, pe=self._pe, pe_grad=self._pe_grad, # log accept prob of staying at current state log_weight=torch.zeros_like(log_slice), sum_momentums=momentums, sum_accept_prob=torch.zeros_like(log_slice), num_proposals=torch.tensor(0), turned_or_diverged=torch.tensor(False), ) for j in range(self._max_tree_depth): direction = 
torch.tensor(1 if torch.rand(()) > 0.5 else -1) tree_args = _TreeArgs( log_slice, direction, self.step_size, current_energy, self._mass_inv, ) if direction == -1: new_tree = self._build_tree(tree.left, j, tree_args) else: new_tree = self._build_tree(tree.right, j, tree_args) tree = self._combine_tree( tree, new_tree, direction, self._mass_inv, biased=True ) if tree.turned_or_diverged: break if tree.proposal is not self._positions: positions_dict = self._dict2vec.to_dict(tree.proposal) self.world = self.world.replace(self._to_unconstrained.inv(positions_dict)) self._positions, self._pe, self._pe_grad = ( tree.proposal, tree.pe, tree.pe_grad, ) self._alpha = tree.sum_accept_prob / tree.num_proposals return self.world, torch.zeros_like(self._alpha)
beanmachine-main
src/beanmachine/ppl/inference/proposer/nuts_proposer.py
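# Illustrative sketch of driving NUTSProposer directly; normally it is created
# for you by GlobalNoUTurnSampler or SingleSiteNoUTurnSampler. Assumptions: the
# model is hypothetical, World.initialize_world(queries, observations) builds
# the starting world, and the JIT backend is disabled to keep the sketch small.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.experimental.torch_jit_backend import TorchJITBackend
from beanmachine.ppl.inference.proposer.nuts_proposer import NUTSProposer
from beanmachine.ppl.world import World


@bm.random_variable
def theta():
    return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))


@bm.random_variable
def x(i):
    return dist.Normal(theta(), torch.tensor(1.0))


observations = {x(i): torch.tensor(1.0) for i in range(5)}
world = World.initialize_world([theta()], observations)
proposer = NUTSProposer(
    world,
    target_rvs={theta()},
    num_adaptive_sample=0,
    jit_backend=TorchJITBackend.NONE,
)
for _ in range(10):
    world, _ = proposer.propose(world)  # NUTS accepts internally; returned log-ratio is 0
print(world[theta()])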