python_code (stringlengths 0-4.04M) | repo_name (stringlengths 7-58) | file_path (stringlengths 5-147)
---|---|---|
import torch
import numpy as np
from torch.nn import Parameter
from halp.layers.pool_layer import BitCenterMaxPool2D, BitCenterAvgPool2D
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
import unittest
from unittest import TestCase
from halp.utils.utils import set_seed
from halp.utils.test_utils import HalpTest
from torch.autograd.gradcheck import get_numerical_jacobian, iter_tensors, make_jacobian
from halp.layers.bit_center_layer_test import TestBitCenterLayer
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class TestBitCenterPool2DLayer(TestBitCenterLayer):
# class TestBitCenterMaxPool2DLayer(TestBitCenterLayer, TestCase):
'''
Test the functionality of bit centering pool2d layers
'''
def get_config(self, type="grad_check"):
config = {}
if type == "grad_check":
config["n_train_sample"] = 35
config["channel_in"] = 17
config["w_in"] = 24
config["h_in"] = 13
config["kernel_size"] = (2, 2)
config["stride"] = None
config["padding"] = 0
config["cast_func"] = void_cast_func
config["do_double"] = True
config["seed"] = 0
config["batch_size"] = 35
elif type == "fw_bw_proc":
config["n_train_sample"] = 98
config["channel_in"] = 13
config["w_in"] = 31
config["h_in"] = 17
config["kernel_size"] = (4, 4)
config["stride"] = None
config["padding"] = 0
config["cast_func"] = single_to_half_det
config["do_double"] = False
config["seed"] = 0
config["batch_size"] = 33
else:
raise Exception("Config type not supported!")
return config
def check_layer_param_and_cache(self, layer):
t_list = [(layer.input_cache, torch.half, False, False),
(layer.grad_output_cache, torch.half, False, False)]
self.CheckLayerTensorProperty(t_list)
self.CheckLayerTensorGradProperty(t_list)
def get_input(self,
channel_in,
w_in,
h_in,
kernel_size,
stride=None,
padding=0,
cast_func=void_cast_func,
bias=False,
do_double=True,
seed=0,
batch_size=1,
n_train_sample=1):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if do_double:
input_delta = Parameter(
torch.randn(
n_train_sample, channel_in, w_in, h_in,
dtype=torch.double).cuda(),
requires_grad=True)
input_fp = Parameter(
torch.randn(
n_train_sample, channel_in, w_in, h_in,
dtype=torch.double).cuda(),
requires_grad=True)
else:
input_delta = Parameter(
cast_func(
torch.randn(
n_train_sample,
channel_in,
w_in,
h_in,
dtype=torch.double).cuda()),
requires_grad=True)
input_fp = Parameter(
torch.randn(
n_train_sample, channel_in, w_in, h_in,
dtype=torch.float).cuda(),
requires_grad=True)
return [
input_fp,
], [
input_delta,
]
def get_analytical_param_grad(self, layer):
# as there is no param in the pooling layer, we return an empty list
return []
def get_numerical_grad(self,
layer,
input_fp,
input_delta,
perturb_eps,
target=None):
grad_list = []
layer.set_mode(do_offset=True)
input_final = [
Parameter(x + y, requires_grad=True)
for x, y in zip(input_fp, input_delta)
]
output_final = layer(*input_final)
loss = 0.5 * torch.sum(output_final**2)
loss.backward()
# use the gradient from the 0.5 * sum(output**2) loss; for this loss the output equals the gradient w.r.t. the output
num_input_grad = input_final[0].grad
grad_list.append(num_input_grad)
return output_final, grad_list
class TestBitCenterMaxPool2DLayer(TestBitCenterPool2DLayer, TestCase):
def prepare_layer(self,
channel_in,
w_in,
h_in,
kernel_size,
stride=None,
padding=0,
cast_func=void_cast_func,
bias=False,
do_double=True,
seed=0,
batch_size=1,
n_train_sample=1):
layer = BitCenterMaxPool2D(
kernel_size=kernel_size,
stride=stride,
padding=padding,
cast_func=cast_func,
n_train_sample=n_train_sample)
# Note: do_double sets up the layer for gradient checking; otherwise the
# layer is set up for checking tensor properties and layer behaviors
if do_double:
layer.double()
layer.cuda()
return layer
class TestBitCenterAvgPool2DLayer(TestBitCenterPool2DLayer, TestCase):
def get_config(self, type="grad_check"):
config = {}
if type == "grad_check":
config["n_train_sample"] = 35
config["channel_in"] = 17
config["w_in"] = 25
config["h_in"] = 14
config["kernel_size"] = (4, 4)
config["stride"] = None
config["padding"] = 1
config["cast_func"] = void_cast_func
config["do_double"] = True
config["seed"] = 0
config["batch_size"] = 35
elif type == "fw_bw_proc":
config["n_train_sample"] = 98
config["channel_in"] = 13
config["w_in"] = 31
config["h_in"] = 17
config["kernel_size"] = (4, 4)
config["stride"] = None
config["padding"] = 0
config["cast_func"] = single_to_half_det
config["do_double"] = False
config["seed"] = 0
config["batch_size"] = 33
else:
raise Exception("Config type not supported!")
return config
def prepare_layer(self,
channel_in,
w_in,
h_in,
kernel_size,
stride=None,
padding=0,
cast_func=void_cast_func,
bias=False,
do_double=True,
seed=0,
batch_size=1,
n_train_sample=1):
layer = BitCenterAvgPool2D(
kernel_size=kernel_size,
stride=stride,
padding=padding,
cast_func=cast_func,
n_train_sample=n_train_sample)
# Note: do_double sets up the layer for gradient checking; otherwise the
# layer is set up for checking tensor properties and layer behaviors
if do_double:
layer.double()
layer.cuda()
return layer
if __name__ == "__main__":
print(torch.__version__)
unittest.main()
| halp-master | halp/layers/pool_layer_test.py |
import torch
import numpy as np
import torch.nn.functional as F
from torch.nn import Parameter
from halp.layers.ele_mult import BitCenterEleMult, bit_center_ele_mult
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
import unittest
from unittest import TestCase
from halp.layers.bit_center_layer_test import TestBitCenterNoParamLayer
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class TestBitCenterEleMultLayer(TestBitCenterNoParamLayer, TestCase):
def get_config(self, type="grad_check"):
config = {}
if type == "grad_check":
config["n_train_sample"] = 5
config["channel_in"] = 7
config["w_in"] = 4
config["h_in"] = 9
config["cast_func"] = void_cast_func
config["do_double"] = True
config["seed"] = 0
config["batch_size"] = 5
elif type == "fw_bw_proc":
config["n_train_sample"] = 17
config["channel_in"] = 6
config["w_in"] = 3
config["h_in"] = 5
config["cast_func"] = single_to_half_det
config["do_double"] = False
config["seed"] = 0
config["batch_size"] = 4
else:
raise Exception("Config type not supported!")
return config
def prepare_layer(self,
channel_in,
w_in,
h_in,
cast_func=void_cast_func,
bias=False,
do_double=True,
seed=0,
batch_size=1,
n_train_sample=1):
layer = BitCenterEleMult(
cast_func=cast_func, n_train_sample=n_train_sample)
# Note: do_double sets up the layer for gradient checking; otherwise the
# layer is set up for checking tensor properties and layer behaviors
if do_double:
layer.double()
layer.cuda()
return layer
def get_input(self,
channel_in,
w_in,
h_in,
cast_func=void_cast_func,
bias=False,
do_double=True,
seed=0,
batch_size=1,
n_train_sample=1):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if do_double:
input_delta_l = Parameter(
torch.randn(
n_train_sample, channel_in, w_in, h_in,
dtype=torch.double).cuda(),
requires_grad=True)
input_fp_l = Parameter(
torch.randn(
n_train_sample, channel_in, w_in, h_in,
dtype=torch.double).cuda(),
requires_grad=True)
input_delta_r = Parameter(
torch.randn(
n_train_sample, channel_in, w_in, h_in,
dtype=torch.double).cuda(),
requires_grad=True)
input_fp_r = Parameter(
torch.randn(
n_train_sample, channel_in, w_in, h_in,
dtype=torch.double).cuda(),
requires_grad=True)
else:
input_delta_l = Parameter(
cast_func(
torch.randn(
n_train_sample,
channel_in,
w_in,
h_in,
dtype=torch.double).cuda()),
requires_grad=True)
input_fp_l = Parameter(
torch.randn(
n_train_sample, channel_in, w_in, h_in,
dtype=torch.float).cuda(),
requires_grad=True)
input_delta_r = Parameter(
cast_func(
torch.randn(
n_train_sample,
channel_in,
w_in,
h_in,
dtype=torch.double).cuda()),
requires_grad=True)
input_fp_r = Parameter(
torch.randn(
n_train_sample, channel_in, w_in, h_in,
dtype=torch.float).cuda(),
requires_grad=True)
return [
input_fp_l, input_fp_r
], [
input_delta_l, input_delta_r
]
if __name__ == "__main__":
print(torch.__version__)
unittest.main()
| halp-master | halp/layers/ele_mult_test.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import Parameter
import numpy as np
from halp.layers.linear_layer import BitCenterLinear, bit_center_linear
from torch.autograd import gradcheck
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from unittest import TestCase
from halp.utils.test_utils import HalpTest
from torch.autograd.gradcheck import get_numerical_jacobian, iter_tensors, make_jacobian
import logging
import sys
import copy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class TestBitCenterLayer(HalpTest):
'''
Test the functionality of bit centering linear layers
The helper functions implemented here should directly serve linear style
layers such as linear and convolutional layers.
This template class benefits parametric layers with one weight param
and one bias param, e.g. convolutional layers and linear layers.
'''
def get_config(self):
pass
def prepare_layer(self, **kargs):
"""
this function should generate the layer to be tested and the fp and lp inputs.
Need to specify one parameter in this function for different layers.
This parameter is self.target_dtype: use None for layers not needing
target for forward; use torch.float for regression style problems;
use torch.long for classification style problems
"""
pass
def get_analytical_param_grad(self, layer):
# this function gets the analytical grad with respect to parameters.
# This function can be very layer specific.
grad_list = []
weight_grad = layer.weight.grad + layer.weight_delta.grad
grad_list.append(weight_grad)
if layer.bias is not None:
bias_grad = layer.bias.grad + layer.bias_delta.grad
grad_list.append(bias_grad)
return grad_list
def get_analytical_grad(self, layer1, input_fp, input_delta, target=None):
# this function gets the analytical grad with respect to parameters and input;
# it calls get_analytical_param_grad to get the grad w.r.t. parameters.
# the framework in this function is generic to all layers.
# note we copy the layer so that the numerical grad func can
# properly get the state of the layer, e.g. for batch norm
# the numerical grad func needs the set of running statistics from before
# the stat update resulting from the current lp step.
layer = copy.deepcopy(layer1)
layer.set_mode(do_offset=True)
grad_list = []
output_fp = layer(*input_fp)
output_fp_copy = output_fp.data.clone()
loss_fp = torch.sum(0.5 * output_fp * output_fp)
loss_fp.backward()
grad_input_fp = copy.deepcopy(layer.input_grad_for_test)
layer.set_mode(do_offset=False)
output_lp = layer(*input_delta)
loss_lp = torch.sum(0.5 * output_lp * output_lp)
loss_lp.backward()
if layer.input_grad_for_test is not None:
grad_input_delta = copy.deepcopy(layer.input_grad_for_test)
# as we only have 1 minibatch, we can directly use layer.grad_output_cache
if isinstance(grad_input_delta, list) or isinstance(
grad_input_delta, tuple):
grad_list += [
x + y for x, y in zip(grad_input_fp, grad_input_delta)
]
else:
input_grad = grad_input_fp + grad_input_delta
grad_list.append(input_grad)
else:
grad_list.append(None)
grad_list += self.get_analytical_param_grad(layer)
return output_lp + output_fp, grad_list
def get_numerical_param_grad(self, layer, input, get_loss, perturb_eps):
# this function gets the numerical grad with respect to parameters;
# the framework in this function is generic to all layers.
# Note: get_loss is a function defined inside get_numerical_grad
grad_list = []
num_weight_grad = get_numerical_jacobian(
get_loss, input, target=layer.weight, eps=perturb_eps)
grad_list.append(num_weight_grad)
if layer.bias is not None:
num_bias_grad = get_numerical_jacobian(
get_loss, input, target=layer.bias, eps=perturb_eps)
grad_list.append(num_bias_grad)
return grad_list
def get_numerical_grad(self,
layer,
input_fp,
input_delta,
perturb_eps,
target=None):
# this function gets the numerical grad with respect to parameters and input;
# it calls get_numerical_param_grad to get the grad w.r.t. parameters.
# the framework in this function is generic to all layers
layer.set_mode(do_offset=True)
def get_loss(x):
output = layer(*x)
return torch.sum(0.5 * output * output)
grad_list = []
layer.set_mode(do_offset=True)
param_dict = layer.state_dict()
# update the offset variable
for name, param in layer.named_parameters():
if name.endswith("_delta"):
p_offset = param_dict[name.split("_delta")[0]]
p_offset.data.add_(param)
output_final = layer(*[x + y for x, y in zip(input_fp, input_delta)])
input = []
for i, (x, y) in enumerate(zip(input_fp, input_delta)):
input.append(x + y)
for x in input:
num_input_grad = get_numerical_jacobian(
get_loss, input, target=x, eps=perturb_eps)
grad_list.append(num_input_grad)
grad_list += self.get_numerical_param_grad(layer, input, get_loss,
perturb_eps)
return output_final, grad_list
def test_forward_backward_output(self):
# test if backward is synced with forward in double mode
# test on a single batch of data, check if the
# gradient can give similar numerical loss changes
# In this test, we use the quadratic loss because, with this loss,
# grad_output decomposes directly into offset and delta
# in the fp and lp steps
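# concretely, for L = 0.5 * sum(output**2) we have dL/d(output) = output,
# and output = output_fp + output_lp, so the upstream gradient splits
# into the cached fp offset part and the lp delta part exactly as the
# bit center layers expect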
perturb_eps = 1e-6
rtol_num_analytical_grad = 1e-3
atol_num_analytical_grad = 1e-6
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
for bias in [True, False]:
for i in range(1):
config = self.get_config(type="grad_check")
config["bias"] = bias
config["do_double"] = True
config["cast_func"] = void_cast_func
config["seed"] = i + 1
layer = self.prepare_layer(**config)
input_fp, input_delta = self.get_input(**config)
analytical_output, analytical_grads = \
self.get_analytical_grad(layer, input_fp, input_delta)
numerical_output, numerical_grads = \
self.get_numerical_grad(layer, input_fp, input_delta, perturb_eps)
assert len(analytical_grads) == len(numerical_grads)
np.testing.assert_allclose(
analytical_output.data.cpu().numpy().ravel(),
numerical_output.data.cpu().numpy().ravel(),
rtol=rtol_num_analytical_grad)
for ana_grad, num_grad in zip(analytical_grads,
numerical_grads):
if (ana_grad is None) and (num_grad is None):
continue
np.testing.assert_allclose(
ana_grad.data.cpu().numpy().ravel(),
num_grad.data.cpu().numpy().ravel(),
rtol=rtol_num_analytical_grad,
atol=atol_num_analytical_grad * np.max(
np.abs(ana_grad.data.cpu().numpy().ravel())))
logger.info(self.__class__.__name__ + " function test passed!")
def check_layer_param_and_cache(self, layer):
t_list = [(layer.weight, torch.float32, True, True),
(layer.bias, torch.float32, True, True),
(layer.weight_delta, torch.half, True, True),
(layer.bias_delta, torch.half, True, True),
(layer.weight_lp, torch.half, True, False),
(layer.bias_lp, torch.half, True, False),
(layer.input_cache, torch.half, False, False),
(layer.grad_output_cache, torch.half, False, False)]
self.CheckLayerTensorProperty(t_list)
self.CheckLayerTensorGradProperty(t_list)
def test_layer_forward_backward_precedures(self):
# We test the behavior of layers in a multi-epoch setting (each epoch with multiple minibatches).
# Along the way, we also test the properties of tensors
# (including params and caches).
# We check whether the forward behavior of the bit centering layer is as expected;
# the backward behavior is guaranteed by the
# bit center function gradient check, e.g. TestBitCenterLinearFuncGradientCheck
for use_bias in [True, False]:
config = self.get_config(type="fw_bw_proc")
config["bias"] = use_bias
layer = self.prepare_layer(**config)
cast_func = config["cast_func"]
n_sample = config["n_train_sample"]
minibatch_size = config["batch_size"]
n_minibatch = int(
np.ceil(config["n_train_sample"] / config["batch_size"]))
# test fp mode
layer.set_mode(do_offset=True)
layer.cuda()
input_tensor_list = []
if (cast_func == single_to_half_det) or (
cast_func == single_to_half_stoc):
self.check_layer_param_and_cache(layer)
for i in range(n_minibatch):
start_idx = i * minibatch_size
end_idx = min((i + 1) * minibatch_size, n_sample)
if i != 0:
if isinstance(layer.input_cache, list) or isinstance(
layer.input_cache, tuple):
input_cache_before = [
x[start_idx:end_idx].clone().numpy()
for x in layer.input_cache
]
else:
input_cache_before = [
layer.input_cache[start_idx:end_idx].clone().
numpy(),
]
grad_input_cache_before = layer.grad_output_cache[
start_idx:end_idx].clone().numpy()
config["n_train_sample"] = end_idx - start_idx
input_fp, _ = self.get_input(**config)
output = layer(*input_fp)
input_tensor_list.append(input_fp)
torch.sum(output).backward()
if i != 0:
# test grad cache
grad_input_cache_after = layer.grad_output_cache[
start_idx:end_idx, :].numpy()
assert (grad_input_cache_before == 0).all()
assert (grad_input_cache_before !=
grad_input_cache_after).all()
# test input cache
if isinstance(layer.input_cache, list) or isinstance(
layer.input_cache, tuple):
input_cache_after = [
x[start_idx:end_idx].clone().numpy()
for x in layer.input_cache
]
else:
input_cache_after = [
layer.input_cache[start_idx:end_idx].clone().
numpy(),
]
def test_input_cache(input_cache_before_list,
input_cache_after_list):
assert len(input_cache_before_list) == len(
input_cache_after_list)
for input_cache_before, input_cache_after in \
zip(input_cache_before_list, input_cache_after_list):
assert (input_cache_before == 0).all()
if input_cache_before.dtype == np.int64:
# if cache is long type, there is possibility
# that some entries are 0 both before and after
# update
assert np.sum(input_cache_before !=
input_cache_after) != 0
else:
assert (input_cache_before !=
input_cache_after).all()
test_input_cache(input_cache_before, input_cache_after)
# test lp mode
layer.set_mode(do_offset=False)
if (cast_func == single_to_half_det) or (
cast_func == single_to_half_stoc):
self.check_layer_param_and_cache(layer)
for i in range(n_minibatch):
_, input_delta = self.get_input(**config)
output = layer(*input_delta)
torch.sum(output).backward()
if (cast_func == single_to_half_det) or (
cast_func == single_to_half_stoc):
self.check_layer_param_and_cache(layer)
logger.info(self.__class__.__name__ + " layer test passed!")
class TestBitCenterNoParamLayer(TestBitCenterLayer):
'''
Test the functionality of bit centering activation like tanh and sigmoid
'''
def get_config(self, type="grad_check"):
config = {}
if type == "grad_check":
config["n_train_sample"] = 5
config["channel_in"] = 7
config["w_in"] = 4
config["h_in"] = 9
config["cast_func"] = void_cast_func
config["do_double"] = True
config["seed"] = 0
config["batch_size"] = 5
elif type == "fw_bw_proc":
config["n_train_sample"] = 17
config["channel_in"] = 6
config["w_in"] = 3
config["h_in"] = 5
config["cast_func"] = single_to_half_det
config["do_double"] = False
config["seed"] = 0
config["batch_size"] = 4
else:
raise Exception("Config type not supported!")
return config
def check_layer_param_and_cache(self, layer):
t_list = [(layer.grad_output_cache, torch.half, False, False)]
if isinstance(layer.input_cache, list) or isinstance(
layer.input_cache, tuple):
t_list += [(x, torch.half, False, False)
for x in layer.input_cache]
else:
t_list += [(layer.input_cache, torch.half, False, False)]
self.CheckLayerTensorProperty(t_list)
self.CheckLayerTensorGradProperty(t_list)
def get_input(self,
channel_in,
w_in,
h_in,
cast_func=void_cast_func,
bias=False,
do_double=True,
seed=0,
batch_size=1,
n_train_sample=1):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if do_double:
input_delta = Parameter(
torch.randn(
n_train_sample, channel_in, w_in, h_in,
dtype=torch.double).cuda(),
requires_grad=True)
input_fp = Parameter(
torch.randn(
n_train_sample, channel_in, w_in, h_in,
dtype=torch.double).cuda(),
requires_grad=True)
else:
input_delta = Parameter(
cast_func(
torch.randn(
n_train_sample,
channel_in,
w_in,
h_in,
dtype=torch.double).cuda()),
requires_grad=True)
input_fp = Parameter(
torch.randn(
n_train_sample, channel_in, w_in, h_in,
dtype=torch.float).cuda(),
requires_grad=True)
return [
input_fp,
], [
input_delta,
]
def get_analytical_param_grad(self, layer):
# as there is no param in these parameter-free layers, we return an empty list
return []
def get_numerical_param_grad(self, layer, input, get_loss, perturb_eps):
return []
| halp-master | halp/layers/bit_center_layer_test.py |
import torch
import torch.nn as nn
from math import floor
from torch.nn import Conv2d
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.autograd import Function
from torch.autograd import Variable
import numpy as np
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from halp.layers.bit_center_layer import BitCenterLayer
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class BitCenterConv2DFunction(Function):
"""
This class does forward and backward computation for bit center
2D convolution. We document the forward backward computation math
here for normal conv2d. The math for bit center Conv2d can be derived
by decomposing the input and weights into full precision offset and
low precision delta.
Given input X in the shape of (b, n_i, w_i, h_i)
and filter W in the shape of (n_o, n_i, w_k, h_k)
For simplicity we write out the math with 0 padding and stride 1
in the following:
we transform the input tensor X to a matrix X_L in the shape of
(b, w_o * h_o, n_i * w_k * h_k). W is transformed into W_L in the shape of
(n_i * w_k * h_k, n_o).
In the forward pass,
\tilde{X} = matmul(X_L, W_L), where \tilde{X} is in the shape of
(b, w_o * h_o, n_o)
In the backward pass,
\par L / \par W_L = matmul(X_L^T, \par L / \par \tilde{X}_L)
\par L / \par X_L = matmul(\par L / \par \tilde{X}_L, W_L^T)
Note here \par L / \par X_L can be directly done using deconv operations
We opt to use fold and unfold function to explicitly do
\par L / \par W_L = matmul(X_L^T, \par L / \par \tilde{X}_L)
because the current deconv (conv_transpose2d) API has some flexibility issues.
Note \par L / \par W_L and \par L / \par X_L are in the shape of
() and () respectively. We need to properly reshape them to return the gradients.
"""
@staticmethod
def forward(ctx,
input_delta,
input_lp,
output_grad_lp,
weight_delta,
weight_lp,
bias_delta=None,
bias_lp=None,
stride=1,
padding=0,
dilation=1,
groups=1):
# suffix lp means the lp version of the offset tensors
# suffix delta means the real low precision part of the model representation
# output_grad_lp is only for backward function, but we need to keep it in ctx
# for backward function to access it.
# TODO: extend to accommodate different dilation, groups
# assert (stride, padding, dilation, groups) == (1, 0, 1, 1)
batch_size = input_lp.size(0)
if (dilation != (1, 1)) or (groups != 1):
raise Exception(
"Dillation and groups are not fully supported yet in bc conv.")
kernel_size = list(weight_lp.size()[-2:])
input_size = list(input_lp.size()[-2:])
output_size = [ \
floor((input_size[0] + 2 * padding[0] - dilation[0] *
(kernel_size[0] - 1) - 1) / stride[0] + 1),
floor((input_size[1] + 2 * padding[1] - dilation[1] *
(kernel_size[1] - 1) - 1) / stride[1] + 1)]
input_lp_unf = F.unfold(
input_lp,
kernel_size,
dilation=dilation,
padding=padding,
stride=stride)
input_delta_unf = F.unfold(
input_delta,
kernel_size,
dilation=dilation,
padding=padding,
stride=stride)
ctx.save_for_backward(input_lp, input_delta, output_grad_lp, weight_lp, weight_delta, bias_lp, bias_delta)
ctx.hyperparam = (kernel_size, stride, padding, dilation, groups, input_lp.shape)
conv2d = lambda input_unf, weight: \
input_unf.transpose(1, 2).matmul(
weight.permute(1, 2, 3, 0).view(-1, weight.size(0)))
output = conv2d(input_delta_unf, weight_lp) \
+ conv2d(input_lp_unf + input_delta_unf, weight_delta)
channel_out = weight_lp.size(0)
output = output.transpose(1, 2).view(batch_size, channel_out,
*output_size)
if bias_delta is not None:
output = output + bias_delta.view(1, -1, 1, 1).expand_as(output)
return output
@staticmethod
def backward(ctx, grad_output):
'''
In this function, the _unf suffix denotes results from the torch unfold style im2col op
'''
input_lp, input_delta, output_grad_lp, weight_lp, weight_delta, bias_lp, bias_delta = ctx.saved_tensors
kernel_size, stride, padding, dilation, groups, input_shape = ctx.hyperparam
# get unfolded input
input_lp_unf = F.unfold(
input_lp,
kernel_size,
dilation=dilation,
padding=padding,
stride=stride)
input_delta_unf = F.unfold(
input_delta,
kernel_size,
dilation=dilation,
padding=padding,
stride=stride)
batch_size, channel_out, w_out, h_out = list(grad_output.size())
w_in, h_in = input_shape[-2:]
channel_in = weight_lp.size(1)
kernel_size = weight_lp.size()[-2:]
assert channel_out == weight_lp.size(0)
# reshape output grad for further computation
grad_output_reshape = grad_output.permute(0, 2, 3, 1).view(
batch_size, -1, channel_out)
output_grad_lp_reshape = output_grad_lp.permute(0, 2, 3, 1).view(
batch_size, -1, channel_out)
# helper functions for backward computation of bit center conv2d:
# we define these functions to get the gradients w.r.t. input and weights.
# in the get_grad_input helper, padding is the padding of the
# original convolution (conv_transpose2d is the corresponding deconv op).
# output_padding controls the output shape when stride > 1: it adds the
# ignored rows back to the output of the deconv op, which resolves
# the ambiguity in the output shape of deconv
output_padding = [\
w_in - ((w_out - 1) * stride[0] + kernel_size[0] - 2 * padding[0]),
h_in - ((h_out - 1) * stride[1] + kernel_size[1] - 2 * padding[1])]
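# worked example with hypothetical numbers: w_in=8, kernel_size[0]=3,
# stride[0]=2, padding[0]=0 gives w_out = floor((8 - 3) / 2) + 1 = 3 and
# (3 - 1) * 2 + 3 = 7, so output_padding[0] = 8 - 7 = 1 restores the input
# row dropped in the forward pass; with w_in=7 the sizes match exactly
# and output_padding[0] = 0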
get_grad_input = lambda grad_out_reshape, weight: \
F.conv_transpose2d(grad_out_reshape, weight, bias=None, stride=stride, padding=padding, output_padding=output_padding)
get_grad_weight = lambda input_unf, grad_output_reshape: \
torch.sum(torch.bmm(input_unf, grad_output_reshape), dim=0)
grad_input_lp = None
grad_input_delta = \
get_grad_input(grad_output, (weight_lp + weight_delta)) \
+ get_grad_input(output_grad_lp, weight_delta)
grad_output_grad_lp = None  # this is a dummy to adapt to the PyTorch autograd API
grad_weight_lp = None
grad_weight_delta = \
get_grad_weight(input_lp_unf + input_delta_unf, grad_output_reshape) \
+ get_grad_weight(input_delta_unf, output_grad_lp_reshape)
grad_weight_delta = \
grad_weight_delta.view(channel_in, *kernel_size, channel_out).permute(3, 0, 1, 2)
grad_bias_lp = None
if (bias_lp is not None) and (bias_delta is not None):
grad_bias_delta = grad_output.sum(dim=[0, 2, 3])
else:
grad_bias_delta = None
grad_stride, grad_padding, grad_dilation, grad_group = None, None, None, None
return grad_input_delta, grad_input_lp, grad_output_grad_lp, \
grad_weight_delta, grad_weight_lp, grad_bias_delta, grad_bias_lp, \
grad_stride, grad_padding, grad_dilation, grad_group
bit_center_conv2d = BitCenterConv2DFunction.apply
class BitCenterConv2D(BitCenterLayer, Conv2d):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=False,
cast_func=void_cast_func,
n_train_sample=1):
BitCenterLayer.__init__(
self,
fp_functional=F.conv2d,
lp_functional=bit_center_conv2d,
bias=bias,
cast_func=cast_func,
n_train_sample=n_train_sample)
Conv2d.__init__(
self,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
# weight_delta is the delta tensor in the algorithm while weight_lp is the cached
# lp version of weight offset
self.setup_bit_center_vars()
self.cuda()
self.reset_parameters_bit_center()
self.register_backward_hook(self.update_grad_output_cache)
def forward_fp(self, input):
self.check_or_setup_input_cache(input)
output = self.fp_func(input, self.weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
self.check_or_setup_grad_cache(output)
self.update_input_cache(input)
return output
def forward_lp(self, input):
# Need to test whether the gradient is updated properly in do_offset mode
input_lp, grad_output_lp = self.get_input_cache_grad_cache(input)
input_delta = input
weight_lp = self.weight_lp
weight_delta = self.weight_delta
bias_lp = self.bias_lp
bias_delta = self.bias_delta
output = self.lp_func(input_delta, input_lp, grad_output_lp,
weight_delta, weight_lp, bias_delta, bias_lp,
self.stride, self.padding, self.dilation,
self.groups)
self.increment_cache_iter(input)
return output
| halp-master | halp/layers/conv_layer.py |
import torch
import torch.nn as nn
from torch.nn import Tanh
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.autograd import Function
import numpy as np
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from halp.layers.bit_center_layer import BitCenterActivation
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class BitCenterSigmoidFunction(Function):
@staticmethod
def forward(ctx, input_delta, input_lp, grad_output_lp):
sig_full = F.sigmoid(input_delta + input_lp)
sig_lp = F.sigmoid(input_lp)
ctx.save_for_backward(grad_output_lp, sig_full, sig_lp)
return sig_full - sig_lp
@staticmethod
def backward(ctx, grad_output):
grad_output_lp, sig_full, sig_lp = ctx.saved_tensors
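# the layer outputs sig(x_lp + x_delta) - sig(x_lp); the total upstream
# gradient w.r.t. the full output is grad_output + grad_output_lp (the
# delta from this lp pass plus the cached fp offset), so with
# sig'(x) = sig(x) * (1 - sig(x)) the delta of the input gradient is
# sig'(full) * (grad_output + grad_output_lp) - sig'(lp) * grad_output_lp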
one = torch.tensor([1.0], dtype=grad_output.dtype, device=grad_output.device)
grad_input = sig_full * (one - sig_full) * (grad_output_lp + grad_output) \
- sig_lp * (one - sig_lp) * grad_output_lp
return grad_input, None, None
bit_center_sigmoid = BitCenterSigmoidFunction.apply
class BitCenterSigmoid(BitCenterActivation, nn.Sigmoid):
def __init__(self, cast_func=void_cast_func, n_train_sample=1):
BitCenterActivation.__init__(self, fp_functional=F.sigmoid,
lp_functional=bit_center_sigmoid,
cast_func=cast_func, n_train_sample=n_train_sample)
nn.Sigmoid.__init__(self)
self.register_backward_hook(self.update_grad_output_cache)
| halp-master | halp/layers/sigmoid_layer.py |
import torch
import torch.nn as nn
from torch.nn import Tanh
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.autograd import Function
import numpy as np
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from halp.layers.bit_center_layer import BitCenterActivation
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class BitCenterTanhFunction(Function):
@staticmethod
def forward(ctx, input_delta, input_lp, grad_output_lp):
tanh_full = F.tanh(input_delta + input_lp)
tanh_lp = F.tanh(input_lp)
ctx.save_for_backward(tanh_full, tanh_lp, grad_output_lp)
return tanh_full - tanh_lp
@staticmethod
def backward(ctx, grad_output):
tanh_full, tanh_lp, grad_output_lp = ctx.saved_tensors
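# same decomposition as in the sigmoid layer, with tanh'(x) = 1 - tanh(x)**2:
# the offset gradient flows through tanh_lp and the full-model gradient
# through tanh_full applied to the total upstream gradient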
one = torch.tensor([1.0], dtype=grad_output.dtype, device=grad_output.device)
return -(one - tanh_lp**2) * grad_output_lp \
+ (one - tanh_full**2) * (grad_output + grad_output_lp), None, None
bit_center_tanh = BitCenterTanhFunction.apply
class BitCenterTanh(BitCenterActivation, nn.Tanh):
def __init__(self, cast_func=void_cast_func, n_train_sample=1):
BitCenterActivation.__init__(self, fp_functional=F.tanh,
lp_functional=bit_center_tanh,
cast_func=cast_func, n_train_sample=n_train_sample)
nn.Tanh.__init__(self)
self.register_backward_hook(self.update_grad_output_cache)
| halp-master | halp/layers/tanh_layer.py |
import torch
import numpy as np
from torch.nn import Parameter
from halp.layers.relu_layer import BitCenterReLU, bit_center_relu
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
import unittest
from unittest import TestCase
from halp.utils.utils import set_seed
from halp.utils.test_utils import HalpTest
from torch.autograd.gradcheck import get_numerical_jacobian, iter_tensors, make_jacobian
from halp.layers.bit_center_layer_test import TestBitCenterLayer
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class TestBitCenterReLULayer(TestBitCenterLayer, TestCase):
'''
Test the functionality of bit centering ReLU layers
'''
def get_config(self, type="grad_check"):
config = {}
if type == "grad_check":
config["n_train_sample"] = 35
config["channel_in"] = 17
config["w_in"] = 24
config["h_in"] = 13
config["cast_func"] = void_cast_func
config["do_double"] = True
config["seed"] = 0
config["batch_size"] = 35
elif type == "fw_bw_proc":
config["n_train_sample"] = 98
config["channel_in"] = 13
config["w_in"] = 31
config["h_in"] = 17
config["cast_func"] = single_to_half_det
config["do_double"] = False
config["seed"] = 0
config["batch_size"] = 33
else:
raise Exception("Config type not supported!")
return config
def prepare_layer(self,
channel_in,
w_in,
h_in,
cast_func=void_cast_func,
bias=False,
do_double=True,
seed=0,
batch_size=1,
n_train_sample=1):
layer = BitCenterReLU(
cast_func=cast_func, n_train_sample=n_train_sample)
# Note: do_double sets up the layer for gradient checking; otherwise the
# layer is set up for checking tensor properties and layer behaviors
if do_double:
layer.double()
layer.cuda()
return layer
def check_layer_param_and_cache(self, layer):
t_list = [(layer.input_cache, torch.half, False, False),
(layer.grad_output_cache, torch.half, False, False)]
self.CheckLayerTensorProperty(t_list)
self.CheckLayerTensorGradProperty(t_list)
def get_input(self,
channel_in,
w_in,
h_in,
cast_func=void_cast_func,
bias=False,
do_double=True,
seed=0,
batch_size=1,
n_train_sample=1):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if do_double:
input_delta = Parameter(
torch.randn(
n_train_sample, channel_in, w_in, h_in,
dtype=torch.double).cuda(),
requires_grad=True)
input_fp = Parameter(
torch.randn(
n_train_sample, channel_in, w_in, h_in,
dtype=torch.double).cuda(),
requires_grad=True)
else:
input_delta = Parameter(
cast_func(
torch.randn(
n_train_sample,
channel_in,
w_in,
h_in,
dtype=torch.double).cuda()),
requires_grad=True)
input_fp = Parameter(
torch.randn(
n_train_sample, channel_in, w_in, h_in,
dtype=torch.float).cuda(),
requires_grad=True)
return [
input_fp,
], [
input_delta,
]
def get_analytical_param_grad(self, layer):
# as there is no param in the relu layer, we return an empty list
return []
def get_numerical_grad(self,
layer,
input_fp,
input_delta,
perturb_eps,
target=None):
grad_list = []
layer.set_mode(do_offset=True)
output_final = layer(*[x + y for x, y in zip(input_fp, input_delta)])
# use the gradient from 0.5*sum(output**2)
num_input_grad = output_final.clone()
num_input_grad[output_final == 0.0] = 0.0
grad_list.append(num_input_grad)
return output_final, grad_list
if __name__ == "__main__":
print(torch.__version__)
unittest.main()
| halp-master | halp/layers/relu_layer_test.py |
import torch
import numpy as np
import copy, logging
from torch.autograd import Variable
from torch.optim.optimizer import required, Optimizer
from torch.optim import SGD
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from halp.optim.bit_center_sgd import BitCenterOptim
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger()
class BitCenterSVRG(BitCenterOptim):
"""
Implementation of bit centering SVRG.
As the gradient cache is small, we cache gradients on the GPU.
We accumulate the full gradient in full precision and then cast it.
"""
def __init__(self,
params,
params_name,
lr=required,
momentum=0.0,
weight_decay=0.0,
n_train_sample=1,
cast_func=void_cast_func,
minibatch_size=128,
T=1):
super(BitCenterSVRG, self).__init__(
params,
params_name,
lr,
momentum,
weight_decay,
n_train_sample,
cast_func,
minibatch_size=minibatch_size,
T=T)
def setup_single_grad_cache(self, grad_shape, dtype):
logger.info("setup fp accum for full grad")
cache_shape = grad_shape
return torch.Tensor(np.zeros(cache_shape)).type(dtype).cuda()
def update_single_grad_cache(self, grad, cache):
cache.add_(grad)
def get_single_grad_offset(self, cache):
# we assume the size of the first dimension is the minibatch size
return cache
def on_start_fp_steps(self, model):
# resetup single precision cache
self.setup_grad_cache()
model.set_mode(do_offset=True)
def on_end_fp_steps(self, model):
for key, cache in self.grad_cache.items():
if cache is not None:
cache.div_(self.n_minibatch_per_epoch)
# turn the cache into half precision
self.grad_cache[key] = self.cast_func(cache)
model.set_mode(do_offset=True)
| halp-master | halp/optim/bit_center_svrg.py |
from torch.optim.optimizer import Optimizer, required
import torch
from torch.autograd import Variable
import copy, logging
class SVRG(torch.optim.SGD):
"""Implements stochastic variance reduction gradient descent.
Args:
params (iterable): iterable of parameters to optimize
lr (float): learning rate
T (int): number of iterations between the step to take the full grad/save w
data_loader (DataLoader): dataloader to use to load training data
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
momentum (float, optional): momentum (default: 0)
opt (torch.optim): optimizer to baseclass (default: SGD)
"""
def __init__(self, params, lr=required, T=required, data_loader=required, weight_decay=0.0,
momentum=0.0, opt=torch.optim.SGD):
defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum)
# Choose the baseclass dynamically.
self.__class__ = type(self.__class__.__name__,
(opt,object),
dict(self.__class__.__dict__))
logging.info("Using base optimizer {} in SVRG".format(opt))
super(self.__class__, self).__init__(params, **defaults)
if len(self.param_groups) != 1:
raise ValueError("SVRG doesn't support per-parameter options "
"(parameter groups)")
params = self.param_groups[0]['params']
self._params = params
self._curr_w = [p.data for p in params]
self._prev_w = [p.data.clone() for p in params]
# Gradients are lazily allocated and don't exist yet. However, gradients are
# the same shape as the weights so we can still allocate buffers here
self._curr_grad = [p.data.clone() for p in params]
self._prev_grad = [p.data.clone() for p in params]
self._full_grad = None
self.data_loader = data_loader
self.state['t_iters'] = T
self.T = T # Needed to trigger full gradient
logging.info("Data Loader has {} with batch {}".format(len(self.data_loader),
self.data_loader.batch_size))
def __setstate__(self, state):
super(self.__class__, self).__setstate__(state)
def _zero_grad(self):
for p in self._params:
if p.grad is not None:
p.grad.detach_()  # in-place detach; a plain detach() call would be a no-op here
p.grad.zero_()
def _set_weights_grad(self,ws,gs):
for idx, p in enumerate(self._params):
if ws is not None: p.data = ws[idx]
if gs is not None and p.grad is not None: p.grad.data = gs[idx]
if (gs is not None) and (p.grad is not None):
assert (p.grad.data.data_ptr() == gs[idx].data_ptr())
def step(self, closure):
"""Performs a single optimization step.
Arguments:
closure (callable): A closure that reevaluates the model
and returns the loss.
"""
assert len(self.param_groups) == 1
# Calculate full gradient
if self.state['t_iters'] == self.T:
# Setup the full grad
# Reset gradients before accumulating them
self._set_weights_grad(None, self._full_grad)
self._zero_grad()
# Accumulate gradients
for i, (data, target) in enumerate(self.data_loader):
closure(data, target)
# Adjust summed gradients by num_iterations accumulated over
# assert(n_iterations == len(self.data_loader))
for p in self._params:
if p.grad is not None:
p.grad.data /= len(self.data_loader)
if self._full_grad is None:
self._full_grad = []
for p in self._params:
if p.grad is not None:
self._full_grad.append(p.grad.data.clone())
else:
self._full_grad.append(None)
# Copy w to prev_w
for p, p0 in zip(self._curr_w, self._prev_w):
p0.copy_(p)
# Reset t
self.state['t_iters'] = 0
# Setup the previous grad
self._set_weights_grad(self._prev_w, self._prev_grad)
self._zero_grad()
closure()
# Calculate the current grad.
self._set_weights_grad(self._curr_w, self._curr_grad)
self._zero_grad()
loss = closure()
# Adjust the current gradient using the previous gradient and the full gradient.
# We have normalized so that these are all comparable.
for p, d_p0, fg in zip(self._params, self._prev_grad, self._full_grad):
# Adjust gradient in place
if p.grad is not None:
p.grad.data -= (d_p0 - fg)
# Call optimizer update step
super(self.__class__, self).step()
self.state['t_iters'] += 1
return loss
| halp-master | halp/optim/svrg.py |
| halp-master | halp/optim/__init__.py |
|
import torch
import torch.nn as nn
import numpy as np
from halp.utils.test_utils import HalpTest
from halp.optim.bit_center_sgd import BitCenterSGD
from halp.optim.bit_center_svrg import BitCenterSVRG
import unittest
from unittest import TestCase
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from copy import deepcopy
def GetAllParameterStates(optimizer):
param_dict = dict()
for param_group in optimizer.param_groups:
for p, p_name in zip(param_group["params"],
param_group["params_name"]):
param_dict[p_name] = p.clone()
return param_dict
def GetAllGradOffset(optimizer):
grad_offset_dict = dict()
for param_group in optimizer.param_groups:
for p, p_name in zip(param_group["params"],
param_group["params_name"]):
if p_name.endswith("_delta"):
cache = optimizer.grad_cache[p_name.split("_delta")[0]]
# note the offset in cache is only correctly fetched if this function
# is called before step_lp
grad_offset = optimizer.get_single_grad_offset(cache)
grad_offset_dict[p_name] = grad_offset.clone()
return grad_offset_dict
class TestBitCenterOptim(HalpTest):
@staticmethod
def FuncTestCacheProperty(model, optimizer):
pass
def test_CacheProperty(self):
n_train_sample = np.random.randint(low=100, high=1000)
minibatch_size = np.random.randint(low=9, high=n_train_sample // 10)
model = self.GetMultipleLayerLinearModel(
n_layer=3, n_train_sample=n_train_sample)
optimizer = self.GetOptimizer(
model,
lr=0.5,
weight_decay=0,
n_train_sample=n_train_sample,
minibatch_size=minibatch_size)
for param_group in optimizer.param_groups:
for p, p_name in zip(param_group["params"],
param_group["params_name"]):
cache = optimizer.grad_cache[p_name]
self.FuncTestCacheProperty(p, p_name, cache, optimizer)
print(self.__class__, " cache property test passed!")
@staticmethod
def FuncTestCacheUpdate(cache_before,
cache_after,
is_first_update=False,
is_last_update=False):
pass
def FuncTestLPStep(self,
optimizer,
param_dict_prev,
grad_offset_dict,
is_first_update=False,
is_last_update=False):
for param_group in optimizer.param_groups:
for p, p_name in zip(param_group["params"],
param_group["params_name"]):
p_prev = param_dict_prev[p_name]
if p_name.endswith("_delta"):
if is_first_update:
assert (p_prev.cpu().detach().numpy() == 0.0).all()
new_p = param_dict_prev[p_name] \
- torch.Tensor(np.array(param_group["lr"])).half().cuda() * p.grad \
- grad_offset_dict[p_name].cuda()
# atol is selected to avoid asserting on denormal half precision values.
# rtol tests that the values are at least in neighboring positions
# of the floating point grid; we are a bit stricter by
# a factor of 2.0
if not is_last_update:
# Note here we are asserting on the delta variables.
# As the delta variables are cleared before this test,
# we can not assert on the last iteration.
np.testing.assert_allclose(
new_p.cpu().detach().numpy(),
p.cpu().detach().numpy(),
atol=6.2e-5,
rtol=1.0 / 1024.0 / 2.0)
elif p_name.endswith("_lp"):
if is_last_update:
assert not (p_prev.cpu().detach().numpy() == p.cpu().detach().
numpy()).all()
else:
assert (p_prev.cpu().detach().numpy() == p.cpu().detach().
numpy()).all()
else: # this branch check the offset variables
if is_last_update:
assert not (p_prev.cpu().detach().numpy() == p.cpu().
detach().numpy()).all()
else:
assert (p_prev.cpu().detach().numpy() == p.cpu().
detach().numpy()).all()
def FuncTestFPStep(self, optimizer, param_dict_prev):
for param_group in optimizer.param_groups:
for p, p_name in zip(param_group["params"],
param_group["params_name"]):
p_prev = param_dict_prev[p_name]
assert (p_prev.cpu().detach().numpy() == p.cpu().detach().
numpy()).all()
def test_Step(self):
"""
test that the grad cache is updated properly: check the involved grads get generated.
for the fp step, we test 3 consecutive epochs to make sure
step_fp is updating the cache properly.
for the lp step, we test:
the first iter clears delta;
the last iter adds delta back to the offset;
in normal iterations, only the delta variables are updated.
"""
n_train_sample = np.random.randint(low=100, high=1000)
minibatch_size = np.random.randint(low=9, high=n_train_sample // 10)
model = self.GetMultipleLayerLinearModel(
n_layer=3, n_train_sample=n_train_sample)
optimizer = self.GetOptimizer(
model,
lr=0.0005,
weight_decay=0,
n_train_sample=n_train_sample,
minibatch_size=minibatch_size)
n_minibatch = int(np.ceil(n_train_sample / float(minibatch_size)))
for k in range(3):
# do fp loops
for i in range(n_minibatch):
if i == 0:
optimizer.on_start_fp_steps(model)
start_idx = i * minibatch_size
end_idx = min((i + 1) * minibatch_size, n_train_sample)
fw_input = torch.Tensor(
np.random.randn(end_idx - start_idx,
model.n_feat_in[0])).cuda()
fw_label = torch.Tensor(
np.random.randn(end_idx - start_idx, 1)).cuda()
loss = model.forward(fw_input, fw_label)
loss.backward()
# get the grad cache before fp step
cache_list_before_update = \
self.GetUpdatedCache(minibatch_idx=i, optimizer=optimizer)
param_dict_before_update = \
GetAllParameterStates(optimizer)
optimizer.step_fp()
# get the grad cache after fp step
cache_list_after_update = \
self.GetUpdatedCache(minibatch_idx=i, optimizer=optimizer)
self.FuncTestFPStep(optimizer, param_dict_before_update)
# test if the cache is properly maintained
for cache_before, cache_after in \
zip(cache_list_before_update, cache_list_after_update):
is_first_update = (i == 0)
self.FuncTestCacheUpdate(
cache_before,
cache_after,
is_first_update=is_first_update)
if i == n_minibatch - 1:
optimizer.on_end_fp_steps(model)
# do lp loops
for i in range(n_minibatch):
if i == 0:
optimizer.on_start_lp_steps(model)
start_idx = i * minibatch_size
end_idx = min((i + 1) * minibatch_size, n_train_sample)
fw_input = optimizer.cast_func(
torch.Tensor(
np.random.randn(end_idx - start_idx,
model.n_feat_in[0])).cuda())
fw_label = optimizer.cast_func(
torch.Tensor(np.random.randn(end_idx - start_idx,
1)).cuda())
loss = model.forward(fw_input, fw_label)
loss.backward()
# get param state before update steps
param_dict_before_update = \
GetAllParameterStates(optimizer)
grad_offset_dict = GetAllGradOffset(optimizer)
optimizer.step_lp()
is_first_update = False
is_last_update = False
if i == 0:
is_first_update = True
if i == n_minibatch - 1:
is_last_update = True
if i == n_minibatch - 1:
optimizer.on_end_lp_steps(model)
# test update procedures
self.FuncTestLPStep(optimizer, param_dict_before_update,
grad_offset_dict, is_first_update,
is_last_update)
class TestBitCenterSGD(TestBitCenterOptim, TestCase):
@staticmethod
def GetOptimizer(model,
lr,
weight_decay=0.0,
n_train_sample=128,
cast_func=single_to_half_det,
minibatch_size=128):
params = [x[1] for x in model.named_parameters()]
names = [x[0] for x in model.named_parameters()]
return BitCenterSGD(
params,
names,
lr,
n_train_sample=n_train_sample,
cast_func=cast_func,
minibatch_size=minibatch_size)
def FuncTestCacheProperty(self, param, name, cache, optimizer):
if (cache is None):
assert (not param.requires_grad) \
or (name.endswith("_lp")) \
or (name.endswith("_delta"))
else:
assert list(param.shape) == list(cache.shape[1:])
assert cache.size(0) == optimizer.n_minibatch_per_epoch
assert (not name.endswith("_lp")) and (not name.endswith("_delta"))
# assert on GPU and not require_grad
t_list = [(cache, torch.float16, False, False)]
self.CheckLayerTensorProperty(t_list)
@staticmethod
def GetUpdatedCache(minibatch_idx, optimizer):
cache_list = []
for param_group in optimizer.param_groups:
for p, p_name in zip(param_group["params"],
param_group["params_name"]):
cache = optimizer.grad_cache[p_name]
if cache is not None:
# print("check idx ", minibatch_idx, cache.size(), cache.cuda()[minibatch_idx])
cache_list.append(cache[minibatch_idx].clone())
return cache_list
@staticmethod
def FuncTestCacheUpdate(cache_before,
cache_after,
is_first_update=False,
is_last_update=False):
if (cache_before is None) and (cache_after is None):
return
assert (cache_before.cpu().numpy() == 0).all()
class TestBitCenterSVRG(TestBitCenterOptim, TestCase):
@staticmethod
def GetOptimizer(model,
lr,
weight_decay=0.0,
n_train_sample=128,
cast_func=single_to_half_det,
minibatch_size=128):
params = [x[1] for x in model.named_parameters()]
names = [x[0] for x in model.named_parameters()]
return BitCenterSVRG(
params,
names,
lr,
n_train_sample=n_train_sample,
cast_func=cast_func,
minibatch_size=minibatch_size)
def FuncTestCacheProperty(self, param, name, cache, optimizer):
if cache is None:
assert (not param.requires_grad) \
or (name.endswith("_lp")) \
or (name.endswith("_delta"))
else:
assert list(param.shape) == list(cache.shape)
assert (not name.endswith("_lp")) and (not name.endswith("_delta"))
# assert on GPU and not require_grad
t_list = [(cache, [torch.float16, torch.float32], True, False)]
self.CheckLayerTensorProperty(t_list)
@staticmethod
def GetUpdatedCache(minibatch_idx, optimizer):
cache_list = []
for param_group in optimizer.param_groups:
for p, p_name in zip(param_group["params"],
param_group["params_name"]):
cache = optimizer.grad_cache[p_name]
if cache is not None:
cache_list.append(cache.clone())
return cache_list
@staticmethod
def FuncTestCacheUpdate(cache_before,
cache_after,
is_first_update=False,
is_last_update=False):
if (cache_before is None) and (cache_after is None):
return
assert not (
cache_before.cpu().numpy() == cache_after.cpu().numpy()).all()
if is_first_update:
assert (cache_before.cpu().numpy() == 0).all()
if __name__ == "__main__":
print(torch.__version__)
unittest.main()
| halp-master | halp/optim/bit_center_optim_test.py |
import torch
import numpy as np
import copy, logging
from torch.autograd import Variable
from torch.optim.optimizer import required, Optimizer
from torch.optim import SGD
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from halp.utils.utils import get_recur_attr
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger('')
class BitCenterOptim(SGD):
"""
The base class for bit center optimizer: bit center SGD, bit center SVRG
"""
def __init__(self,
params,
params_name,
lr=required,
momentum=0.0,
weight_decay=0.0,
n_train_sample=128,
cast_func=void_cast_func,
minibatch_size=128,
T=1):
"""
The base class for bit centering style optimizers
The bit centering optimizer is used by calling step_fp to compute the offset
and step_lp to compute the delta.
The non bit centering version of the same update rule can be implemented only using
step_fp for updates.
"""
defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)
super(BitCenterOptim, self).__init__(params, **defaults)
if len(self.param_groups) != 1:
logger.error(
"Bit centering optimizers don't support per-parameter options "
"(parameter groups)")
raise ValueError("Bit centering optimizers don't support per-parameter options " \
"(parameter groups)")
self.param_groups[0]['params_name'] = params_name
self.n_train_sample = n_train_sample
self.n_minibatch_per_epoch = int(
np.floor((self.n_train_sample - 1) // float(minibatch_size)) + 1)
self.cast_func = cast_func
self.step_iter = 0 # this is a iter for step_lp function
self.cache_iter = 0 # this is a iter for updating the gradient cache
self.setup_grad_cache()
self.T = T
def setup_single_grad_cache(self, grad_shape, dtype):
# we assume the size of the first dimension is the minibatch size
pass
def setup_grad_cache(self):
self.grad_cache = dict()
for group in self.param_groups:
for p, p_name in zip(group["params"], group["params_name"]):
if (not p.requires_grad) \
or (p_name.endswith("_lp")) \
or (p_name.endswith("_delta")):
self.grad_cache[p_name] = None
continue
grad_shape = list(p.size())
cache = self.setup_single_grad_cache(grad_shape, dtype=p.dtype)
self.grad_cache[p_name] = cache
logger.info(p_name + " cache setup.")
self.cache_iter = 0
def update_single_grad_cache(self, grad, cache):
pass
def update_grad_cache(self):
# TODO: sanity check the case where the dataset size is not divisible by the minibatch
# size, to make sure the data idx in each minibatch is the same between the fp and lp passes
for param_group in self.param_groups:
weight_decay = param_group["weight_decay"]
for p, p_name in zip(param_group["params"],
param_group["params_name"]):
cache = self.grad_cache[p_name]
if cache is None:
continue
if weight_decay != 0.0:
p.grad.data.add_(weight_decay, p.data)
self.update_single_grad_cache(p.grad * param_group["lr"],
cache)
def get_single_grad_offset(self, cache, cache_iter=0):
# cache iter is useful for bit centering SGD to retrieve gradient offset
pass
def get_named_delta_parameters(self, only_requires_grad=True):
"""
if only_requires_grad, this does not return params that
do not require gradient, such as the running statistics of batchnorm layers
"""
named_parameters = []
for param_group in self.param_groups:
for p, p_name in zip(param_group["params"],
param_group["params_name"]):
if not p_name.endswith("_delta") or (only_requires_grad and (not p.requires_grad)):
continue
named_parameters.append((p_name, p))
return named_parameters
def step_lp(self):
for param_group in self.param_groups:
lr = param_group["lr"]
weight_decay = param_group["weight_decay"]
momentum = param_group["momentum"]
named_delta_parameters = self.get_named_delta_parameters()
for p_name, p in named_delta_parameters:
cache = self.grad_cache[p_name.split("_delta")[0]]
grad_offset = self.get_single_grad_offset(cache)
if weight_decay != 0.0:
p.grad.data.add_(weight_decay, p.data)
if (momentum is not None) and (momentum != 0.0):
param_state = self.state[p]
if "momentum_buffer" not in param_state:
param_state["momentum_buffer"] = torch.zeros_like(
p.data)
buf = param_state["momentum_buffer"]
buf.mul_(momentum).add_(lr * p.grad.data)
else:
buf = lr * p.grad.data
if p.is_cuda:
buf.add_(grad_offset.cuda())
else:
buf.add_(grad_offset)
p.data.add_(-buf)
self.step_iter = (self.step_iter + 1) % self.n_minibatch_per_epoch
def step_fp(self):
# update all the blobs as usual; the uninvolved blobs have 0 gradient, so effectively they are not updated
# during the entire training
self.update_grad_cache()
self.cache_iter = (self.cache_iter + 1) % self.n_minibatch_per_epoch
def update_offset_vars(self):
# we need to update both the parameters requiring gradient
# and those which do not, e.g. the running statistics in batch norm
named_delta_parameters = self.get_named_delta_parameters(only_requires_grad=False)
for p_name, p in named_delta_parameters:
# update the offset variable and its lp version
corr_found = False
for param_offset_group in self.param_groups:
for p_offset, p_offset_name in zip(
param_offset_group["params"],
param_offset_group["params_name"]):
if p_offset_name == p_name.split("_delta")[0]:
p_offset.data.add_(p.data.type(p_offset.dtype))
corr_found = True
# update the offset lp variable
lp_corr_found = False
for param_offset_lp_group in self.param_groups:
for p_offset_lp, p_offset_lp_name in zip(
param_offset_group["params"],
param_offset_group["params_name"]):
if p_offset_lp_name == p_name.split(
"_delta")[0] + "_lp":
p_offset_lp.data.copy_(p_offset)
lp_corr_found = True
if not corr_found or not lp_corr_found:
logger.error("Can not find offset var for %s", p_name)
raise Exception("Can not find offset var for " + p_name)
def clear_cache(self):
for cache in self.grad_cache.values():
if cache is None:
continue
if not cache.is_cuda:
cache.copy_(self.cast_func(torch.zeros(cache.size())).cpu())
else:
cache.zero_()
def reset_delta_vars(self):
named_delta_parameters = self.get_named_delta_parameters(only_requires_grad=False)
for p_name, p in named_delta_parameters:
p.data.zero_()
# note we set the mode of the model using the following
# helpers. After each specific fp or lp phase,
# we set the model back to do_offset=True as the default
# status
def on_start_lp_steps(self, model):
self.reset_delta_vars()
model.set_mode(do_offset=False)
def on_end_lp_steps(self, model):
self.update_offset_vars()
self.reset_delta_vars()
model.set_mode(do_offset=True)
def on_start_fp_steps(self, model):
self.clear_cache()
model.set_mode(do_offset=True)
def on_end_fp_steps(self, model):
# pass
model.set_mode(do_offset=True)
def step(self):
raise Exception(
"This function is not suppose to be called. Please use step_lp or step_fp"
)
class BitCenterSGD(BitCenterOptim):
"""
Implementation of bit centering SGD
"""
def __init__(self,
params,
params_name,
lr=required,
momentum=0.0,
weight_decay=0.0,
n_train_sample=128,
cast_func=void_cast_func,
minibatch_size=128,
T=1):
super(BitCenterSGD, self).__init__(
params,
params_name,
lr,
momentum,
weight_decay,
n_train_sample,
cast_func,
minibatch_size=minibatch_size,
T=T)
def setup_single_grad_cache(self, grad_shape, dtype):
cache_shape = [self.n_minibatch_per_epoch] + grad_shape
        return self.cast_func(torch.zeros(cache_shape).type(dtype)).cpu()
def update_single_grad_cache(self, grad, cache):
        # the input grad has already been scaled by lr in update_grad_cache
cache[self.cache_iter].copy_(grad)
def get_single_grad_offset(self, cache):
# we assume the size of the first dimension is the minibatch size
return cache[self.step_iter]
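if __name__ == "__main__":
    # Minimal sketch of the gradient-cache indexing used above (illustrative
    # only; it does not exercise BitCenterSGD itself). The cache holds one
    # row per minibatch: step_fp fills row cache_iter during the fp pass and
    # step_lp reads row step_iter back as the gradient offset.
    n_minibatch_per_epoch, grad_shape = 3, [4, 5]
    cache = torch.zeros([n_minibatch_per_epoch] + grad_shape)
    for cache_iter in range(n_minibatch_per_epoch):  # fp pass fills the cache
        cache[cache_iter].copy_(torch.randn(grad_shape))
    for step_iter in range(n_minibatch_per_epoch):  # lp pass reads it back
        grad_offset = cache[step_iter]
        assert grad_offset.shape == torch.Size(grad_shape)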
| halp-master | halp/optim/bit_center_sgd.py |
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from torch.autograd import gradcheck
from halp.layers.bit_center_layer import BitCenterModule, BitCenterModuleList
from halp.layers.linear_layer import BitCenterLinear
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from unittest import TestCase
from halp.utils.utils import get_recur_attr
class HalpTest(object):
def CheckLayerTensorProperty(self, t_list):
        # each element of t_list is a tuple containing (t, dtype, is_cuda, requires_grad)
def CheckSingleTensor(t, dtype, is_cuda, requires_grad):
if isinstance(dtype, list):
assert t.dtype in dtype
else:
assert t.dtype == dtype
if isinstance(is_cuda, list):
assert t.is_cuda in is_cuda
else:
assert t.is_cuda == is_cuda
if isinstance(requires_grad, list):
assert t.requires_grad in requires_grad
else:
assert t.requires_grad == requires_grad
for i, (t, dtype, is_cuda, requires_grad) in enumerate(t_list):
if t is None:
continue
CheckSingleTensor(t, dtype, is_cuda, requires_grad)
def CheckLayerTensorGradProperty(self, t_list):
        # each element of t_list is a tuple containing (t, dtype, is_cuda, requires_grad)
        # We check if the gradient has the right type and is on the right device
def CheckSingleTensorGrad(t, dtype, is_cuda, requires_grad):
assert t.grad.dtype == dtype
assert t.grad.is_cuda == is_cuda
for i, (t, dtype, is_cuda, requires_grad) in enumerate(t_list):
if (t is None) or (t.grad is None):
continue
CheckSingleTensorGrad(t, dtype, is_cuda, requires_grad)
@staticmethod
def GetMultipleLayerLinearModel(n_layer, n_train_sample):
class Net(BitCenterModule):
def __init__(self, n_layer, n_feat_in, final_dim, n_train_sample):
# super(Net, self).__init__()
BitCenterModule.__init__(self)
self.layers = BitCenterModuleList([])
n_feat_in = np.hstack((n_feat_in, final_dim))
for i in range(n_layer):
self.layers.append(
BitCenterLinear(
n_feat_in[i],
n_feat_in[i + 1],
cast_func=single_to_half_det,
n_train_sample=n_train_sample,
bias=True))
self.loss = torch.nn.MSELoss()
self.n_feat_in = n_feat_in
def forward(self, input, label):
fw_input = input
for layer in self.layers:
fw_input = layer(fw_input)
return self.loss.forward(fw_input, label)
n_feat_in = np.random.randint(low=10, high=100, size=(n_layer, ))
final_dim = 1
net = Net(n_layer, n_feat_in, final_dim, n_train_sample=n_train_sample)
return net | halp-master | halp/utils/test_utils.py |
import torch
import numpy as np
import logging
import sys
import math
from halp.optim.bit_center_sgd import BitCenterOptim, BitCenterSGD
from halp.optim.bit_center_svrg import BitCenterSVRG
from halp.optim.svrg import SVRG
from halp.utils.utils import get_recur_attr
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger('')
from copy import deepcopy
from halp.models.resnet import ResNet
import time
# this function can load a normal model to a bc model
def load_param_to_model(model, state_dict, to_bc_model=False, args=None):
state_dict = deepcopy(state_dict)
for name, ref_param in state_dict.items():
model_param = get_recur_attr(model, name.split("."))
ref_param = state_dict[name]
model_param.data.copy_(ref_param.data)
if to_bc_model and name + "_lp" in model.state_dict().keys():
# if this is a bit centering based model, we need
# to update its lp copy of the fp offset
model_param_lp = get_recur_attr(model, (name + "_lp").split("."))
model_param_lp.data.copy_(model.cast_func(ref_param.data))
logger.info("loaded model parameter " + name)
# this is mostly for loading the momentum terms
def load_state_to_optimizer(optimizer, model, state_dict, to_bc_opt=False, args=None):
ref_state_dict = deepcopy(state_dict)
opt_state_dict = optimizer.state_dict()
assert len(optimizer.param_groups
) == 1 # we currently only consider 1 param group case
lr_after_ckpt = optimizer.param_groups[0]["lr"]
load_cnt = 0
if to_bc_opt:
for name, param in optimizer.get_named_delta_parameters(
only_requires_grad=True):
ref_name = name.split("_delta")[0]
if ref_name not in ref_state_dict.keys():
continue
            # note the definition of momentum differs between bc and normal sgd:
            # the bc optimizer's momentum buffer already includes lr as a factor,
            # while the normal sgd optimizer's does not.
ref_state_dict[ref_name]["momentum_buffer"].mul_(lr_after_ckpt)
dtype = param.dtype
optimizer.state[param] = ref_state_dict[ref_name]
optimizer.state[param]["momentum_buffer"] = model.cast_func(
optimizer.state[param]["momentum_buffer"]).type(dtype)
load_cnt += 1
logger.info("opt param state loaded for " + name)
else:
for name, param in model.named_parameters():
ref_name = name
if ref_name not in ref_state_dict.keys():
continue
dtype = param.dtype
optimizer.state[param] = ref_state_dict[ref_name]
optimizer.state[param]["momentum_buffer"] = model.cast_func(
optimizer.state[param]["momentum_buffer"]).type(dtype)
load_cnt += 1
logger.info("opt param state loaded for " + name)
logger.info("loaded opt param state for " + str(load_cnt) + " params.")
def get_named_opt_param_state(model, optimizer):
# when constructing the optimizers, we have forced the order
# of parameters in the same way.
opt_state_dict = {}
for name, param in model.named_parameters():
opt_state_dict[name] = optimizer.state[param]
return opt_state_dict
class StepLRScheduler(object):
def __init__(self, optimizer, step_epoch=(1, 2), step_fac=0.1):
self.optimizer = optimizer
self.step_epoch = step_epoch
self.step_fac = step_fac
self.do_save = False
def turn_on(self):
self.do_save = True
def check_and_step(self, epoch_id, iter_id, train_dataloader):
if self.do_save and (epoch_id in self.step_epoch) and (iter_id == 0):
for group in self.optimizer.param_groups:
group["lr"] *= self.step_fac
if iter_id == 0:
for group in self.optimizer.param_groups:
logger.info("lr at epoch " + str(epoch_id) + " " +
str(group["lr"]))
class ModelSaver(object):
def __init__(self, optimizer, model, step_epoch=[1, 2], save_path="./"):
self.optimizer = optimizer
self.model = model
self.step_epoch = step_epoch
self.save_path = save_path
self.do_save = False
def turn_on(self):
self.do_save = True
def check_and_save(self, epoch_id, iter_id, train_dataloader):
if self.do_save and (epoch_id in self.step_epoch) and (iter_id == 0):
torch.save(
self.model.state_dict(), self.save_path + "/model_e_" +
str(epoch_id) + "_i_" + str(iter_id))
# get the param state, e.g. momentum for saving
opt_param_state = get_named_opt_param_state(
self.model, self.optimizer)
# also save the current learning rate
assert len(self.optimizer.param_groups) == 1
opt_param_state["lr_at_ckpt"] = self.optimizer.param_groups[0][
"lr"]
torch.save(
opt_param_state, self.save_path + "/opt_e_" + str(epoch_id) +
"_i_" + str(iter_id))
logger.info("model saved at epoch " + str(epoch_id))
def get_grad_norm(optimizer, model):
"""
note this function only supports a single learning rate in the optimizer
This is because the gradient offset is actually the lr * grad offset.
We need to recover it here
Note this should be used before step function of the optimizers.
However, it should be used after the step function of fp/lp SVRG.
This is because the fp/lp SVRG optimizer add the weight decay
automatically to the gradient variables
"""
norm = 0.0
weight_decay = optimizer.param_groups[0]["weight_decay"]
lr = optimizer.param_groups[0]["lr"]
if isinstance(optimizer, BitCenterOptim):
named_delta_parameters = optimizer.get_named_delta_parameters()
for p_name, p in named_delta_parameters:
# note we need to make sure this function is properly used
# as the optimizer's get_single_grad_offset is used and it
# depends on the internal functions of the optimizer.
# generally, use this function after the .backward() call.
if not p.requires_grad:
raise Exception(p_name + " does not require gradient!")
cache = optimizer.grad_cache[p_name.split("_delta")[0]]
grad_offset = optimizer.get_single_grad_offset(cache)
grad_delta = p.grad.data
# note the optimizer has already add delta part of decay to grad variable
norm += torch.sum((grad_delta.type(torch.FloatTensor) \
+ weight_decay * p.data.type(torch.FloatTensor) \
+ grad_offset.type(torch.FloatTensor) / lr)**2).item()
else:
if optimizer.__class__.__name__ == "SVRG":
for p_name, p in model.named_parameters():
if p.requires_grad:
# note the optimizer has already add weight decay to grad variable
norm += torch.sum(p.grad.data.type(torch.FloatTensor)
**2).item()
else:
for p_name, p in model.named_parameters():
if p.requires_grad:
norm += torch.sum((p.grad.data.type(torch.FloatTensor) \
+ weight_decay * p.data.type(torch.FloatTensor))
**2).item()
return math.sqrt(norm)
def remove_dummy_classes(pred, Y):
    # we assume pred is a numpy array while Y is a pytorch Tensor.
    # note Y always contains dummy classes while pred may or may not.
pred = pred.ravel()
Y = Y.view(-1)
if pred.size == Y.numel():
pred = pred[Y.data.cpu().numpy() != -1]
Y = Y[Y != -1]
return pred, Y
def evaluate_acc(model, val_loader, use_cuda=True, dtype="fp", args=None):
model.eval()
correct_cnt = 0
sample_cnt = 0
cross_entropy_accum = 0.0
for i, (X, Y) in enumerate(val_loader):
if use_cuda:
X, Y = X.cuda(), Y.cuda()
if dtype == "lp" and (X.dtype != torch.long):
X = model.cast_func(X)
if args.double_debug and (X.dtype != torch.long):
X = X.double()
pred, output = model.predict(X)
output = output[Y.view(-1) != -1]
pred, Y = remove_dummy_classes(pred, Y)
assert pred.shape == Y.data.cpu().numpy().shape
correct_cnt += np.sum(pred == Y.data.cpu().numpy())
cross_entropy_accum += model.criterion(
output, Y).data.cpu().numpy() * X.shape[0]
sample_cnt += pred.size
logger.info(
"Test metric acc: " + str(correct_cnt / float(sample_cnt)) +
" loss: " +
str(cross_entropy_accum / float(sample_cnt) +
0.5 * model.reg_lambda * model.get_trainable_param_squared_norm()))
return (correct_cnt / float(sample_cnt),
cross_entropy_accum / float(sample_cnt))
def train_non_bit_center_optimizer(model,
optimizer,
train_loader,
val_loader,
n_epochs,
eval_func=evaluate_acc,
use_cuda=True,
dtype='fp',
args=None):
train_loss_list = []
eval_metric_list = []
logging.info("using training function for non bit center optimizers")
if optimizer.T is not None:
logging.info("optimizer T=" + str(optimizer.T))
for epoch_id in range(n_epochs):
model.train()
for i, (X, Y) in enumerate(train_loader):
optimizer.model_saver.check_and_save(epoch_id, i, train_loader)
optimizer.lr_scheduler.check_and_step(epoch_id, i, train_loader)
if use_cuda:
X, Y = X.cuda(), Y.cuda()
if dtype == "lp" and (X.dtype != torch.long):
X = optimizer.cast_func(X)
if dtype == "bc":
raise Exception("This function can only run non-bc optimizers")
optimizer.zero_grad()
if args.double_debug and (X.dtype != torch.long):
X = X.double()
train_loss = model(X, Y)
train_pred = model.output.data.cpu().numpy().argmax(axis=1)
            # we use Y_dup to make sure the closure function uses
            # Y with its original shape
train_pred, Y_dup = remove_dummy_classes(train_pred, Y.clone())
train_acc = np.sum(train_pred == Y_dup.data.cpu().numpy()) / float(
Y_dup.size(0))
train_loss.backward()
if optimizer.__class__.__name__ == "SVRG":
def svrg_closure(data=X, target=Y):
if use_cuda:
data = data.cuda()
target = target.cuda()
if dtype == "lp" and (data.dtype != torch.long):
data = optimizer.cast_func(data)
if dtype == "bc":
raise Exception(
"This function can only run non-bc optimizers")
if args.double_debug and (data.dtype != torch.long):
data = data.double()
if isinstance(model, ResNet) and (not args.resnet_fine_tune):
                        # in the fine-tune setting, the batch norm layers
                        # are already protected by being set to eval mode
                        # in the forward function.
model.fix_running_stat()
loss = model(data, target)
if isinstance(model, ResNet) and (not args.resnet_fine_tune):
model.free_running_stat()
loss.backward()
return loss
optimizer.step(svrg_closure)
grad_norm = get_grad_norm(optimizer, model)
else:
grad_norm = get_grad_norm(optimizer, model)
optimizer.step()
param_norm = model.get_trainable_param_squared_norm()
train_loss_list.append(train_loss.item() +
0.5 * model.reg_lambda * param_norm)
logger.info("train loss epoch: " + str(epoch_id) + " iter: " +
str(i) + " loss: " + str(train_loss_list[-1]) +
" grad_norm: " + str(grad_norm) + " acc: " +
str(train_acc) + " regularizer: " +
str(0.5 * model.reg_lambda * param_norm))
logger.info("Finished train epoch " + str(epoch_id))
model.eval()
eval_metric_list.append(eval_func(model, val_loader, use_cuda, dtype, args))
return train_loss_list, eval_metric_list
def train_bit_center_optimizer(model,
optimizer,
train_loader,
val_loader,
n_epochs,
eval_func=evaluate_acc,
use_cuda=True,
dtype="bc",
args=None):
train_loss_list = []
eval_metric_list = []
T = optimizer.T
total_iter = 0
logging.info("using training function for bit center optimizers")
logging.info("optimizer T=" + str(optimizer.T))
for epoch_id in range(n_epochs):
model.train()
for i, (X, Y) in enumerate(train_loader):
if total_iter % T == 0:
optimizer.on_start_fp_steps(model)
for j, (X_fp, Y_fp) in enumerate(train_loader):
optimizer.zero_grad()
if use_cuda:
X_fp, Y_fp = X_fp.cuda(), Y_fp.cuda()
if args.double_debug and (X_fp.dtype != torch.long):
X_fp = X_fp.double()
loss_fp = model(X_fp, Y_fp)
loss_fp.backward()
optimizer.step_fp()
optimizer.on_end_fp_steps(model)
optimizer.on_start_lp_steps(model)
if use_cuda:
X, Y = X.cuda(), Y.cuda()
            # note here X is the input delta. It is supposed to be zero.
if dtype != "bc":
raise Exception(
"This training function does not support dtype other than bc"
)
optimizer.zero_grad()
if model.on_site_compute:
                # in on-site mode we need to calculate the offsets on site.
                # As the delta has not been added to the offset variables yet,
                # this is equivalent to computing gradients for input/output
                # at the fixed offset model values. As the optimizer step is
                # not called, the offset gradients wrt parameters are also
                # unaffected.
if args.double_debug and (X.dtype != torch.long):
X = X.double()
model.set_mode(do_offset=True)
fp_loss = model(X, Y)
fp_loss.backward()
model.set_mode(do_offset=False)
optimizer.zero_grad()
if X.dtype != torch.long:
X = optimizer.cast_func(X)
X.zero_()
if args.double_debug and (X.dtype != torch.long):
X = X.double()
train_loss = model(X, Y)
train_pred = model.output.data.cpu().numpy().argmax(axis=1)
train_pred, Y = remove_dummy_classes(train_pred, Y)
train_acc = np.sum(train_pred == Y.data.cpu().numpy()) / float(
Y.size(0))
train_loss.backward()
if model.on_site_compute:
                # skip the grad norm computation for faster iterations in on-site mode
grad_norm = 0
else:
grad_norm = get_grad_norm(optimizer, model)
optimizer.step_lp()
if total_iter % T == T - 1:
optimizer.on_end_lp_steps(model)
total_iter += 1
param_norm = model.get_trainable_param_squared_norm()
train_loss_list.append(train_loss.item() +
0.5 * model.reg_lambda * param_norm)
logger.info("train loss epoch: " + str(epoch_id) + " iter: " +
str(i) + " loss: " + str(train_loss_list[-1]) +
" grad_norm: " + str(grad_norm) + " acc: " +
str(train_acc) + " regularizer: " +
str(0.5 * model.reg_lambda * param_norm))
logger.info("Finished train epoch " + str(epoch_id))
model.eval()
optimizer.on_start_fp_steps(model)
eval_metric_list.append(
eval_func(model, val_loader, use_cuda, dtype=dtype, args=args))
optimizer.on_end_fp_steps(model)
return train_loss_list, eval_metric_list
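if __name__ == "__main__":
    # Illustrative sanity check (not part of the original module): entries
    # labeled with the dummy class -1 are padding and are dropped from both
    # the prediction array and the target tensor by remove_dummy_classes.
    pred = np.array([2, 0, 1])
    Y = torch.LongTensor([2, 0, -1])
    pred, Y = remove_dummy_classes(pred, Y)
    assert pred.tolist() == [2, 0]
    assert Y.tolist() == [2, 0]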
| halp-master | halp/utils/train_utils.py |
halp-master | halp/utils/__init__.py |
|
import nltk
from nltk.stem import PorterStemmer
import numpy as np
import sys, os
import torch
from halp.utils.utils import set_seed
from halp.utils.utils import DOUBLE_PREC_DEBUG_EPOCH_LEN, LP_DEBUG_EPOCH_LEN
from torch.utils.data.dataset import Dataset
import _pickle as cp
import logging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger('')
DOWNLOAD_PATH = "../datasets/nltk/"
nltk.data.path.append(DOWNLOAD_PATH)
def download_postag_dataset(download_path):
nltk.download('conll2000', download_dir=DOWNLOAD_PATH)
nltk.download('universal_tagset', download_dir=DOWNLOAD_PATH)
def get_dict(sentences):
ps = PorterStemmer()
tags = set([tag for sentence in sentences for _, tag in sentence])
words = set(
[ps.stem(w) for sentence in sentences for w, _ in sentence])
tag_dict = {}
word_dict = {}
assert type(tags) == set
assert type(words) == set
for i, tag in enumerate(tags):
tag_dict[tag] = i
for i, word in enumerate(words):
word_dict[word] = i
return tag_dict, word_dict
def process_conll2000_data(data_path=DOWNLOAD_PATH + "/conll2000/processed/"):
os.makedirs(data_path, exist_ok=True)
set_seed(0)
ps = PorterStemmer()
logger.info("Start processing dataset")
train_sentences = nltk.corpus.conll2000.tagged_sents(
"train.txt", tagset='universal')
test_sentences = nltk.corpus.conll2000.tagged_sents(
"test.txt", tagset='universal')
tag_dict, word_dict = get_dict(train_sentences + test_sentences)
train_sentences= [[(ps.stem(word[0]), word[1]) for word in sentence] for sentence in train_sentences]
test_sentences= [[(ps.stem(word[0]), word[1]) for word in sentence] for sentence in test_sentences]
with open(data_path + "trainset", "wb") as f:
cp.dump(train_sentences, f)
with open(data_path + "testset", "wb") as f:
cp.dump(test_sentences, f)
with open(data_path + "tag_dict", "wb") as f:
cp.dump(tag_dict, f)
with open(data_path + "word_dict", "wb") as f:
cp.dump(word_dict, f)
logger.info("Processing dataset done.")
class TaggingDataset(Dataset):
def __init__(self, sentences, tag_dict, word_dict):
self.tag_dict, self.word_dict = tag_dict, word_dict
self.words = [[self.word_dict[word] for word, _ in sentence]
for sentence in sentences]
self.tags = [[self.tag_dict[tag] for _, tag in sentence]
for sentence in sentences]
self.length = [len(x) for x in self.words]
self.max_seq_length = max(self.length)
logger.info("max seq length in dataset: " + str(self.max_seq_length))
# inflate the first example to make sure we
# allocate enough memory for the bc layer cache
self.words[0] = self.words[0] + [0] * (self.max_seq_length - len(self.words[0]))
self.tags[0] = self.tags[0] + [-1] * (self.max_seq_length - len(self.tags[0]))
assert len(self.words) == len(self.tags)
assert len(self.words[-1]) == len(self.tags[-1])
def __getitem__(self, index):
return self.words[index], self.tags[index]
def __len__(self):
return len(self.words)
def collate_fn(data):
words, tags = zip(*data)
lengths = [len(x) for x in words]
X = torch.zeros(len(words), max(lengths)).long()
Y = -torch.ones(len(tags), max(lengths)).long()
for i, (word_seq, tag_seq) in enumerate(zip(words, tags)):
length = lengths[i]
X[i, :length] = torch.LongTensor(word_seq)
Y[i, :length] = torch.LongTensor(tag_seq)
return X, Y
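def _example_collate_fn():
    # Padding sketch (illustrative helper, not part of the original module):
    # shorter sequences are padded with word id 0 and tag id -1, the dummy
    # class that gets dropped at evaluation time.
    batch = [([5, 7], [1, 2]), ([9], [0])]
    X, Y = collate_fn(batch)
    assert X.tolist() == [[5, 7], [9, 0]]
    assert Y.tolist() == [[1, 2], [0, -1]]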
def get_conll2000_data_loader(data_path=DOWNLOAD_PATH + "/conll2000/processed/", args=None):
    '''
    conll2000 has 8936 train samples
    '''
with open(data_path + "trainset", "rb") as f:
        train_sentences = cp.load(f)[:8928]  # truncated so each epoch has an even number of steps for fractional-epoch T runs
with open(data_path + "testset", "rb") as f:
test_sentences = cp.load(f)
with open(data_path + "tag_dict", "rb") as f:
tag_dict = cp.load(f)
with open(data_path + "word_dict", "rb") as f:
word_dict = cp.load(f)
max_seq_length = 271 # this maximum length works for conll 2000
num_embeddings = len(word_dict)
assert len(tag_dict) == args.n_classes
    logger.info("number of train sentences: " + str(len(train_sentences)))
if args.double_debug:
train_sentences = train_sentences[:(args.batch_size * DOUBLE_PREC_DEBUG_EPOCH_LEN)]
test_sentences = test_sentences[:(args.batch_size * DOUBLE_PREC_DEBUG_EPOCH_LEN)]
elif args.float_debug:
train_sentences = train_sentences[:(args.batch_size * LP_DEBUG_EPOCH_LEN)]
test_sentences = test_sentences[:(args.batch_size * DOUBLE_PREC_DEBUG_EPOCH_LEN)]
train_set = TaggingDataset(train_sentences, tag_dict, word_dict)
test_set = TaggingDataset(test_sentences, tag_dict, word_dict)
train_data_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=args.batch_size, shuffle=False, collate_fn=collate_fn)
    test_data_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=args.batch_size, shuffle=False, collate_fn=collate_fn)
return train_data_loader, test_data_loader, None, len(train_set), max_seq_length, num_embeddings
if __name__ == "__main__":
download_postag_dataset(DOWNLOAD_PATH)
process_conll2000_data()
| halp-master | halp/utils/postag_data_utils.py |
import gzip
import os
from os import path
import numpy as np
import torch
import sys
if sys.version_info.major < 3:
import urllib
else:
import urllib.request as request
from halp.utils.utils import LP_DEBUG_EPOCH_LEN, DOUBLE_PREC_DEBUG_EPOCH_LEN
DATASET_DIR = 'datasets/'
MNIST_FILES = ["train-images-idx3-ubyte.gz", "train-labels-idx1-ubyte.gz",
"t10k-images-idx3-ubyte.gz", "t10k-labels-idx1-ubyte.gz"]
def download_file(url, local_path):
dir_path = path.dirname(local_path)
if not path.exists(dir_path):
print("Creating the directory '%s' ..." % dir_path)
os.makedirs(dir_path)
print("Downloading from '%s' ..." % url)
if sys.version_info.major < 3:
urllib.URLopener().retrieve(url, local_path)
else:
request.urlretrieve(url, local_path)
def download_mnist(local_path):
url_root = "http://yann.lecun.com/exdb/mnist/"
for f_name in MNIST_FILES:
f_path = os.path.join(local_path, f_name)
if not path.exists(f_path):
download_file(url_root + f_name, f_path)
def one_hot(x, n):
if type(x) == list:
x = np.array(x)
x = x.flatten()
o_h = np.zeros((len(x), n))
o_h[np.arange(len(x)), x] = 1
return o_h
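def _example_one_hot():
    # Illustrative sketch (helper added for clarity, not part of the original
    # module): one_hot maps integer labels to indicator rows.
    labels = np.array([0, 2, 1])
    expected = np.array([[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]])
    assert (one_hot(labels, 3) == expected).all()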
def load_mnist(ntrain=60000, ntest=10000, onehot=True):
data_dir = os.path.join(DATASET_DIR, 'mnist/')
if not path.exists(data_dir):
download_mnist(data_dir)
else:
# check all files
checks = [path.exists(os.path.join(data_dir, f)) for f in MNIST_FILES]
if not np.all(checks):
download_mnist(data_dir)
with gzip.open(os.path.join(data_dir, 'train-images-idx3-ubyte.gz')) as fd:
buf = fd.read()
loaded = np.frombuffer(buf, dtype=np.uint8)
trX = loaded[16:].reshape((60000, 28 * 28)).astype(float)
with gzip.open(os.path.join(data_dir, 'train-labels-idx1-ubyte.gz')) as fd:
buf = fd.read()
loaded = np.frombuffer(buf, dtype=np.uint8)
trY = loaded[8:].reshape((60000))
with gzip.open(os.path.join(data_dir, 't10k-images-idx3-ubyte.gz')) as fd:
buf = fd.read()
loaded = np.frombuffer(buf, dtype=np.uint8)
teX = loaded[16:].reshape((10000, 28 * 28)).astype(float)
with gzip.open(os.path.join(data_dir, 't10k-labels-idx1-ubyte.gz')) as fd:
buf = fd.read()
loaded = np.frombuffer(buf, dtype=np.uint8)
teY = loaded[8:].reshape((10000))
trX /= 255.
teX /= 255.
trX = trX[:ntrain]
trY = trY[:ntrain]
teX = teX[:ntest]
teY = teY[:ntest]
if onehot:
trY = one_hot(trY, 10)
teY = one_hot(teY, 10)
else:
trY = np.asarray(trY)
teY = np.asarray(teY)
return trX, teX, trY, teY
def get_mnist_data_loader(onehot=False, batch_size=1, args=None):
LP_DEBUG = args.float_debug
DOUBLE_PREC_DEBUG = args.double_debug
    # labels are kept as class indices for CrossEntropyLoss; the onehot arg is unused here
    X_train, X_val, Y_train, Y_val = load_mnist(onehot=False)
X_train, X_val = torch.FloatTensor(X_train), torch.FloatTensor(X_val)
Y_train, Y_val = torch.LongTensor(Y_train), torch.LongTensor(Y_val)
if LP_DEBUG:
X_train = X_train[:(batch_size * LP_DEBUG_EPOCH_LEN)]
X_val = X_val[:(batch_size * LP_DEBUG_EPOCH_LEN)]
Y_train = Y_train[:(batch_size * LP_DEBUG_EPOCH_LEN)]
Y_val = Y_val[:(batch_size * LP_DEBUG_EPOCH_LEN)]
elif DOUBLE_PREC_DEBUG:
X_train = X_train[:(batch_size * DOUBLE_PREC_DEBUG_EPOCH_LEN)]
X_val = X_val[:(batch_size * DOUBLE_PREC_DEBUG_EPOCH_LEN)]
Y_train = Y_train[:(batch_size * DOUBLE_PREC_DEBUG_EPOCH_LEN)]
Y_val = Y_val[:(batch_size * DOUBLE_PREC_DEBUG_EPOCH_LEN)]
train_data = \
torch.utils.data.TensorDataset(X_train, Y_train)
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=batch_size, shuffle=False)
val_data = \
torch.utils.data.TensorDataset(X_val, Y_val)
val_loader = torch.utils.data.DataLoader(
val_data, batch_size=batch_size, shuffle=False)
input_shape = (batch_size,) + X_train.shape[1:]
args.T = len(train_loader)
return train_loader, val_loader, input_shape, X_train.shape[0] | halp-master | halp/utils/mnist_data_utils.py |
import torch
import numpy as np
import ctypes
from unittest import TestCase
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger('')
# DOUBLE_PREC_DEBUG = False
DOUBLE_PREC_DEBUG_EPOCH_LEN = 3
# LP_DEBUG = False
LP_DEBUG_EPOCH_LEN = 3
def single_to_half_det(tensor):
return tensor.half()
def single_to_half_stoc(tensor):
if tensor.dtype == torch.half:
return tensor.clone()
assert tensor.dtype == torch.float
value = tensor.data.clone().cpu().numpy().astype(np.float32)
value = np.ascontiguousarray(value)
value_shape = value.shape
value_ptr = value.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
value_int = np.ctypeslib.as_array(value_ptr, shape=value_shape)
sign_before = np.bitwise_and(
value_int,
np.array(int("0x80000000", 16)).astype(np.int32))
mantissa_rand = np.random.randint(
low=0, high=int("0x80000000", 16), size=value.shape, dtype=np.int32)
    # only keep the last 13 mantissa bits (the fp32-vs-fp16 mantissa gap) as the random bit sequence
mantissa_rand = np.bitwise_and(
mantissa_rand,
np.array(int("0x00001FFF", 16)).astype(np.int32))
value_int += mantissa_rand
sign_after = np.bitwise_and(
value_int,
np.array(int("0x80000000", 16)).astype(np.int32))
# if exponent is larger than the max range for fp32
# we saturate at the largest value for fp32.
# This is very unlikely to happen
pos_overflow = np.logical_and(sign_before == 0, sign_after != 0)
neg_overflow = np.logical_and(sign_before != 0, sign_after == 0)
    # the random addition has been applied above; now truncate the low mantissa
    # bits (round towards 0) to complete the stochastic rounding
assert value_int.flags["C_CONTIGUOUS"]
value_stoc = np.bitwise_and(
value_int,
np.array(int("0xFFFFE000", 16)).astype(np.int32))
value_ptr_stoc = value_stoc.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
value_lp = np.ctypeslib.as_array(value_ptr_stoc, shape=value_shape)
value_lp = value_lp.astype(np.float16)
# deal with the saturation on fp16
value_lp[value_lp == np.inf] = np.finfo(dtype=np.float16).max
value_lp[value_lp == -np.inf] = np.finfo(dtype=np.float16).min
value_lp[pos_overflow] = np.finfo(dtype=np.float16).max
value_lp[neg_overflow] = np.finfo(dtype=np.float16).min
output = torch.HalfTensor(value_lp)
if tensor.is_cuda:
output = output.cuda()
return output
def void_cast_func(tensor):
return tensor.clone()
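def _example_rounding_casts():
    # Hedged sketch (illustrative helper, not part of the original module):
    # the deterministic cast rounds to the nearest fp16 value, while the
    # stochastic cast rounds up or down with probability proportional to
    # proximity; both return half tensors.
    t = torch.FloatTensor([1.0 + 2.0 ** -12])  # strictly between two fp16 values
    det = single_to_half_det(t)
    stoc = single_to_half_stoc(t)
    assert det.dtype == torch.half and stoc.dtype == torch.half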
def get_recur_attr(obj, attr_str_list):
if len(attr_str_list) == 0:
return obj
else:
sub_obj = getattr(obj, attr_str_list[0])
return get_recur_attr(sub_obj, attr_str_list[1:])
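def _example_get_recur_attr():
    # Usage sketch (illustrative helper, not part of the original module):
    # retrieves a nested attribute such as model.0.weight from a dotted
    # parameter name split on ".".
    import torch.nn as nn
    model = nn.Sequential(nn.Linear(2, 2))
    weight = get_recur_attr(model, "0.weight".split("."))
    assert weight.shape == (2, 2)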
def void_func():
pass
def set_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def copy_layer_weights(layer_old, layer_new):
layer_new.weight.data.copy_(layer_old.weight)
if hasattr(layer_new, "bias") and hasattr(layer_old, "bias") and (layer_new.bias is not None):
layer_new.bias.data.copy_(layer_old.bias)
# copy bn stats
if hasattr(layer_new, 'running_mean'):
layer_new.running_mean.data.copy_(layer_old.running_mean)
if hasattr(layer_new, 'running_var'):
layer_new.running_var.data.copy_(layer_old.running_var)
return layer_new
def copy_model_weights(model_old, model_new):
# model old is the native pytorch model, while the model_new is a model using our implementation
for name, param in model_old.named_parameters():
if name not in model_new.state_dict().keys():
continue
old_param = get_recur_attr(model_old, name.split("."))
new_param = get_recur_attr(model_new, name.split("."))
new_param.data.copy_(old_param.data)
def copy_module_weights(module_old, module_new):
for name, param in module_new.named_parameters():
old_param = get_recur_attr(module_old, name.split("."))
new_param = get_recur_attr(module_new, name.split("."))
new_param.data.copy_(old_param.data)
return module_new
class UtilityTest(TestCase):
def test_single_to_half_stoc(self):
# assert overflow in single precision level is properly handled
np.random.seed(1)
t = np.ones((1000, 1000)) * np.finfo(dtype=np.float32).max
t_out = single_to_half_stoc(torch.FloatTensor(t))
assert (t_out.cpu().numpy() == np.finfo(dtype=np.float16).max).all()
t = np.ones((1000, 1000)) * np.finfo(dtype=np.float32).min
t_out = single_to_half_stoc(torch.FloatTensor(t))
assert (t_out.cpu().numpy() == np.finfo(dtype=np.float16).min).all()
        # assert overflow at the half precision level is properly handled
t = np.ones((1000, 1000)) * np.finfo(dtype=np.float16).max + 1.0
t_out = single_to_half_stoc(torch.FloatTensor(t))
assert (t_out.cpu().numpy() == np.finfo(dtype=np.float16).max).all()
t = np.ones((1000, 1000)) * np.finfo(dtype=np.float16).min - 1.0
t_out = single_to_half_stoc(torch.FloatTensor(t))
assert (t_out.cpu().numpy() == np.finfo(dtype=np.float16).min).all()
        # assert that the rounding probabilities to the lower and upper
        # neighbors are in the right ratio
np.random.seed(2)
cubic_size = 100
n_trial = 1000
t_np = np.random.randn(*[cubic_size, cubic_size]).astype(np.float32)
t = torch.FloatTensor(t_np)
res = np.zeros((n_trial, cubic_size, cubic_size)).astype(np.float16)
for i in range(n_trial):
output = single_to_half_stoc(t)
assert output.dtype == torch.float16
res[i, :, :] = output.cpu().numpy()
for i in range(cubic_size):
for j in range(cubic_size):
assert np.unique(res[:, i, j]).size <= 2
if np.unique(res[:, i, j]).size == 2:
upper = np.max(np.unique(res[:, i, j]))
lower = np.min(np.unique(res[:, i, j]))
orig = t_np[i, j]
upper_cnt = np.sum(res[:, i, j] == upper)
lower_cnt = np.sum(res[:, i, j] == lower)
ratio_cnt = float(upper_cnt) / float(lower_cnt)
ratio_val = (orig - lower) / (upper - orig)
                    # we use a very loose ratio threshold here so that the
                    # test is unlikely to fail due to extreme samples. Finer
                    # grained confirmation can be done by printing ratio_cnt
                    # and ratio_val.
if ratio_cnt >= 2:
assert ratio_val > 1
elif ratio_cnt < 0.5:
assert ratio_val < 1
logger.info("stochastic rounding test passed!")
def test_single_to_half_det(self):
pass
if __name__ == "__main__":
print(torch.__version__)
unittest.main()
| halp-master | halp/utils/utils.py |
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
from halp.utils.utils import LP_DEBUG_EPOCH_LEN, DOUBLE_PREC_DEBUG_EPOCH_LEN
import sys
import logging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger('')
def get_cifar10_data_loader(batch_size=128, args=None):
print('==> Preparing data..')
LP_DEBUG = args.float_debug
DOUBLE_PREC_DEBUG = args.double_debug
transform_train = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
if DOUBLE_PREC_DEBUG:
trainset = torch.utils.data.Subset(trainset, np.arange(batch_size * DOUBLE_PREC_DEBUG_EPOCH_LEN))
elif LP_DEBUG:
trainset = torch.utils.data.Subset(trainset, np.arange(batch_size * LP_DEBUG_EPOCH_LEN))
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=False, num_workers=1)
args.T = len(trainloader)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
test_batch_size = 100
if DOUBLE_PREC_DEBUG:
testset = torch.utils.data.Subset(testset, np.arange(batch_size * DOUBLE_PREC_DEBUG_EPOCH_LEN))
test_batch_size = args.batch_size
elif LP_DEBUG:
testset = torch.utils.data.Subset(testset, np.arange(batch_size * LP_DEBUG_EPOCH_LEN))
test_batch_size = args.batch_size
testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=False, num_workers=1)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
input_shape = (batch_size, 3, 32, 32)
return trainloader, testloader, input_shape, len(trainset) | halp-master | halp/utils/cifar_data_utils.py |
import torch
import numpy as np
from torch.autograd import Variable
# from halp.utils.test_utils import assert_model_grad_equal
from halp.utils.utils import get_recur_attr
from halp.utils.utils import single_to_half_det, single_to_half_stoc, void_cast_func
from halp.utils.utils import copy_model_weights, set_seed
from halp.utils.test_utils import HalpTest
class BitCenterModelTest(HalpTest):
"""
Test whether it gives the same fw bw output given the same input
In this test, we compare to the original LeNet implementation
We test with a epoch with 2 minibatches, we compare the output
between fp32 model and fp32 bc model
"""
def get_config(self):
pass
def get_models(self, n_minibatch, batch_size, n_class):
pass
def get_inputs(self, n_minibatch, batch_size, n_class):
pass
def assert_model_grad_equal(self, model1, model2, model2_is_bc=False):
# we assume all model1's params can be found in model2
for name, param in model1.named_parameters():
if name.endswith("_lp") or name.endswith("_delta"):
continue
if name not in model2.state_dict().keys():
continue
old_param = get_recur_attr(model1, name.split("."))
new_param = get_recur_attr(model2, name.split("."))
if old_param.requires_grad and new_param.requires_grad:
if model2_is_bc:
new_param_delta = get_recur_attr(model2, (name + "_delta").split("."))
np.testing.assert_allclose(old_param.grad.data.cpu().numpy(),
new_param.grad.data.cpu().numpy() + new_param_delta.grad.data.cpu().numpy())
else:
np.testing.assert_allclose(old_param.grad.data.cpu().numpy(),
new_param.grad.data.cpu().numpy())
def test_fw_bw_output(self):
set_seed(0)
config = self.get_config()
native_model, fp_model, lp_model, bc_model = self.get_models(**config)
x_list, y_list = self.get_inputs(**config)
# check fp forward
criterion = torch.nn.CrossEntropyLoss() # for native model
bc_model.set_mode(do_offset=True)
n_minibatch = config["n_minibatch"]
for i in range(n_minibatch):
output_native = native_model(x_list[i])
loss_native = criterion(output_native, y_list[i]).detach()
loss_fp = fp_model(x_list[i], y_list[i]).detach()
loss_lp = lp_model(x_list[i], y_list[i]).detach()
loss_bc = bc_model(x_list[i], y_list[i])
loss_bc.backward()
self.check_layer_status(bc_model, do_offset=True)
# print("fp loss ", loss_native.item(), loss_fp.item(), loss_lp.item(), loss_bc.item())
np.testing.assert_allclose(
np.array(loss_native.item()), np.array(loss_fp.item()))
np.testing.assert_allclose(
np.array(loss_fp.item()), np.array(loss_lp.item()))
np.testing.assert_allclose(
np.array(loss_lp.item()), np.array(loss_bc.item()))
bc_model.set_mode(do_offset=False)
for i in range(n_minibatch):
output_native = native_model(x_list[i])
loss_native = criterion(output_native, y_list[i])
loss_native.backward()
loss_fp = fp_model(x_list[i], y_list[i])
loss_fp.backward()
loss_lp = lp_model(x_list[i], y_list[i])
loss_lp.backward()
self.check_layer_status(bc_model, do_offset=False)
loss_bc = bc_model(torch.zeros_like(x_list[i]), y_list[i])
self.check_layer_status(bc_model, do_offset=False)
loss_bc.backward()
np.testing.assert_allclose(
np.array(loss_native.item()), np.array(loss_fp.item()))
np.testing.assert_allclose(
np.array(loss_fp.item()), np.array(loss_lp.item()))
np.testing.assert_allclose(
np.array(loss_lp.item()), np.array(loss_bc.item()))
if i == n_minibatch - 1:
                # we only test the gradient for the last minibatch because
                # the gradient offset changes across minibatches
self.assert_model_grad_equal(native_model, fp_model)
self.assert_model_grad_equal(fp_model, lp_model)
self.assert_model_grad_equal(lp_model, bc_model, model2_is_bc=True)
if __name__ == "__main__":
print(torch.__version__)
unittest.main()
| halp-master | halp/models/model_test.py |
halp-master | halp/models/__init__.py |
|
import torch
import numpy as np
from torch.autograd import Variable
from halp.utils.utils import single_to_half_det, single_to_half_stoc, void_cast_func
from halp.utils.utils import copy_model_weights, set_seed
from unittest import TestCase
from halp.utils.test_utils import HalpTest
from halp.models.lenet import LeNet_PyTorch, LeNet
from halp.models.model_test import BitCenterModelTest
class LeNetTest(BitCenterModelTest, TestCase):
def get_config(self):
config = {}
config["batch_size"] = 5
config["n_minibatch"] = 6
config["n_class"] = 10
return config
def get_models(self, n_minibatch, batch_size, n_class):
n_train_sample = batch_size * n_minibatch
native_model = LeNet_PyTorch().cuda().double()
fp_model = LeNet(
cast_func=void_cast_func,
n_train_sample=n_train_sample,
dtype="fp").cuda().double()
copy_model_weights(native_model, fp_model)
lp_model = LeNet(
cast_func=void_cast_func,
n_train_sample=n_train_sample,
dtype="lp").cuda().double()
copy_model_weights(native_model, lp_model)
bc_model = LeNet(
cast_func=void_cast_func,
n_train_sample=n_train_sample,
dtype="bc").double()
copy_model_weights(native_model, bc_model)
return native_model, fp_model, lp_model, bc_model
def get_inputs(self, n_minibatch, batch_size, n_class):
x_list = []
y_list = []
for i in range(n_minibatch):
x_list.append(
torch.nn.Parameter(
torch.randn(batch_size, 3, 32, 32,
dtype=torch.float).cuda(),
requires_grad=True).cuda().double())
y_list.append(torch.LongTensor(batch_size).random_(n_class).cuda())
return x_list, y_list
def check_layer_status(self, bc_model, do_offset=True):
assert bc_model.conv1.do_offset == do_offset
assert bc_model.relu1.do_offset == do_offset
assert bc_model.max_pool1.do_offset == do_offset
assert bc_model.conv2.do_offset == do_offset
assert bc_model.relu2.do_offset == do_offset
assert bc_model.max_pool2.do_offset == do_offset
assert bc_model.fc1.do_offset == do_offset
assert bc_model.relu3.do_offset == do_offset
assert bc_model.fc2.do_offset == do_offset
assert bc_model.relu4.do_offset == do_offset
assert bc_model.fc3.do_offset == do_offset
assert bc_model.criterion.do_offset == do_offset
if __name__ == "__main__":
print(torch.__version__)
unittest.main()
| halp-master | halp/models/lenet_test.py |
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from halp.utils.utils import single_to_half_det, single_to_half_stoc, copy_layer_weights
from halp.utils.utils import void_cast_func, get_recur_attr
from halp.layers.bit_center_layer import BitCenterModule
from halp.layers.linear_layer import BitCenterLinear
from halp.layers.cross_entropy import BitCenterCrossEntropy
from halp.layers.conv_layer import BitCenterConv2D
from halp.layers.relu_layer import BitCenterReLU
from halp.layers.pool_layer import BitCenterMaxPool2D
class LeNet_PyTorch(nn.Module):
def __init__(self):
super(LeNet_PyTorch, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
out = F.relu(self.conv1(x))
out = F.max_pool2d(out, 2)
out = F.relu(self.conv2(out))
out = F.max_pool2d(out, 2)
out = out.view(out.size(0), -1)
out = F.relu(self.fc1(out))
out = F.relu(self.fc2(out))
out = self.fc3(out)
return out
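def _example_lenet_shapes():
    # Shape sketch (illustrative helper, not part of the original module):
    # a 32x32 RGB input shrinks to 16 feature maps of size 5x5 before the
    # flatten, matching fc1's 16 * 5 * 5 input features.
    net = LeNet_PyTorch()
    x = torch.randn(2, 3, 32, 32)
    assert net(x).shape == (2, 10)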
class LeNet(BitCenterModule):
def __init__(self,
reg_lambda=0.0,
dtype="bc",
cast_func=void_cast_func,
n_train_sample=1):
super(LeNet, self).__init__()
self.cast_func = cast_func
self.n_train_sample = n_train_sample
self.reg_lambda = reg_lambda
self.dtype = dtype
# setup layers
self.conv1 = BitCenterConv2D(
in_channels=3,
out_channels=6,
kernel_size=(5, 5),
bias=True,
cast_func=cast_func,
n_train_sample=n_train_sample)
self.relu1 = BitCenterReLU(
cast_func=cast_func, n_train_sample=n_train_sample)
self.max_pool1 = BitCenterMaxPool2D(
kernel_size=(2, 2),
cast_func=cast_func,
n_train_sample=n_train_sample)
self.conv2 = BitCenterConv2D(
in_channels=6,
out_channels=16,
kernel_size=(5, 5),
bias=True,
cast_func=cast_func,
n_train_sample=n_train_sample)
self.relu2 = BitCenterReLU(
cast_func=cast_func, n_train_sample=n_train_sample)
self.max_pool2 = BitCenterMaxPool2D(
kernel_size=(2, 2),
cast_func=cast_func,
n_train_sample=n_train_sample)
self.fc1 = BitCenterLinear(
in_features=16 * 5 * 5,
out_features=120,
bias=True,
cast_func=cast_func,
n_train_sample=n_train_sample)
self.relu3 = BitCenterReLU(
cast_func=cast_func, n_train_sample=n_train_sample)
self.fc2 = BitCenterLinear(
in_features=120,
out_features=84,
bias=True,
cast_func=cast_func,
n_train_sample=n_train_sample)
self.relu4 = BitCenterReLU(
cast_func=cast_func, n_train_sample=n_train_sample)
self.fc3 = BitCenterLinear(
in_features=84,
out_features=10,
bias=True,
cast_func=cast_func,
n_train_sample=n_train_sample)
self.criterion = BitCenterCrossEntropy(
cast_func=cast_func, n_train_sample=n_train_sample)
if dtype == "bc":
pass
elif (dtype == "fp") or (dtype == "lp"):
self.conv1 = copy_layer_weights(self.conv1, nn.Conv2d(3, 6, 5))
self.conv2 = copy_layer_weights(self.conv2, nn.Conv2d(6, 16, 5))
self.fc1 = copy_layer_weights(self.fc1, nn.Linear(16 * 5 * 5, 120))
self.fc2 = copy_layer_weights(self.fc2, nn.Linear(120, 84))
self.fc3 = copy_layer_weights(self.fc3, nn.Linear(84, 10))
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.relu3 = nn.ReLU()
self.relu4 = nn.ReLU()
self.max_pool1 = nn.MaxPool2d(kernel_size=2)
self.max_pool2 = nn.MaxPool2d(kernel_size=2)
self.criterion = nn.CrossEntropyLoss(size_average=True)
if dtype == "lp":
if self.cast_func == void_cast_func:
pass
else:
for child in self.children():
child.half()
else:
raise Exception(dtype + " is not supported in LeNet!")
def forward(self, x, y, test=False):
out = self.relu1(self.conv1(x))
out = self.max_pool1(out)
out = self.relu2(self.conv2(out))
out = self.max_pool2(out)
out = out.view(out.size(0), -1)
out = self.relu3(self.fc1(out))
out = self.relu4(self.fc2(out))
out = self.fc3(out)
self.output = out
if test:
return out
else:
self.loss = self.criterion(out, y)
if isinstance(self.criterion, BitCenterCrossEntropy) \
and self.criterion.do_offset == False:
self.output = self.output + self.criterion.input_lp
return self.loss
def check_layer_status(self, do_offset=True):
assert self.conv1.do_offset == do_offset
assert self.relu1.do_offset == do_offset
assert self.max_pool1.do_offset == do_offset
assert self.conv2.do_offset == do_offset
assert self.relu2.do_offset == do_offset
assert self.max_pool2.do_offset == do_offset
assert self.fc1.do_offset == do_offset
assert self.relu3.do_offset == do_offset
assert self.fc2.do_offset == do_offset
assert self.relu4.do_offset == do_offset
assert self.fc3.do_offset == do_offset
assert self.criterion.do_offset == do_offset
def predict(self, x):
output = self.forward(x, y=None, test=True)
pred = output.data.cpu().numpy().argmax(axis=1)
return pred, output
| halp-master | halp/models/lenet.py |
import torch
import numpy as np
from torch.autograd import Variable
import halp.utils.utils
from halp.utils.utils import single_to_half_det, single_to_half_stoc
from halp.models.logistic_regression import LogisticRegression
from unittest import TestCase
class LogisticRegressionTest(TestCase):
def test_logistic_regression_grad(self):
n_sample = 4
n_dim = 3
n_class = 4
X = Variable(torch.DoubleTensor(np.random.normal(size=(n_sample, n_dim) ) ) )
Y = Variable(torch.LongTensor(np.array([0, 1, 3, 2] ) ) )
regressor = LogisticRegression(input_dim=n_dim, n_class=n_class, reg_lambda=100.0, dtype="fp")
regressor.double()
loss1 = regressor.forward(X, Y)
loss_diff = 0.0
move = 1e-9
loss1.backward()
for w in regressor.parameters():
loss_diff += torch.sum(w.grad.data * move)
for w in regressor.parameters():
w.data += move
loss2 = regressor.forward(X, Y)
assert np.abs((loss2.item() - loss1.item() ) - loss_diff) < 1e-9
print("logistic regression gradient test done!")
if __name__ == "__main__":
print(torch.__version__)
unittest.main() | halp-master | halp/models/logistic_regression_test.py |
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from halp.utils.utils import single_to_half_det, single_to_half_stoc
from halp.utils.utils import copy_layer_weights, copy_module_weights
from halp.utils.utils import void_cast_func, get_recur_attr
from halp.layers.bit_center_layer import BitCenterModule
from halp.layers.bit_center_layer import BitCenterSequential
from halp.layers.linear_layer import BitCenterLinear
from halp.layers.cross_entropy import BitCenterCrossEntropy
from halp.layers.conv_layer import BitCenterConv2D
from halp.layers.relu_layer import BitCenterReLU
from halp.layers.pool_layer import BitCenterAvgPool2D
from halp.layers.batch_norm_layer import BitCenterBatchNorm2D
import sys
import logging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger('')
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes,
planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(
in_planes,
self.expansion * planes,
kernel_size=1,
stride=stride,
bias=False), nn.BatchNorm2d(self.expansion * planes))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet_PyTorch(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet_PyTorch, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(
3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class BitCenterBasicBlock(BitCenterModule):
expansion = 1
def __init__(self,
in_planes,
planes,
stride=1,
cast_func=void_cast_func,
n_train_sample=1):
super(BitCenterBasicBlock, self).__init__()
self.conv1 = BitCenterConv2D(
in_planes,
planes,
kernel_size=(3, 3),
stride=stride,
padding=1,
bias=False,
cast_func=cast_func,
n_train_sample=n_train_sample)
self.bn1 = BitCenterBatchNorm2D(
planes, cast_func=cast_func, n_train_sample=n_train_sample)
self.relu1 = BitCenterReLU(
cast_func=cast_func, n_train_sample=n_train_sample)
self.conv2 = BitCenterConv2D(
planes,
planes,
kernel_size=(3, 3),
stride=1,
padding=1,
bias=False,
cast_func=cast_func,
n_train_sample=n_train_sample)
self.bn2 = BitCenterBatchNorm2D(
planes, cast_func=cast_func, n_train_sample=n_train_sample)
self.relu2 = BitCenterReLU(
cast_func=cast_func, n_train_sample=n_train_sample)
self.shortcut = BitCenterSequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = BitCenterSequential(
BitCenterConv2D(
in_planes,
self.expansion * planes,
kernel_size=(1, 1),
stride=stride,
bias=False,
cast_func=cast_func,
n_train_sample=n_train_sample),
BitCenterBatchNorm2D(
self.expansion * planes,
cast_func=cast_func,
n_train_sample=n_train_sample))
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
out += self.shortcut(x)
out = self.relu2(out)
return out
class ResNet(BitCenterModule):
def __init__(self,
block,
num_blocks,
num_classes=10,
reg_lambda=0.0,
dtype="bc",
fine_tune=False,
cast_func=void_cast_func,
n_train_sample=1):
super(ResNet, self).__init__()
self.in_planes = 64
self.reg_lambda = reg_lambda
self.dtype = dtype
self.fine_tune = fine_tune
self.cast_func = cast_func
self.n_train_sample = n_train_sample
self.conv1 = BitCenterConv2D(
3,
64,
kernel_size=(3, 3),
stride=1,
padding=1,
bias=False,
cast_func=cast_func,
n_train_sample=n_train_sample)
self.bn1 = BitCenterBatchNorm2D(
64, cast_func=cast_func, n_train_sample=n_train_sample)
self.relu1 = BitCenterReLU(
cast_func=cast_func, n_train_sample=n_train_sample)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.avg_pool = BitCenterAvgPool2D(
kernel_size=(4, 4),
cast_func=cast_func,
n_train_sample=n_train_sample)
self.linear = BitCenterLinear(
512 * block.expansion,
num_classes,
bias=True,
cast_func=cast_func,
n_train_sample=n_train_sample)
self.criterion = BitCenterCrossEntropy(
cast_func=cast_func, n_train_sample=n_train_sample)
if dtype == "bc" and not self.fine_tune:
pass
elif (dtype == "fp") or (dtype == "lp") or (self.fine_tune):
            # for fp and lp models, we use the original pytorch modules
            # reset the initial in_planes
self.in_planes = 64
self.conv1 = copy_module_weights(
self.conv1,
nn.Conv2d(
3,
self.in_planes,
kernel_size=3,
stride=1,
padding=1,
bias=False))
self.bn1 = copy_module_weights(self.bn1, nn.BatchNorm2d(64))
self.relu1 = copy_module_weights(self.relu1, nn.ReLU())
self.layer1 = copy_module_weights(
self.layer1,
ResNet_PyTorch._make_layer(
self, BasicBlock, 64, num_blocks[0], stride=1))
self.layer2 = copy_module_weights(
self.layer2,
ResNet_PyTorch._make_layer(
self, BasicBlock, 128, num_blocks[1], stride=2))
self.layer3 = copy_module_weights(
self.layer3,
ResNet_PyTorch._make_layer(
self, BasicBlock, 256, num_blocks[2], stride=2))
self.layer4 = copy_module_weights(
self.layer4,
ResNet_PyTorch._make_layer(
self, BasicBlock, 512, num_blocks[3], stride=2))
self.avg_pool = copy_module_weights(
self.avg_pool, nn.AvgPool2d(kernel_size=(4, 4)))
if self.fine_tune:
if self.cast_func == void_cast_func:
# this branch is only for test purpose
pass
else:
for module_name, child in self.named_children():
if module_name not in ["linear.", "criterion."]:
child.half()
# turn off gradient for the rest of the models
for name, param in self.named_parameters():
if not (name.startswith("linear.") or name.startswith("criterion.")):
param.requires_grad = False
if dtype != "bc":
self.linear = copy_module_weights(
self.linear, nn.Linear(512 * BasicBlock.expansion,
num_classes))
self.criterion = copy_module_weights(
self.criterion, nn.CrossEntropyLoss(size_average=True))
if dtype == "lp":
if self.cast_func == void_cast_func:
pass
else:
for child in self.children():
child.half()
else:
raise Exception(dtype + " is not supported in LeNet!")
for name, param in self.named_parameters():
logger.info("Resnet check requires grad " + name + " " + str(param.requires_grad) + " " + str(param.size()))
    def setup_bn_running_stat_swap(self):
self.running_stat = {}
self.running_stat_swap = {}
for name, param in self.state_dict().items():
if (".running_mean" in name) or (".running_var" in name):
assert param.requires_grad == False
self.running_stat[name] = param.data
                # as the value of the swap does not matter, we set them to 1.
self.running_stat_swap[name] = torch.ones_like(param)
assert self.running_stat[name].data_ptr() != self.running_stat_swap[name].data_ptr()
def set_running_stat_param(self, stat_dict):
for name, param in self.state_dict().items():
if (".running_mean" in name) or (".running_var" in name):
assert param.requires_grad == False
stat = get_recur_attr(self, name.split("."))
if stat.is_cuda:
stat.data = stat_dict[name].type(stat.dtype).cuda()
else:
stat.data = stat_dict[name].type(stat.dtype).cpu()
def fix_running_stat(self):
        # protect the running stats from being updated multiple
        # times in each iteration. This is because the SVRG optimizer
        # calls the closure 3 times, each of which can update the running
        # statistics. This is only used for the lp and fp models, not for
        # the bc model. For the case of fine tuning, we don't use this
        # function; we set bn to eval mode for training in the forward
        # function instead.
assert self.dtype != "bc" and not self.fine_tune
        # as the running stat memory changes all the time, every time we
        # swap in the dummy stats we need to save the original stats first
        self.setup_bn_running_stat_swap()
self.set_running_stat_param(self.running_stat_swap)
def free_running_stat(self):
assert self.dtype != "bc" and not self.fine_tune
self.set_running_stat_param(self.running_stat)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(
block(self.in_planes, planes, stride, self.cast_func,
self.n_train_sample))
self.in_planes = planes * block.expansion
return BitCenterSequential(*layers)
def set_bn_mode(self, module, do_eval=True):
for name, child in module.named_children():
if not isinstance(child, nn.BatchNorm2d):
self.set_bn_mode(child, do_eval=do_eval)
            else:
                # respect the requested mode instead of always forcing eval
                if do_eval:
                    child.eval()
                else:
                    child.train()
def forward(self, x, y, test=False):
if self.fine_tune:
# sync the input type with the layer param type
x = x.type(self.conv1.weight.dtype)
self.set_bn_mode(self, do_eval=True)
out = self.relu1(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.avg_pool(out)
out = out.view(out.size(0), -1)
        # in fine-tune lp step mode, the deltas are always
        # 0 because the lower layers' outputs are fixed
if self.fine_tune:
if self.dtype == "bc" and self.do_offset == False:
out = torch.zeros_like(out)
out = self.cast_func(out)
else:
out = out.type(self.linear.weight.dtype)
out = self.linear(out)
self.output = out
if test:
return out
else:
self.loss = self.criterion(out, y)
if isinstance(self.criterion, BitCenterCrossEntropy) \
and self.criterion.do_offset == False:
# this is for the case where we want to get full output
# in the do_offset = False mode.
self.output = self.output + self.criterion.input_lp
return self.loss
def predict(self, x):
output = self.forward(x, y=None, test=True)
pred = output.data.cpu().numpy().argmax(axis=1)
return pred, output
def ResNet18(reg_lambda, cast_func, n_train_sample, dtype, fine_tune, num_classes=10):
return ResNet(
BitCenterBasicBlock, [2, 2, 2, 2],
reg_lambda=reg_lambda,
cast_func=cast_func,
n_train_sample=n_train_sample,
dtype=dtype,
fine_tune=fine_tune,
num_classes=num_classes)
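def _example_resnet18_fp():
    # Hedged construction sketch (illustrative helper, not part of the
    # original module): an fp-mode ResNet18 behaves like the plain PyTorch
    # model, with cast_func as a no-op. Assumes CPU construction works end
    # to end in fp mode; runs on a tiny batch.
    net = ResNet18(reg_lambda=0.0, cast_func=void_cast_func, n_train_sample=2,
                   dtype="fp", fine_tune=False, num_classes=10)
    x = torch.randn(2, 3, 32, 32)
    y = torch.LongTensor([0, 1])
    loss = net(x, y)
    assert loss.numel() == 1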
| halp-master | halp/models/resnet.py |
import torch
import numpy as np
from torch.autograd import Variable
from halp.utils.utils import single_to_half_det, single_to_half_stoc, void_cast_func
from halp.layers.bit_center_layer import BitCenterModule
from halp.layers.linear_layer import BitCenterLinear
from halp.layers.cross_entropy import BitCenterCrossEntropy
class LogisticRegression(BitCenterModule):
def __init__(self, input_dim, n_class, reg_lambda, dtype="bc",
cast_func=void_cast_func, n_train_sample=1):
super(LogisticRegression, self).__init__()
self.input_dim = input_dim
self.n_class = n_class
self.reg_lambda = reg_lambda
self.linear = \
BitCenterLinear(self.input_dim, out_features=self.n_class,
cast_func=cast_func, n_train_sample=n_train_sample)
self.criterion = \
BitCenterCrossEntropy(cast_func=cast_func,
n_train_sample=n_train_sample)
self.cast_func = cast_func
if dtype == "bc":
pass
elif (dtype == "fp") or (dtype == "lp"):
# we use the copied weights to guarantee same initialization
# across different dtype when using the same random seed
linear_tmp = self.linear
self.linear = torch.nn.Linear(self.input_dim, out_features=self.n_class)
self.criterion = torch.nn.CrossEntropyLoss(size_average=True)
self.linear.weight.data.copy_(linear_tmp.weight)
self.linear.bias.data.copy_(linear_tmp.bias)
if dtype == "lp":
if self.cast_func == void_cast_func:
pass
else:
self.linear.half()
self.criterion.half()
else:
raise Exception("dtype not supported")
self.dtype = dtype
def forward(self, x, y, test=False):
if len(list(x.size())) != 2:
x = x.view(x.size(0), -1)
self.output = self.linear(x)
if len(list(y.size() ) ) == 2:
y = y.squeeze()
if test:
return self.output
else:
self.loss = self.criterion(self.output, y)
if isinstance(self.criterion, BitCenterCrossEntropy) \
and self.criterion.do_offset == False:
self.output = self.output + self.criterion.input_lp
return self.loss
def predict(self, x):
if len(list(x.size())) != 2:
x = x.view(x.size(0), -1)
output = self.linear(x)
if isinstance(self.linear, BitCenterLinear):
assert self.linear.do_offset == True
pred = output.data.cpu().numpy().argmax(axis=1)
return pred, output
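
# A minimal usage sketch (illustrative addition, not part of the original
# file): exercises the fp-mode forward/predict API on random data; a CUDA
# device is assumed.
if __name__ == "__main__":
    model = LogisticRegression(
        input_dim=784, n_class=10, reg_lambda=0.0, dtype="fp").cuda()
    x = torch.randn(16, 784).cuda()
    y = torch.randint(0, 10, (16,)).cuda()
    loss = model(x, y)              # cross-entropy loss on the batch
    pred, logits = model.predict(x)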
| halp-master | halp/models/logistic_regression.py |
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.autograd import Variable
from halp.utils.utils import single_to_half_det, single_to_half_stoc
from halp.utils.utils import copy_layer_weights, copy_module_weights
from halp.utils.utils import void_cast_func, get_recur_attr
from halp.layers.bit_center_layer import BitCenterModule, BitCenterModuleList
from halp.layers.linear_layer import BitCenterLinear
from halp.layers.cross_entropy import BitCenterCrossEntropy
from halp.layers.sigmoid_layer import BitCenterSigmoid
from halp.layers.tanh_layer import BitCenterTanh
from halp.layers.embedding import BitCenterEmbedding
from halp.layers.ele_mult import BitCenterEleMult
import sys
import logging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger('')
class BitCenterLSTMCell(BitCenterModule, nn.LSTMCell):
'''
Implementation of the LSTM cell
'''
def __init__(self,
input_size,
hidden_size,
bias=True,
cast_func=void_cast_func,
n_train_sample=1):
BitCenterModule.__init__(self)
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.cast_func = cast_func
self.n_train_sample = n_train_sample
# we process the input and last hidden state in a batch for the 4 gates
self.input_linear = BitCenterLinear(
input_size,
hidden_size * 4,
bias=self.bias,
cast_func=cast_func,
n_train_sample=n_train_sample)
self.hidden_linear = BitCenterLinear(
hidden_size,
hidden_size * 4,
bias=self.bias,
cast_func=cast_func,
n_train_sample=n_train_sample)
# sync the parameter name with the standard parameters
self.weight_ih = Parameter(self.input_linear.weight.clone(), requires_grad=True)
self.weight_hh = Parameter(self.hidden_linear.weight.clone(), requires_grad=True)
self.weight_ih_lp = Parameter(self.input_linear.weight_lp.clone(), requires_grad=True)
self.weight_hh_lp = Parameter(self.hidden_linear.weight_lp.clone(), requires_grad=True)
self.weight_ih_delta = Parameter(self.input_linear.weight_delta.clone(), requires_grad=True)
self.weight_hh_delta = Parameter(self.hidden_linear.weight_delta.clone(), requires_grad=True)
self.input_linear.weight = self.weight_ih
self.hidden_linear.weight = self.weight_hh
self.input_linear.weight_lp = self.weight_ih_lp
self.hidden_linear.weight_lp = self.weight_hh_lp
self.input_linear.weight_delta = self.weight_ih_delta
self.hidden_linear.weight_delta = self.weight_hh_delta
if self.bias:
self.bias_ih = Parameter(self.input_linear.bias.clone(), requires_grad=True)
self.bias_hh = Parameter(self.hidden_linear.bias.clone(), requires_grad=True)
self.bias_ih_lp = Parameter(self.input_linear.bias_lp.clone(), requires_grad=True)
self.bias_hh_lp = Parameter(self.hidden_linear.bias_lp.clone(), requires_grad=True)
self.bias_ih_delta = Parameter(self.input_linear.bias_delta.clone(), requires_grad=True)
self.bias_hh_delta = Parameter(self.hidden_linear.bias_delta.clone(), requires_grad=True)
self.input_linear.bias = self.bias_ih
self.hidden_linear.bias = self.bias_hh
self.input_linear.bias_lp = self.bias_ih_lp
self.hidden_linear.bias_lp = self.bias_hh_lp
self.input_linear.bias_delta = self.bias_ih_delta
self.hidden_linear.bias_delta = self.bias_hh_delta
# for the naming of the symbols like i, f, g, o, please refer to
# https://pytorch.org/docs/stable/nn.html#torch.nn.LSTMCell
self.i_activation = BitCenterSigmoid(
cast_func=self.cast_func, n_train_sample=self.n_train_sample)
self.f_activation = BitCenterSigmoid(
cast_func=self.cast_func, n_train_sample=self.n_train_sample)
self.g_activation = BitCenterTanh(
cast_func=self.cast_func, n_train_sample=self.n_train_sample)
self.o_activation = BitCenterSigmoid(
cast_func=self.cast_func, n_train_sample=self.n_train_sample)
self.f_c_mult = BitCenterEleMult(
cast_func=self.cast_func, n_train_sample=self.n_train_sample)
self.i_g_mult = BitCenterEleMult(
cast_func=self.cast_func, n_train_sample=self.n_train_sample)
self.c_prime_activation = BitCenterTanh(
cast_func=self.cast_func, n_train_sample=self.n_train_sample)
self.o_c_prime_mult = BitCenterEleMult(
cast_func=self.cast_func, n_train_sample=self.n_train_sample)
def forward(self, x, state):
h, c = state
trans_input = self.input_linear(x)
trans_hidden = self.hidden_linear(h)
out = trans_input + trans_hidden
i = self.i_activation(out[:, 0:self.hidden_size])
f = self.f_activation(out[:, self.hidden_size:(2 * self.hidden_size)])
g = self.g_activation(
out[:, (2 * self.hidden_size):(3 * self.hidden_size)])
o = self.o_activation(out[:, 3 * self.hidden_size:])
c_prime = self.f_c_mult(f, c) + self.i_g_mult(i, g)
c_prime_act = self.c_prime_activation(c_prime)
h_prime = self.o_c_prime_mult(o, c_prime_act)
return (h_prime, c_prime)
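
    # Example (illustrative sketch, not part of the original file): a single
    # step maps (x, (h, c)) -> (h', c'); e.g. with input_size=8,
    # hidden_size=16 and a batch of 4 on a CUDA device:
    #   cell = BitCenterLSTMCell(8, 16, cast_func=void_cast_func,
    #                            n_train_sample=4).cuda()
    #   h1, c1 = cell(torch.randn(4, 8).cuda(),
    #                 (torch.zeros(4, 16).cuda(), torch.zeros(4, 16).cuda()))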
def copy_lstm_cell_weights(src, tar):
    # src is a bit-centering LSTM cell; tar is a conventional nn.LSTMCell
tar_state_dict = tar.state_dict()
for name_src, p_src in src.named_parameters():
if name_src in tar_state_dict.keys():
tar_state_dict[name_src].data.copy_(p_src.data)
return tar
def copy_lstm_weights_to_lstm_cell(src, tar):
tar.weight_ih.data.copy_(src.weight_ih_l0)
tar.weight_hh.data.copy_(src.weight_hh_l0)
tar.bias_ih.data.copy_(src.bias_ih_l0)
tar.bias_hh.data.copy_(src.bias_hh_l0)
return tar
class LSTMTagger(nn.Module):
def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
super(LSTMTagger, self).__init__()
self.hidden_dim = hidden_dim
self.embedding = nn.Embedding(vocab_size, embedding_dim)
# The LSTM takes word embeddings as inputs, and outputs hidden states
# with dimensionality hidden_dim.
self.lstm = nn.LSTM(embedding_dim, hidden_dim)
# The linear layer that maps from hidden state space to tag space
self.linear = nn.Linear(hidden_dim, tagset_size)
def init_hidden(self, sentence):
        # Before we've done anything, we don't have any hidden state.
# Refer to the Pytorch documentation to see exactly
# why they have this dimensionality.
# The axes semantics are (num_layers, minibatch_size, hidden_dim)
batch_size = sentence.size(1)
return (torch.zeros(1, batch_size, self.hidden_dim,
dtype=self.embedding.weight.dtype,
device=self.embedding.weight.device),
torch.zeros(1, batch_size, self.hidden_dim,
dtype=self.embedding.weight.dtype,
device=self.embedding.weight.device))
def forward(self, sentence):
self.hidden = self.init_hidden(sentence)
embeds = self.embedding(sentence)
lstm_out, self.hidden = self.lstm(embeds, self.hidden)
tag_space = self.linear(lstm_out.view(-1, self.hidden_dim))
return tag_space
class BitCenterLSTMTagger(BitCenterModule):
def __init__(self,
num_embeddings,
embedding_dim,
hidden_size,
cast_func=void_cast_func,
n_classes=10,
n_train_sample=1,
seq_length=1,
reg_lambda=0.0,
dtype="bc"):
BitCenterModule.__init__(self)
self.cast_func = cast_func
self.n_train_sample = n_train_sample
self.n_classes = n_classes
self.dtype = dtype
self.reg_lambda = reg_lambda
self.seq_length = seq_length # this is the maximum length the model can handle
self.do_offset = True
self.embedding = BitCenterEmbedding(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
cast_func=cast_func,
n_train_sample=n_train_sample)
self.lstm_cell = BitCenterModuleList([])
        # we instantiate one lstm cell per time step so that each cell's
        # activation cache can be reused for sequences of length > 1
for i in range(seq_length):
self.lstm_cell.append(
BitCenterLSTMCell(
input_size=embedding_dim,
hidden_size=hidden_size,
bias=True,
cast_func=cast_func,
n_train_sample=n_train_sample))
# note here we need to make n_train_sample = n sample * seq length
# this is because BitCenterLinear can only process 2D input. We need
# to set n_train_sample like this to make sure the cache is in the
# right shape.
self.linear = BitCenterLinear(
in_features=hidden_size,
out_features=n_classes,
bias=True,
cast_func=cast_func,
n_train_sample=n_train_sample * self.seq_length)
self.criterion = BitCenterCrossEntropy(
cast_func=cast_func, n_train_sample=n_train_sample * self.seq_length)
if dtype == "bc":
pass
elif (dtype == "fp") or (dtype == "lp"):
self.embedding = copy_layer_weights(
self.embedding,
nn.Embedding(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim))
for i in range(seq_length):
self.lstm_cell[i] = copy_lstm_cell_weights(
self.lstm_cell[i],
nn.LSTMCell(
embedding_dim, hidden_size, bias=self.lstm_cell[i].bias))
self.linear = copy_layer_weights(
self.linear,
nn.Linear(in_features=hidden_size, out_features=n_classes))
            # reduction="mean" is the modern equivalent of the deprecated size_average=True
            self.criterion = nn.CrossEntropyLoss(reduction="mean")
if dtype == "lp":
if self.cast_func == void_cast_func:
pass
else:
for child in self.children():
child.half()
else:
raise Exception(dtype + " is not supported in LeNet!")
        # make all lstm cells share the same set of parameter tensors
self.unify_lstm_cell_param()
def unify_lstm_cell_param(self):
for i in range(1, len(self.lstm_cell)):
self.lstm_cell[i].weight_ih = self.lstm_cell[0].weight_ih
self.lstm_cell[i].weight_hh = self.lstm_cell[0].weight_hh
if self.dtype == "bc":
self.lstm_cell[i].weight_ih_lp = self.lstm_cell[0].weight_ih_lp
self.lstm_cell[i].weight_hh_lp = self.lstm_cell[0].weight_hh_lp
self.lstm_cell[i].weight_ih_delta = self.lstm_cell[0].weight_ih_delta
self.lstm_cell[i].weight_hh_delta = self.lstm_cell[0].weight_hh_delta
self.lstm_cell[i].input_linear.weight = self.lstm_cell[0].weight_ih
self.lstm_cell[i].hidden_linear.weight = self.lstm_cell[0].weight_hh
self.lstm_cell[i].input_linear.weight_lp = self.lstm_cell[0].weight_ih_lp
self.lstm_cell[i].hidden_linear.weight_lp = self.lstm_cell[0].weight_hh_lp
self.lstm_cell[i].input_linear.weight_delta = self.lstm_cell[0].weight_ih_delta
self.lstm_cell[i].hidden_linear.weight_delta = self.lstm_cell[0].weight_hh_delta
if self.lstm_cell[i].bias:
self.lstm_cell[i].bias_ih = self.lstm_cell[0].bias_ih
self.lstm_cell[i].bias_hh = self.lstm_cell[0].bias_hh
if self.dtype == "bc":
self.lstm_cell[i].bias_ih_lp = self.lstm_cell[0].bias_ih_lp
self.lstm_cell[i].bias_hh_lp = self.lstm_cell[0].bias_hh_lp
self.lstm_cell[i].bias_ih_delta = self.lstm_cell[0].bias_ih_delta
self.lstm_cell[i].bias_hh_delta = self.lstm_cell[0].bias_hh_delta
self.lstm_cell[i].input_linear.bias = self.lstm_cell[0].bias_ih
self.lstm_cell[i].hidden_linear.bias = self.lstm_cell[0].bias_hh
self.lstm_cell[i].input_linear.bias_lp = self.lstm_cell[0].bias_ih_lp
self.lstm_cell[i].hidden_linear.bias_lp = self.lstm_cell[0].bias_hh_lp
self.lstm_cell[i].input_linear.bias_delta = self.lstm_cell[0].bias_ih_delta
self.lstm_cell[i].hidden_linear.bias_delta = self.lstm_cell[0].bias_hh_delta
def init_hidden(self, x):
        # Before we've done anything, we don't have any hidden state.
# Refer to the Pytorch documentation to see exactly
# why they have this dimensionality.
# The axes semantics are (num_layers, minibatch_size, hidden_dim)
batch_size = x.size(1)
if self.do_offset:
dtype = self.embedding.weight.dtype
device = self.embedding.weight.device
else:
dtype = self.embedding.weight_delta.dtype
device = self.embedding.weight_delta.device
hidden_dim = self.lstm_cell[0].hidden_size
return (torch.zeros(batch_size, hidden_dim, dtype=dtype, device=device),
torch.zeros(batch_size, hidden_dim, dtype=dtype, device=device))
def forward(self, x, y, test=False):
        # we assume the first dimension corresponds to time steps
        # and the second dimension to the sample index
x = x.type(torch.long)
(h, c) = self.init_hidden(x)
out = self.embedding(x)
h_list = []
# note seq_length is the maximum seq length in the dataset
for i in range(out.size(0)):
state = self.lstm_cell[i](out[i], (h, c))
h, c = state
h_list.append(h)
h_seq = torch.stack(h_list, dim=0)
h_seq = h_seq.view(-1, h.size(-1))
if test:
out = self.linear(h_seq)
self.output = out
return out
else:
y = y.view(-1)
h_seq = h_seq[y != -1]
y = y[y != -1]
out = self.linear(h_seq)
self.output = out
self.loss = self.criterion(out, y)
if isinstance(self.criterion, BitCenterCrossEntropy) \
and self.criterion.do_offset == False:
self.output = self.output + self.criterion.input_lp
return self.loss
def predict(self, x):
output = self.forward(x, y=None, test=True)
pred = output.data.cpu().numpy().argmax(axis=1)
return pred, output
def LSTM(num_embeddings,
cast_func=void_cast_func,
n_train_sample=1,
seq_length=1,
reg_lambda=0.0,
dtype="bc"):
return BitCenterLSTMTagger(
num_embeddings=num_embeddings,
embedding_dim=32,
hidden_size=64,
cast_func=cast_func,
n_classes=12,
n_train_sample=n_train_sample,
seq_length=seq_length,
reg_lambda=reg_lambda,
dtype=dtype)
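
# A minimal usage sketch (illustrative addition, not part of the original
# file): builds the tagger in fp mode and tags a random batch. Vocabulary
# size, sequence length, and batch size are assumptions; a CUDA device is
# assumed.
if __name__ == "__main__":
    model = LSTM(
        num_embeddings=100, cast_func=void_cast_func,
        n_train_sample=4, seq_length=5, dtype="fp").cuda()
    x = torch.LongTensor(5, 4).random_(100).cuda()  # (seq_length, batch)
    y = torch.LongTensor(5 * 4).random_(12).cuda()  # 12 tag classes
    loss = model(x, y)
    pred, logits = model.predict(x)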
| halp-master | halp/models/lstm.py |
import torch
import numpy as np
from torch.autograd import Variable
# from halp.utils.test_utils import assert_model_grad_equal
from halp.utils.utils import single_to_half_det, single_to_half_stoc, void_cast_func
from halp.utils.utils import copy_model_weights, set_seed, get_recur_attr
from unittest import TestCase
from halp.utils.test_utils import HalpTest
from halp.models.resnet import ResNet, ResNet_PyTorch
from halp.models.resnet import BasicBlock, BitCenterBasicBlock
from halp.models.model_test import BitCenterModelTest
from halp.models.lstm import BitCenterLSTMTagger, LSTMTagger
from halp.models.lstm import copy_lstm_cell_weights
from halp.models.lstm import copy_lstm_weights_to_lstm_cell
class LSTMTaggerTest(BitCenterModelTest, TestCase):
""" Test the bit centering LSTM tagger """
def get_config(self):
config = {}
config["batch_size"] = 25
config["n_minibatch"] = 1
config["n_classes"] = 10
config["embedding_dim"] = 15
config["num_embeddings"] = 20
config["hidden_dim"] = 5
config["seq_length"] = 5
return config
def get_models(self, n_minibatch, batch_size, n_classes, embedding_dim,
num_embeddings, hidden_dim, seq_length):
n_train_sample = batch_size * n_minibatch
native_model = LSTMTagger(
embedding_dim=embedding_dim,
hidden_dim=hidden_dim,
vocab_size=num_embeddings,
tagset_size=n_classes).cuda().double()
fp_model = BitCenterLSTMTagger(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
hidden_size=hidden_dim,
cast_func=void_cast_func,
n_train_sample=n_train_sample,
seq_length=seq_length,
n_classes=n_classes,
dtype="fp").cuda().double()
copy_model_weights(native_model, fp_model)
copy_lstm_weights_to_lstm_cell(native_model.lstm,
fp_model.lstm_cell[0])
lp_model = BitCenterLSTMTagger(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
hidden_size=hidden_dim,
cast_func=void_cast_func,
n_train_sample=n_train_sample,
seq_length=seq_length,
n_classes=n_classes,
dtype="lp").cuda().double()
copy_model_weights(native_model, lp_model)
copy_lstm_weights_to_lstm_cell(native_model.lstm,
lp_model.lstm_cell[0])
bc_model = BitCenterLSTMTagger(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
hidden_size=hidden_dim,
cast_func=void_cast_func,
n_train_sample=n_train_sample,
seq_length=seq_length,
n_classes=n_classes,
dtype="bc").double()
copy_model_weights(native_model, bc_model)
copy_lstm_weights_to_lstm_cell(native_model.lstm,
bc_model.lstm_cell[0])
return native_model, fp_model, lp_model, bc_model
def get_inputs(self, n_minibatch, batch_size, n_classes, embedding_dim,
num_embeddings, hidden_dim, seq_length):
x_list = []
y_list = []
for i in range(n_minibatch):
x_list.append(
torch.nn.Parameter(
torch.LongTensor(seq_length, batch_size).random_(num_embeddings),
requires_grad=False).cuda())
y_list.append(torch.LongTensor(seq_length*batch_size).random_(n_classes).cuda())
return x_list, y_list
def check_layer_status(self, bc_model, do_offset=True):
assert bc_model.embedding.do_offset == do_offset
for i in range(bc_model.seq_length):
assert bc_model.lstm_cell[i].input_linear.do_offset == do_offset
assert bc_model.lstm_cell[i].hidden_linear.do_offset == do_offset
assert bc_model.lstm_cell[i].i_activation.do_offset == do_offset
assert bc_model.lstm_cell[i].f_activation.do_offset == do_offset
assert bc_model.lstm_cell[i].g_activation.do_offset == do_offset
assert bc_model.lstm_cell[i].o_activation.do_offset == do_offset
assert bc_model.lstm_cell[i].f_c_mult.do_offset == do_offset
assert bc_model.lstm_cell[i].i_g_mult.do_offset == do_offset
assert bc_model.lstm_cell[i].c_prime_activation.do_offset == do_offset
assert bc_model.lstm_cell[i].o_c_prime_mult.do_offset == do_offset
assert bc_model.linear.do_offset == do_offset
assert bc_model.criterion.do_offset == do_offset
def assert_model_grad_equal(self, model1, model2, model2_is_bc=False):
# we assume all model1's params can be found in model2
for name, param in model1.named_parameters():
if name.endswith("_lp") or name.endswith("_delta"):
continue
old_param = get_recur_attr(model1, name.split("."))
if name not in model2.state_dict().keys():
# this is for asserting the same gradient from native model and our model.
# in the native model we have lstm while in our model, we only have lstm cell
if "ih_l0" in name:
name = name.replace("ih_l0", "ih").replace("lstm.", "lstm_cell.0.")
elif "hh_l0" in name:
name = name.replace("hh_l0", "hh").replace("lstm.", "lstm_cell.0.")
else:
continue
new_param = get_recur_attr(model2, name.split("."))
if old_param.requires_grad and new_param.requires_grad:
if model2_is_bc:
new_param_delta = get_recur_attr(model2, (name + "_delta").split("."))
np.testing.assert_allclose(old_param.grad.data.cpu().numpy(),
new_param.grad.data.cpu().numpy() + new_param_delta.grad.data.cpu().numpy())
else:
np.testing.assert_allclose(old_param.grad.data.cpu().numpy(),
new_param.grad.data.cpu().numpy())
| halp-master | halp/models/lstm_test.py |
import torch
import numpy as np
from torch.autograd import Variable
# from halp.utils.test_utils import assert_model_grad_equal
from halp.utils.utils import single_to_half_det, single_to_half_stoc, void_cast_func
from halp.utils.utils import copy_model_weights, set_seed, get_recur_attr
from unittest import TestCase
from halp.utils.test_utils import HalpTest
from halp.models.resnet import ResNet, ResNet_PyTorch
from halp.models.resnet import BasicBlock, BitCenterBasicBlock
from halp.models.model_test import BitCenterModelTest
class ResNetTest(BitCenterModelTest, TestCase):
def get_config(self):
config = {}
config["batch_size"] = 5
config["n_minibatch"] = 6
config["n_class"] = 10
return config
def get_models(self, n_minibatch, batch_size, n_class):
n_train_sample = batch_size * n_minibatch
native_model = ResNet_PyTorch(BasicBlock, [2,2,2,2]).cuda().double()
fp_model = ResNet(
BitCenterBasicBlock,
[2,2,2,2],
cast_func=void_cast_func,
n_train_sample=n_train_sample,
dtype="fp").cuda().double()
copy_model_weights(native_model, fp_model)
lp_model = ResNet(
BitCenterBasicBlock,
[2,2,2,2],
cast_func=void_cast_func,
n_train_sample=n_train_sample,
dtype="lp").cuda().double()
copy_model_weights(native_model, lp_model)
bc_model = ResNet(
BitCenterBasicBlock,
[2,2,2,2],
cast_func=void_cast_func,
n_train_sample=n_train_sample,
dtype="bc").double()
copy_model_weights(native_model, bc_model)
return native_model, fp_model, lp_model, bc_model
def get_inputs(self, n_minibatch, batch_size, n_class):
x_list = []
y_list = []
for i in range(n_minibatch):
x_list.append(
torch.nn.Parameter(
torch.randn(batch_size, 3, 32, 32,
dtype=torch.float).cuda(),
requires_grad=True).cuda().double())
y_list.append(torch.LongTensor(batch_size).random_(n_class).cuda())
return x_list, y_list
def check_layer_status(self, bc_model, do_offset=True):
assert bc_model.conv1.do_offset == do_offset
assert bc_model.bn1.do_offset == do_offset
assert bc_model.relu1.do_offset == do_offset
for layer_idx in range(4):
for block_idx in range(2):
layer_name = "layer"+str(layer_idx + 1)+"."+str(block_idx)
layer = get_recur_attr(bc_model, (layer_name + ".conv1").split("."))
assert layer.do_offset == do_offset
layer = get_recur_attr(bc_model, (layer_name + ".bn1").split("."))
assert layer.do_offset == do_offset
layer = get_recur_attr(bc_model, (layer_name + ".relu1").split("."))
assert layer.do_offset == do_offset
layer = get_recur_attr(bc_model, (layer_name + ".conv2").split("."))
assert layer.do_offset == do_offset
layer = get_recur_attr(bc_model, (layer_name + ".bn2").split("."))
assert layer.do_offset == do_offset
layer = get_recur_attr(bc_model, (layer_name + ".relu2").split("."))
assert layer.do_offset == do_offset
if layer_idx != 0 and block_idx == 0:
layer = get_recur_attr(bc_model, (layer_name + ".shortcut.0").split("."))
assert layer.do_offset == do_offset
layer = get_recur_attr(bc_model, (layer_name + ".shortcut.1").split("."))
assert layer.do_offset == do_offset
assert bc_model.avg_pool.do_offset == do_offset
assert bc_model.linear.do_offset == do_offset
assert bc_model.criterion.do_offset == do_offset
if __name__ == "__main__":
    import unittest
    print(torch.__version__)
    unittest.main()
| halp-master | halp/models/resnet_test.py |
import copy
import argparse
import math
import numpy as np
import torch
torch.backends.cudnn.deterministic=True
import torch.nn as nn
import torch.utils.data
from torch.optim import SGD
import halp
from halp.optim.bit_center_sgd import BitCenterSGD
from halp.optim.bit_center_svrg import BitCenterSVRG
from halp.optim.svrg import SVRG
from halp.models.logistic_regression import LogisticRegression
from halp.models.lenet import LeNet
from halp.models.resnet import ResNet18
from halp.models.lstm import LSTM
from halp.utils.mnist_data_utils import get_mnist_data_loader
from halp.utils.cifar_data_utils import get_cifar10_data_loader
from halp.utils.postag_data_utils import get_conll2000_data_loader
from halp.utils import utils
from halp.utils.utils import void_cast_func
from halp.utils.utils import single_to_half_det, single_to_half_stoc
from halp.utils.train_utils import evaluate_acc
from halp.utils.train_utils import train_non_bit_center_optimizer
from halp.utils.train_utils import train_bit_center_optimizer
from halp.utils.train_utils import StepLRScheduler, ModelSaver
from halp.utils.train_utils import load_param_to_model, load_state_to_optimizer
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger('')
import time
parser = argparse.ArgumentParser()
parser.add_argument("-T", action="store", default=1, type=int,
help="T parameter for SVRG type algorithms.")
parser.add_argument("-e", "--n-epochs", action="store", default=10, type=int,
help="Number of epochs to run for")
parser.add_argument("-bs", "--batch-size", action="store", default=100, type=int,
help="Batch size.")
parser.add_argument("-a", "--alpha", action="store", default=0.01, type=float,
help="Learning Rate")
parser.add_argument("-m", "--momentum", default=0.0, type=float,
help="momentum value for Polyak's momentum algorithm")
parser.add_argument("-s", "--seed", action="store", default=42, type=int,
help="Random seed.")
parser.add_argument("-c", "--n-classes", action="store", default=10, type=int,
help="Number of classes for classification.")
parser.add_argument("--solver", action="store", default="sgd", type=str,
choices=["sgd", "svrg",
"lp-sgd", "lp-svrg",
"bc-sgd", "bc-svrg"],
help="Solver/optimization algorithm.")
parser.add_argument("--reg", type=float, default=0.0,
help="L2 regularizer strength")
parser.add_argument("--cuda", action="store_true",
help="currently pytorch only support store true.")
parser.add_argument("--rounding", default="near", type=str,
choices=["near", "stoc", "void"],
help="Support nearest (near) and stochastic (stoc) rounding.")
parser.add_argument("--dataset", default="mnist", type=str,
choices=["mnist", "cifar10", "conll2000"],
help="The dataset to train on.")
parser.add_argument("--model", default="logreg", type=str,
choices=["logreg", "lenet", "resnet", "lstm"],
help="The model used on the given dataset.")
parser.add_argument("--resnet-save-ckpt", action="store_true",
help="save check points for resnet18")
parser.add_argument("--resnet-save-ckpt-path", type=str, default="./",
help="path to save resnet18 check points")
parser.add_argument("--resnet-load-ckpt", action="store_true",
help="load check points for resnet18")
parser.add_argument("--resnet-load-ckpt-epoch-id", type=int, default=0,
help="warm start using a checkpoint saved at this epoch id")
parser.add_argument("--resnet-fine-tune", action="store_true",
help="fine tune the last linear layer of resnet")
parser.add_argument("--float-debug", action="store_true",
help="using single precision to do debug and test")
parser.add_argument("--double-debug", action="store_true",
help="using double precision to do debug and test")
parser.add_argument("--on-site-compute", action="store_true",
help="use on site fp compute to avoid host memory caching")
args = parser.parse_args()
utils.set_seed(args.seed)
if args.dataset == "mnist":
train_loader, val_loader, input_shape, n_train_sample = get_mnist_data_loader(
onehot=False, batch_size=args.batch_size, args=args)
elif args.dataset == "cifar10":
train_loader, val_loader, input_shape, n_train_sample = get_cifar10_data_loader(
batch_size=args.batch_size, args=args)
elif args.dataset == "conll2000":
train_loader, val_loader, input_shape, n_train_sample, max_seq_length, num_embeddings = \
get_conll2000_data_loader(args=args)
else:
raise Exception(args.dataset + " not supported.")
if args.rounding == "near":
args.cast_func = single_to_half_det
elif args.rounding == "stoc":
args.cast_func = single_to_half_stoc
elif args.rounding == "void":
args.cast_func = void_cast_func
else:
raise Exception("The rounding method is not supported!")
# set test and debug flag
assert not (args.float_debug and args.double_debug)
if args.double_debug:
assert args.cast_func == void_cast_func
args.T = len(train_loader)
elif args.float_debug:
assert args.cast_func == single_to_half_det
args.T = len(train_loader)
print("dataset stats: n_batch, batch_size, T ", len(train_loader), args.batch_size, args.T)
if len(train_loader) % args.T != 0:
raise Exception("Currently not supporting settings other than T = epoch_len/n")
# determine the dtype
if args.solver.startswith("bc-"):
args.dtype = "bc"
elif args.solver.startswith("lp-"):
args.dtype = "lp"
else:
args.dtype = "fp"
# note reg_lambda is a dummy here; the regularizer is applied by the optimizer
if args.model == "logreg":
model = LogisticRegression(
input_dim=input_shape[1],
n_class=args.n_classes,
reg_lambda=args.reg,
dtype=args.dtype,
cast_func=args.cast_func,
n_train_sample=n_train_sample)
elif args.model == "lenet":
model = LeNet(
reg_lambda=args.reg,
cast_func=args.cast_func,
n_train_sample=n_train_sample,
dtype=args.dtype)
elif args.model == "resnet":
model = ResNet18(
reg_lambda=args.reg,
cast_func=args.cast_func,
n_train_sample=n_train_sample,
dtype=args.dtype,
fine_tune=args.resnet_fine_tune,
num_classes=args.n_classes)
elif args.model == "lstm":
model = LSTM(
num_embeddings=num_embeddings,
cast_func=args.cast_func,
seq_length=max_seq_length,
n_train_sample=n_train_sample,
dtype=args.dtype,
reg_lambda=args.reg)
else:
raise Exception(args.model + " is currently not supported!")
if args.cuda:
    # note: as the caches are set up in the first forward pass,
    # their device placement is not controlled by this cuda() call
model.cuda()
if args.double_debug:
model.double()
# set the on-site compute flag recursively for all the modules
if args.on_site_compute:
model.set_on_site_compute(True)
# setup optimizer
if args.resnet_fine_tune:
if args.model != "resnet":
raise Exception("currently only resnet model support fine-tune mode")
params_name = []
params = []
for x, y in model.named_parameters():
if x.startswith("linear") or x.startswith("criterion"):
params_name.append(x)
params.append(y)
else:
params_name = [x for x, y in model.named_parameters()]
params = [y for x, y in model.named_parameters()]
logger.info("Params list: ")
for name, p in zip(params_name, params):
logger.info(name + " " + str(p.dtype))
if (args.solver == "sgd") or (args.solver == "lp-sgd"):
optimizer = SGD(
params=params,
lr=args.alpha,
momentum=args.momentum,
weight_decay=args.reg)
optimizer.cast_func = args.cast_func
optimizer.T = None
elif (args.solver == "svrg") or (args.solver == "lp-svrg"):
optimizer = SVRG(
params=params,
lr=args.alpha,
momentum=args.momentum,
weight_decay=args.reg,
T=args.T,
data_loader=train_loader)
optimizer.cast_func = args.cast_func
elif args.solver == "bc-sgd":
optimizer = BitCenterSGD(
params=params,
params_name=params_name,
lr=args.alpha,
momentum=args.momentum,
weight_decay=args.reg,
n_train_sample=n_train_sample,
cast_func=args.cast_func,
minibatch_size=args.batch_size,
T=args.T)
elif args.solver == "bc-svrg":
optimizer = BitCenterSVRG(
params=params,
params_name=params_name,
lr=args.alpha,
momentum=args.momentum,
weight_decay=args.reg,
n_train_sample=n_train_sample,
cast_func=args.cast_func,
minibatch_size=args.batch_size,
T=args.T)
else:
raise Exception(args.solver + " is an unsupported optimizer.")
# for warm-start experiments with resnet:
# the scheduler and the saver only take effect once turn_on() is called
optimizer.lr_scheduler = StepLRScheduler(
optimizer, step_epoch=[150, 250], step_fac=0.1)
# specify the epochs at which to save checkpoints
save_step_epochs = np.array([1, 300])
optimizer.model_saver = ModelSaver(
optimizer, model, step_epoch=save_step_epochs, save_path=args.resnet_save_ckpt_path)
if args.resnet_save_ckpt:
if args.model != "resnet":
raise Exception("Check point saving mode is only designed for resnet experiments")
if args.solver != "sgd" and args.solver != "lp-sgd":
raise Exception("Check point saving mode is only designed for fp and lp-sgd optimizer")
assert args.n_epochs == 350
assert args.alpha == 0.1
optimizer.lr_scheduler.turn_on()
optimizer.model_saver.turn_on()
logger.info("model saver and lr scheduler saved")
if args.resnet_load_ckpt:
assert args.resnet_save_ckpt == False
model_ckpt = args.resnet_save_ckpt_path + "/model_e_" + str(args.resnet_load_ckpt_epoch_id) + "_i_0"
opt_ckpt = args.resnet_save_ckpt_path + "/opt_e_" + str(args.resnet_load_ckpt_epoch_id) + "_i_0"
model_state_dict = torch.load(model_ckpt)
opt_state_dict = torch.load(opt_ckpt)
load_param_to_model(model, model_state_dict, to_bc_model=("bc-" in args.solver), args=args)
load_state_to_optimizer(optimizer, model, opt_state_dict, to_bc_opt=("bc-" in args.solver), args=args)
logger.info("model and optimizer loaded from " + model_ckpt)
try:
print("regularizer ", model.reg_lambda, optimizer.weight_decay)
except AttributeError:
    # SGD-style optimizers keep weight_decay inside param_groups
    assert len(optimizer.param_groups) == 1
    for param_group in optimizer.param_groups:
print("regularizer ", param_group["weight_decay"])
start_time = time.time()
# run training procedure
logger.info("optimizer " + optimizer.__class__.__name__)
logger.info("model " + model.__class__.__name__)
logger.info("optimizer rounding func " + optimizer.cast_func.__name__)
logger.info("model rounding func " + model.cast_func.__name__)
model.print_module_types()
if (args.solver == "bc-sgd") or (args.solver == "bc-svrg"):
train_loss = train_bit_center_optimizer(
model=model,
optimizer=optimizer,
train_loader=train_loader,
val_loader=val_loader,
n_epochs=args.n_epochs,
use_cuda=args.cuda,
dtype=args.dtype,
args=args)
else:
train_loss = train_non_bit_center_optimizer(
model=model,
optimizer=optimizer,
train_loader=train_loader,
val_loader=val_loader,
n_epochs=args.n_epochs,
use_cuda=args.cuda,
dtype=args.dtype,
args=args)
end_time = time.time()
print("Elapsed training time: ", end_time - start_time)
logger.info("Cross check model property: Params list: ")
for name, p in zip(params_name, params):
logger.info(name + " " + str(p.dtype))
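
# Example invocation (illustrative addition, not part of the original file;
# flag values are assumptions chosen so that T divides the number of
# minibatches per epoch):
#   python run_models.py --dataset cifar10 --model resnet --solver bc-svrg \
#       -T 391 -e 350 -bs 128 -a 0.1 --reg 5e-4 --rounding near --cuda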
| halp-master | halp/exp_script/run_models.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import platform
import re
import sys
from glob import glob
from pybind11.setup_helpers import build_ext, Pybind11Extension
from setuptools import find_packages, setup
REQUIRED_MAJOR = 3
REQUIRED_MINOR = 7
INSTALL_REQUIRES = [
"arviz>=0.12.1",
"astor>=0.7.1",
"black==22.3.0",
"botorch>=0.5.1",
"gpytorch>=1.3.0, <1.9.0",
"graphviz>=0.17",
"netCDF4<=1.5.8; python_version<'3.8'",
"numpy>=1.18.1",
"pandas>=0.24.2",
"plotly>=2.2.1",
"scipy>=0.16",
"statsmodels>=0.12.0",
"torch>=1.9.0, <2.0",
"tqdm>=4.46.0",
"typing-extensions>=3.10",
"xarray>=0.16.0",
]
TEST_REQUIRES = ["pytest>=7.0.0", "pytest-cov"]
TUTORIALS_REQUIRES = [
"bokeh",
"cma",
"ipywidgets",
"jupyter",
"lxml>=4.9",
"matplotlib",
"mdformat",
"mdformat-myst",
"scikit-learn>=1.0.0",
"seaborn",
"tabulate",
"torchvision",
]
DEV_REQUIRES = (
TEST_REQUIRES
+ TUTORIALS_REQUIRES
+ [
"flake8==4.0.1",
"libcst==0.4.1",
"nbval",
"sphinx==4.2.0",
"sphinx-autodoc-typehints",
"sphinx_rtd_theme",
"toml>=0.10.2",
# `black` is included in `INSTALL_REQUIRES` above.
"ufmt==1.3.2",
"usort==1.0.2",
]
)
if platform.system() == "Windows":
CPP_COMPILE_ARGS = [
"/WX",
"/permissive-",
"/std:c++20",
# Ignore utils.h(365) warning C4244 conversion from '__int64' to 'int'.
"/wd4244",
]
else:
CPP_COMPILE_ARGS = ["-std=c++2a", "-Werror"]
# Check for python version
if sys.version_info < (REQUIRED_MAJOR, REQUIRED_MINOR):
error = (
"Your version of python ({major}.{minor}) is too old. You need "
"python >= {required_major}.{required_minor}."
).format(
major=sys.version_info.major,
minor=sys.version_info.minor,
required_minor=REQUIRED_MINOR,
required_major=REQUIRED_MAJOR,
)
sys.exit(error)
# get version string from module
current_dir = os.path.dirname(os.path.abspath(__file__))
init_file = os.path.join(current_dir, "src", "beanmachine", "__init__.py")
version_regexp = r"__version__ = ['\"]([^'\"]*)['\"]"
with open(init_file, "r") as f:
version = re.search(version_regexp, f.read(), re.M).group(1)
# read in README.md as the long description
with open("README.md", "r") as fh:
long_description = fh.read()
# Use absolute path to the src directory
INCLUDE_DIRS = [os.path.join(current_dir, "src")]
# check if we're installing in a conda environment
if "CONDA_PREFIX" in os.environ:
conda_inc = "Library/include" if platform.system() == "Windows" else "include"
conda_include_dir = os.path.join(os.environ["CONDA_PREFIX"], conda_inc)
INCLUDE_DIRS.extend([conda_include_dir, os.path.join(conda_include_dir, "eigen3")])
INCLUDE_DIRS.extend([conda_include_dir, os.path.join(conda_include_dir, "boost")])
if sys.platform.startswith("linux"):
INCLUDE_DIRS.extend(
[
"/usr/include",
"/usr/include/eigen3",
"/usr/include/boost169/",
"/usr/include/x86_64-linux-gnu",
]
)
elif sys.platform.startswith("darwin"):
# MacOS dependencies installed through HomeBrew
INCLUDE_DIRS.extend(
glob("/usr/local/Cellar/eigen/*/include/eigen3")
+ glob("/usr/local/Cellar/boost/*/include")
)
# Add range-v3 'include' directory to configuration
RANGE_V3_INCLUDE_DIR_CANDIDATES = [
c for c in [os.environ.get("RANGE_V3_INCLUDE_DIR")] if c is not None
]
if sys.platform.startswith("linux"):
RANGE_V3_INCLUDE_DIR_CANDIDATES.extend(
[
os.path.join(current_dir, "vcpkg/packages/range-v3_x64-linux/include"),
"/usr/include/range-v3",
]
)
elif sys.platform.startswith("darwin"):
RANGE_V3_INCLUDE_DIR_CANDIDATES.extend(
[
os.path.join(current_dir, "vcpkg/packages/range-v3_x64-osx/include"),
*glob("/usr/local/Cellar/range-v3/*/include"), # Homebrew
]
)
elif platform.system() == "Windows":
RANGE_V3_INCLUDE_DIR_CANDIDATES.extend(
[
os.path.join(current_dir, "vcpkg/packages/range-v3_x86-windows/include"),
# The following option was observed being used on GitHub Actions runner:
"C:/vcpkg/packages/range-v3_x86-windows/include",
]
)
print(
"Checking directories for range-v3 'include':\n",
"\n".join(RANGE_V3_INCLUDE_DIR_CANDIDATES),
)
selected_range_v3_include_dirs = [
candidate
for candidate in RANGE_V3_INCLUDE_DIR_CANDIDATES
if os.path.isdir(candidate)
]
print(
"Existing candidate directories for range-v3 'include':\n",
"\n".join(selected_range_v3_include_dirs),
)
if len(selected_range_v3_include_dirs) == 0:
if os.environ.get("RANGE_V3_INCLUDE_DIR"):
message = (
"Could not find 'include' directory for range-v3 library dependency "
+ f"either at {os.environ.get('RANGE_V3_INCLUDE_DIR')}\n"
+ "as indicated in environment variable RANGE_V3_INCLUDE_DIR, "
+ "nor in some other common locations.\n"
+ "Please make sure library is installed (see README.md) and "
+ "set RANGE_V3_INCLUDE_DIR environment variable to the right directory."
)
else:
message = (
"Could not find 'include' directory for range-v3 library dependency "
+ "in some common locations.\n"
+ "Please make sure library is installed (see README.md). "
+ "You can also manually indicate the correct 'include' directory by "
+ "setting the environment variable RANGE_V3_INCLUDE_DIR environment "
+ "variable to the right directory."
)
message += "Here are the directories we checked:\n " + "\n".join(
RANGE_V3_INCLUDE_DIR_CANDIDATES
)
sys.exit(message)
else:
print(
"Using the following directory for range-v3 'include':\n",
selected_range_v3_include_dirs[0],
)
INCLUDE_DIRS.append(selected_range_v3_include_dirs[0])
setup(
name="beanmachine",
version=version,
description="Probabilistic Programming Language for Bayesian Inference",
author="Meta Platforms, Inc.",
license="MIT",
url="https://beanmachine.org",
project_urls={
"Documentation": "https://beanmachine.org",
"Source": "https://github.com/facebookresearch/beanmachine",
},
keywords=[
"Probabilistic Programming Language",
"Bayesian Inference",
"Statistical Modeling",
"MCMC",
"Variational Inference",
"PyTorch",
],
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
],
long_description=long_description,
long_description_content_type="text/markdown",
python_requires=">={}.{}".format(REQUIRED_MAJOR, REQUIRED_MINOR),
install_requires=INSTALL_REQUIRES,
packages=find_packages("src"),
package_dir={"": "src"},
package_data={"beanmachine/ppl": ["py.typed"]},
ext_modules=[
Pybind11Extension(
name="beanmachine.graph",
sources=sorted(
set(glob("src/beanmachine/graph/**/*.cpp", recursive=True))
- set(glob("src/beanmachine/graph/**/*_test.cpp", recursive=True))
),
include_dirs=INCLUDE_DIRS,
extra_compile_args=CPP_COMPILE_ARGS,
)
],
cmdclass={"build_ext": build_ext},
extras_require={
"dev": DEV_REQUIRES,
"test": TEST_REQUIRES,
"tutorials": TUTORIALS_REQUIRES,
},
)
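
# Example (illustrative addition, not part of the original file): a
# from-source editable install with dev extras; the range-v3 path is an
# assumption and should point at a real checkout if auto-detection fails.
#   RANGE_V3_INCLUDE_DIR=/path/to/range-v3/include pip install -e ".[dev]"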
| beanmachine-main | setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# -- Project information -----------------------------------------------------
project = "Bean Machine"
copyright = "2022, Meta Platforms, Inc."
author = "Meta Platforms, Inc."
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
master_doc = "index"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
| beanmachine-main | website/sphinx/conf.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import re
import shutil
import uuid
import warnings
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union
import mdformat # @manual=fbsource//third-party/pypi/mdformat:mdformat
import nbformat
import pandas as pd
from lxml import etree # pyre-ignore
from nbformat.notebooknode import NotebookNode
try:
from libfb.py.fbcode_root import get_fbcode_dir # pyre-ignore
except ImportError:
SCRIPTS_DIR = Path(__file__).parent.resolve()
LIB_DIR = SCRIPTS_DIR.parent.parent.resolve()
else:
LIB_DIR = (Path(get_fbcode_dir()) / "beanmachine").resolve()
WEBSITE_DIR = LIB_DIR.joinpath("website")
DOCS_DIR = LIB_DIR.joinpath("docs")
OVERVIEW_DIR = DOCS_DIR.joinpath("overview")
TUTORIALS_DIR = OVERVIEW_DIR.joinpath("tutorials")
# Data display priority. Below lists the priority for displaying data from cell outputs.
# Cells can output many different items, and some will output a fallback display, e.g.
# text/plain if text/html is not working. The below priorities help ensure the output in
# the MDX file shows the best representation of the cell output.
priorities = [
"text/markdown",
"image/png", # matplotlib output.
"application/vnd.jupyter.widget-view+json", # tqdm progress bars.
"application/vnd.bokehjs_load.v0+json", # Bokeh loading output.
"application/vnd.bokehjs_exec.v0+json", # Bokeh `show` outputs.
"application/vnd.plotly.v1+json", # Plotly
"text/html",
"stream",
"text/plain",
]
def load_nb_metadata() -> Dict[str, Dict[str, str]]:
"""
Load the metadata and list of notebooks that are to be converted to MDX.
Args:
None
Returns:
Dict[str, Dict[str, str]]: A dictionary of metadata needed to convert notebooks
to MDX. Only those notebooks that are listed in the `tutorials.json` file
will be included in the Docusaurus MDX output.
"""
tutorials_json_path = WEBSITE_DIR.joinpath("tutorials.json")
with tutorials_json_path.open("r") as f:
tutorials_data = json.load(f)
return tutorials_data
def load_notebook(path: Path) -> NotebookNode:
"""
Load the given notebook into memory.
Args:
path (Path): Path to the Jupyter notebook.
Returns:
NotebookNode: `nbformat` object, which contains all the notebook cells in it.
"""
with path.open("r") as f:
nb_str = f.read()
nb = nbformat.reads(nb_str, nbformat.NO_CONVERT)
return nb
def create_folders(path: Path) -> Tuple[str, Path]:
"""
Create asset folders for the tutorial.
Args:
path (Path): Path to the Jupyter notebook.
Returns:
Tuple[str, Path]: Returns a tuple with the filename to use for the MDX file
and the path for the MDX assets folder.
"""
tutorial_folder_name = path.stem
filename = "".join([token.title() for token in tutorial_folder_name.split("_")])
tutorial_folder = TUTORIALS_DIR.joinpath(tutorial_folder_name)
assets_folder = tutorial_folder / "assets"
img_folder = assets_folder / "img"
plot_data_folder = assets_folder / "plot_data"
if not tutorial_folder.exists():
tutorial_folder.mkdir(parents=True, exist_ok=True)
if not img_folder.exists():
img_folder.mkdir(parents=True, exist_ok=True)
if not plot_data_folder.exists():
plot_data_folder.mkdir(parents=True, exist_ok=True)
return filename, assets_folder
def create_frontmatter(path: Path, nb_metadata: Dict[str, Dict[str, str]]) -> str:
"""
Create frontmatter for the resulting MDX file.
The frontmatter is the data between the `---` lines in an MDX file.
Args:
path (Path): Path to the Jupyter notebook.
nb_metadata (Dict[str, Dict[str, str]]): The metadata associated with the given
notebook. Metadata is defined in the `tutorials.json` file.
Returns:
str: MDX formatted frontmatter.
"""
# Add the frontmatter to the MDX string. This is the part between the `---` lines
# that define the tutorial sidebar_label information.
frontmatter_delimiter = ["---"]
frontmatter = [
f"{key}: {value}"
for key, value in nb_metadata.get(
path.stem,
{
"title": "",
"sidebar_label": "",
"path": "",
"nb_path": "",
"github": "",
"colab": "",
},
).items()
]
frontmatter = "\n".join(frontmatter_delimiter + frontmatter + frontmatter_delimiter)
mdx = mdformat.text(frontmatter, options={"wrap": 88}, extensions={"myst"})
return f"{mdx}\n"
def create_imports() -> str:
"""
Create the imports needed for displaying buttons, and interactive plots in MDX.
Returns:
str: MDX formatted imports.
"""
link_btn = "../../../../website/src/components/LinkButtons.jsx"
cell_out = "../../../../website/src/components/CellOutput.jsx"
plot_out = "../../../../website/src/components/Plotting.jsx"
imports = f'import LinkButtons from "{link_btn}";\n'
imports += f'import CellOutput from "{cell_out}";\n'
imports += f'import {{BokehFigure, PlotlyFigure}} from "{plot_out}";\n'
return f"{imports}\n"
def create_buttons(
nb_metadata: Dict[str, Dict[str, str]],
tutorial_folder_name: str,
) -> str:
"""
Create buttons that link to Colab and GitHub for the tutorial.
Args:
nb_metadata (Dict[str, Dict[str, str]]): Metadata for the tutorial.
tutorial_folder_name (str): The name of the tutorial folder where the MDX
converted files exist. This is typically just the name of the Jupyter
notebook file.
Returns:
str: MDX formatted buttons.
"""
github_url = nb_metadata[tutorial_folder_name]["github"]
colab_url = nb_metadata[tutorial_folder_name]["colab"]
return f'<LinkButtons\n githubUrl="{github_url}"\n colabUrl="{colab_url}"\n/>\n\n'
def handle_images_found_in_markdown(
markdown: str,
new_img_dir: Path,
lib_dir: Path,
) -> str:
"""
Update image paths in the Markdown, and copy the image to the docs location.
    The pattern we search for in the Markdown is
    ``![alt](path/to/image.png "title")`` with two groups:
- group 1 = path/to/image.png
- group 2 = "title"
The first group (the path to the image from the original notebook) will be replaced
with ``assets/img/{name}`` where the name is `image.png` from the example above. The
original image will also be copied to the new location
``{new_img_dir}/assets/img/{name}``, which can be directly read into the MDX file.
Args:
markdown (str): Markdown where we look for Markdown flavored images.
new_img_dir (Path): Path where images are copied to for display in the
MDX file.
lib_dir (Path): The location for the Bean Machine repo.
Returns:
str: The original Markdown with new paths for images.
"""
markdown_image_pattern = re.compile(r"""!\[[^\]]*\]\((.*?)(?=\"|\))(\".*\")?\)""")
searches = list(re.finditer(markdown_image_pattern, markdown))
# Return the given Markdown if no images are found.
if not searches:
return markdown
# Convert the given Markdown to a list so we can delete the old path with the new
# standard path.
markdown_list = list(markdown)
for search in searches:
# Find the old image path and replace it with the new one.
old_path, _ = search.groups()
start = 0
end = 0
search = re.search(old_path, markdown)
if search is not None:
start, end = search.span()
old_path = Path(old_path)
name = old_path.name.strip()
new_path = f"assets/img/{name}"
del markdown_list[start:end]
markdown_list.insert(start, new_path)
# Copy the original image to the new location.
if old_path.exists():
old_img_path = old_path
else:
# Here we assume the original image exists in the same directory as the
# notebook, which should be in the tutorials folder of the library.
old_img_path = (lib_dir / "tutorials" / old_path).resolve()
new_img_path = str(new_img_dir / name)
shutil.copy(str(old_img_path), new_img_path)
return "".join(markdown_list)
def transform_style_attributes(markdown: str) -> str:
"""
Convert HTML style attributes to something React can consume.
Args:
markdown (str): Markdown where we look for HTML style attributes.
Returns:
str: The original Markdown with new React style attributes.
"""
# Finds all instances of `style="attr: value; ..."`.
token = "style="
pattern = re.compile(f"""{token}["'`]([^"]*)["'`]""")
found_patterns = re.findall(pattern, markdown)
if not found_patterns:
return markdown
for found_pattern in found_patterns:
# Step 1: splits "attr: value; ..." to
# ["attr: value", ..."].
step1 = [token.strip() for token in found_pattern.split(";") if token]
# Step 2: splits ["attr: value", ...] to
# [["attr", "value"], ...].
step2 = [[token.strip() for token in tokens.split(":")] for tokens in step1]
# Step 3: converts [["attr", "value"], ...] to
# '{"attr": "value", ...}'.
step3 = json.dumps(dict(step2))
# Step 4 wraps the JSON object in {}, so we end up with a string of the form;
# '{{"attr": "value", ...}}'.
step4 = f"{{{step3}}}"
# Step 5 replaces the old style data with the React style data, and clean the
# string for inclusion in the final Markdown.
markdown = markdown.replace(found_pattern, step4)
markdown = markdown.replace('"{{', "{{").replace('}}"', "}}")
return markdown
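
# Example (illustrative addition, not part of the original file): given the
# Markdown fragment '<td style="text-align: left; color: red">x</td>',
# transform_style_attributes rewrites the attribute to
# '<td style={{"text-align": "left", "color": "red"}}>x</td>', which MDX/React
# can parse as an expression.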
def handle_markdown_cell(
cell: NotebookNode,
new_img_dir: Path,
lib_dir: Path,
) -> str:
"""
Handle the given Jupyter Markdown cell and convert it to MDX.
Args:
cell (NotebookNode): Jupyter Markdown cell object.
new_img_dir (Path): Path where images are copied to for display in the
Markdown cell.
lib_dir (Path): The location for the Bean Machine library.
Returns:
str: Transformed Markdown object suitable for inclusion in MDX.
"""
markdown = cell["source"]
# Update image paths in the Markdown and copy them to the Markdown tutorials folder.
markdown = handle_images_found_in_markdown(markdown, new_img_dir, lib_dir)
# We will attempt to handle inline style attributes written in HTML by converting
# them to something React can consume.
markdown = transform_style_attributes(markdown)
# Remove any HTML comments from the Markdown. They are fine to keep in the
# notebooks, but are not really useful in the MDX.
markdown = re.sub("(<!--.*?-->)", "", markdown, flags=re.DOTALL)
mdx = mdformat.text(markdown, options={"wrap": 88}, extensions={"myst"})
return f"{mdx}\n"
def handle_cell_input(cell: NotebookNode, language: str) -> str:
"""
Create a Markdown cell block using the given cell source, and the language.
The given language will determine cell input syntax styles. Docusaurus uses Prism as
the syntax highlighter, https://prismjs.com. See the Docusaurus documentation for
more information on code blocks
https://docusaurus.io/docs/markdown-features/code-blocks.
Args:
cell (NotebookNode): A notebook cell.
language (str): Language specifier for syntax highlighting.
Returns:
str: Code block formatted Markdown string.
"""
cell_source = cell.get("source", "")
return f"```{language}\n{cell_source}\n```\n\n"
def transform_bokeh_json(json_data: Dict[str, Any]) -> Dict[str, Any]:
"""
Transform Bokeh JSON found in a cell output to something BokehJS can consume.
Args:
json_data (Dict[str, Any]): JSON data found in a notebook's cell output that is
for Bokeh.
Returns:
Dict[str, Any]: Reorganized JSON output for BokehJS.
"""
key = list(json_data.keys())[0]
data = json_data[key]
json_tx = {}
json_tx["target_id"] = key
json_tx["root_id"] = data["roots"]["root_ids"][0]
json_tx["doc"] = {
"defs": data["defs"],
"roots": data["roots"],
"title": data["title"],
"version": data["version"],
}
json_tx["version"] = data["version"]
return json_tx
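
# Example (illustrative addition, not part of the original file): given a
# docs_json payload keyed by the target div id, e.g.
#   {"<div-id>": {"defs": [...], "roots": {"root_ids": ["1001"], ...},
#                 "title": "", "version": "2.4.3"}}
# transform_bokeh_json returns
#   {"target_id": "<div-id>", "root_id": "1001", "doc": {...},
#    "version": "2.4.3"}
# which is the shape the React BokehFigure component reads from disk.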
def handle_bokeh(
values: List[Dict[str, Union[int, str, NotebookNode]]],
plot_data_folder: Path,
) -> List[Tuple[int, str]]:
"""
Convert Bokeh `show` outputs and Applications to MDX.
Args:
values (List[Dict[str, Union[int, str, NotebookNode]]]): Bokeh tagged cell
outputs.
plot_data_folder (Path): Path to the folder where plot data should be
stored.
Returns:
List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is
the index where the output occurred from the cell, and the second entry of
the tuple is the MDX formatted string.
"""
output = []
for value in values:
index = int(value["index"])
data = str(value["data"])
app_flag = data.startswith("<!DOCTYPE html>")
json_data = {}
# Handle Bokeh `show` outputs.
if not app_flag:
# Parse the JavaScript for the Bokeh JSON data. The BokehJS output is
# standardized, so we can make the following assumption for finding the
# right spot to for the JSON data. Also, this is pure JavaScript so
# parsing it with lxml is not an option.
json_string = list(
filter(
lambda line: line.startswith("const docs_json = "),
[line.strip() for line in data.splitlines() if line],
),
)[0]
# Ignore the `const` definition and the ending `;` from the line.
json_string = json_string[len("const docs_json = ") : -1]
json_data = json.loads(json_string)
# Handle Bokeh Applications.
if app_flag:
# Bokeh Application objects are rendered in the notebook as HTML. This
# HTML is saved in the output cell, which we parse below using lxml and
# xpaths.
doc = etree.HTML(data) # pyre-ignore
scripts = doc.xpath("//body/script[@type='application/json']")
script = scripts[0]
script = "".join(script.itertext())
# Unescape characters. If we skip this step, then the JSON read in by
# the React BokehFigure object will error in the browser.
script = script.replace("&", "&")
script = script.replace("<", "<")
script = script.replace(">", ">")
script = script.replace(""", '"')
script = script.replace("'", "'")
script = script.replace("`", "`")
json_data = json.loads(script)
# Shuffle the data so we can save it in a format BokehJS will be able to
# consume later.
js = transform_bokeh_json(json_data)
file_name = js["target_id"]
# Save the Bokeh JSON data to disk. It will be read by React when loaded in
# Docusaurus.
file_path = plot_data_folder / f"{file_name}.json"
with file_path.open("w") as f:
json.dump(js, f, indent=2)
# Add the Bokeh figure to the MDX output.
path_to_data = f"./assets/plot_data/{file_name}.json"
output.append(
(index, f"<BokehFigure data={{require('{path_to_data}')}} />\n\n"),
)
return output
def handle_image(
values: List[Dict[str, Union[int, str, NotebookNode]]],
) -> List[Tuple[int, str]]:
"""
Convert embedded images to string MDX can consume.
Args:
        values (List[Dict[str, Union[int, str, NotebookNode]]]): Image tagged cell
outputs.
Returns:
List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is
the index where the output occurred from the cell, and the second entry of
the tuple is the MDX formatted string.
"""
output = []
for value in values:
index = value["index"]
mime_type = value["mime_type"]
img = value["data"]
output.append((index, f"\n\n"))
return output
def handle_markdown(
values: List[Dict[str, Union[int, str, NotebookNode]]],
) -> List[Tuple[int, str]]:
"""
Convert and format Markdown for MDX.
Args:
        values (List[Dict[str, Union[int, str, NotebookNode]]]): Markdown tagged cell
outputs.
Returns:
List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is
the index where the output occurred from the cell, and the second entry of
the tuple is the MDX formatted string.
"""
output = []
for value in values:
index = int(value["index"])
markdown = str(value["data"])
markdown = mdformat.text(markdown, options={"wrap": 88}, extensions={"myst"})
output.append((index, f"{markdown}\n\n"))
return output
def handle_pandas(
values: List[Dict[str, Union[int, str, NotebookNode]]],
) -> List[Tuple[int, str]]:
"""
Handle how to display pandas DataFrames.
There is a scoped style tag in the DataFrame output that uses the class name
`dataframe` to style the output. We will use this token to determine if a pandas
DataFrame is being displayed.
Args:
        values (List[Dict[str, Union[int, str, NotebookNode]]]): pandas tagged cell
outputs.
Returns:
List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is
the index where the output occurred from the cell, and the second entry of
the tuple is the MDX formatted string.
"""
output = []
for value in values:
index = int(value["index"])
data = str(value["data"])
df = pd.read_html(data, flavor="lxml")
# NOTE: The return is a list of dataframes and we only care about the first
# one.
md_df = df[0]
for column in md_df.columns:
if column.startswith("Unnamed"):
md_df.rename(columns={column: ""}, inplace=True)
# Remove the index if it is just a range, and output to markdown.
mdx = ""
if isinstance(md_df.index, pd.RangeIndex):
# Ignore FutureWarning: 'showindex' is deprecated.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
mdx = md_df.to_markdown(showindex=False)
        else:
mdx = md_df.to_markdown()
output.append((index, f"\n{mdx}\n\n"))
return output
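# Example sketch: a DataFrame with a default RangeIndex such as
#   pd.DataFrame({"a": [1], "b": [2]})
# round-trips through to_markdown(showindex=False) as
#   |   a |   b |
#   |----:|----:|
#   |   1 |   2 |
# (exact column padding depends on the installed tabulate version).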
def handle_plain(
values: List[Dict[str, Union[int, str, NotebookNode]]],
) -> List[Tuple[int, str]]:
"""
    Handle how plain cell output should be displayed in MDX.
    Args:
        values (List[Dict[str, Union[int, str, NotebookNode]]]): Plain text tagged
            cell outputs.
Returns:
List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is
the index where the output occurred from the cell, and the second entry of
the tuple is the MDX formatted string.
"""
output = []
for value in values:
index = int(value["index"])
data = str(value["data"])
        # Keep the output only if it contains at least one non-blank line.
        if data.strip():
            data = "\n".join(line for line in data.splitlines() if line)
output.append(
(index, f"<CellOutput>\n{{\n `{data}`\n}}\n</CellOutput>\n\n"),
)
return output
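# Example sketch (hypothetical cell text): "hello\nworld" is wrapped in a
# JavaScript template literal so React renders it verbatim:
#   <CellOutput>
#   {
#    `hello
#   world`
#   }
#   </CellOutput>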
def handle_plotly(
values: List[Dict[str, Union[int, str, NotebookNode]]],
plot_data_folder: Path,
) -> List[Tuple[int, str]]:
"""
Convert Plotly outputs to MDX.
Args:
        values (List[Dict[str, Union[int, str, NotebookNode]]]): Plotly tagged cell
            outputs.
plot_data_folder (Path): Path to the folder where plot data should be
stored.
Returns:
List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is
the index where the output occurred from the cell, and the second entry of
the tuple is the MDX formatted string.
"""
output = []
for value in values:
index = value["index"]
data = value["data"]
file_name = str(uuid.uuid4())
file_path = plot_data_folder / f"{file_name}.json"
path_to_data = f"./assets/plot_data/{file_name}.json"
output.append(
(index, f"<PlotlyFigure data={{require('{path_to_data}')}} />\n\n"),
)
with file_path.open("w") as f:
json.dump(data, f, indent=2)
return output
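# Sketch of the on-disk contract (hypothetical file name): the Plotly figure
# JSON is dumped to ./assets/plot_data/<uuid4>.json and referenced lazily via
# require(), mirroring the BokehFigure flow above, but with no unescaping step
# because the Plotly payload is already a JSON-serializable object.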
def handle_tqdm(
values: List[Dict[str, Union[int, str, NotebookNode]]],
) -> List[Tuple[int, str]]:
"""
Handle the output of tqdm.
tqdm will be displayed as separate CellOutput React components if we do not
aggregate them all into a single CellOutput object, which is what this method does.
Args:
        values (List[Dict[str, Union[int, str, NotebookNode]]]): tqdm tagged cell
            outputs.
Returns:
List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is
the index where the output occurred from the cell, and the second entry of
the tuple is the MDX formatted string.
"""
output = sorted(values, key=lambda item: item["index"])
index = int(output[0]["index"])
md = "\n".join([str(item["data"]) for item in output if item["data"]])
return [(index, f"<CellOutput>\n{{\n `{md}`\n}}\n</CellOutput>\n\n")]
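# Example sketch (hypothetical progress lines): three tqdm updates tagged with
# indices 2, 0, and 1 are sorted by index and joined, so the MDX contains one
# <CellOutput> block holding all three lines instead of three separate React
# components.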
CELL_OUTPUTS_TO_PROCESS = Dict[
str,
List[Dict[str, Union[int, str, NotebookNode]]],
]
def aggregate_mdx(
cell_outputs_to_process: CELL_OUTPUTS_TO_PROCESS,
plot_data_folder: Path,
) -> str:
"""
Aggregate the `cell_outputs_to_process` into MDX.
Args:
cell_outputs_to_process (CELL_OUTPUTS_TO_PROCESS): A dictionary of cell outputs
that need further processing.
plot_data_folder (Path): Path to where plot data should be stored for the
tutorial.
Returns:
str: MDX formatted string.
"""
processed_mdx = []
for key, values in cell_outputs_to_process.items():
if not values:
continue
if key == "bokeh":
processed_mdx.extend(handle_bokeh(values, plot_data_folder))
if key == "image":
processed_mdx.extend(handle_image(values))
if key == "markdown":
processed_mdx.extend(handle_markdown(values))
if key == "pandas":
processed_mdx.extend(handle_pandas(values))
if key == "plain":
processed_mdx.extend(handle_plain(values))
if key == "plotly":
processed_mdx.extend(handle_plotly(values, plot_data_folder))
if key == "tqdm":
processed_mdx.extend(handle_tqdm(values))
# Ensure the same ordering of the MDX happens as was found in the original cell
# output.
processed_mdx = sorted(processed_mdx, key=lambda item: item[0])
mdx = "\n".join([item[1] for item in processed_mdx])
return mdx
def prioritize_dtypes(
cell_outputs: List[NotebookNode],
) -> Tuple[List[List[str]], List[bool]]:
"""
Prioritize cell output data types.
Args:
cell_outputs (List[NotebookNode]): A list of cell outputs.
Returns:
        Tuple[List[List[str]], List[bool]]: Return two items in the tuple; the first
            is a list of prioritized data types and the second is a list of boolean
            values indicating whether each cell output contains Plotly information.
"""
cell_output_dtypes = [
list(cell_output["data"].keys())
if "data" in cell_output
else [cell_output["output_type"]]
for cell_output in cell_outputs
]
prioritized_cell_output_dtypes = [
sorted(
set(dtypes).intersection(set(priorities)),
key=lambda dtype: priorities.index(dtype),
)
for dtypes in cell_output_dtypes
]
prioritized_cell_output_dtypes = [
[str(item) for item in items] for items in prioritized_cell_output_dtypes
]
plotly_flags = [
any(["plotly" in output for output in outputs])
for outputs in cell_output_dtypes
]
return prioritized_cell_output_dtypes, plotly_flags
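# Example sketch: assuming the module-level `priorities` list ranks "image/png"
# ahead of "text/plain" (an illustrative ordering only), a cell output exposing
# both MIME types yields ["image/png", "text/plain"] here, and a cell that also
# carries "application/vnd.plotly.v1+json" gets True in its plotly_flags slot.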
def aggregate_bokeh(
prioritized_data_dtype: str,
cell_output: NotebookNode,
data: NotebookNode,
cell_outputs_to_process: CELL_OUTPUTS_TO_PROCESS,
i: int,
) -> None:
"""
Aggregate Bokeh cell outputs.
Args:
prioritized_data_dtype (str): The prioritized cell output data type.
cell_output (NotebookNode): The actual cell output from the notebook.
data (NotebookNode): The data of the cell output.
cell_outputs_to_process (CELL_OUTPUTS_TO_PROCESS): Dictionary containing
aggregated cell output objects.
i (int): Index for the cell output in the list of cell output objects.
Returns:
None: Does not return anything, instead adds values to the
cell_outputs_to_process if applicable.
"""
    # Ignore BokehJS load messages; they carry nothing to render.
    if prioritized_data_dtype == "application/vnd.bokehjs_load.v0+json":
        return
# Bokeh `show` outputs.
if prioritized_data_dtype == "application/vnd.bokehjs_exec.v0+json":
data = cell_output["data"]["application/javascript"]
cell_outputs_to_process["bokeh"].append({"index": i, "data": data})
# Bokeh applications.
if prioritized_data_dtype == "text/html" and "Bokeh Application" in data:
cell_outputs_to_process["bokeh"].append({"index": i, "data": data})
def aggregate_images_and_plotly(
prioritized_data_dtype: str,
cell_output: NotebookNode,
data: NotebookNode,
plotly_flags: List[bool],
cell_outputs_to_process: CELL_OUTPUTS_TO_PROCESS,
i: int,
) -> None:
"""
Aggregates images or Plotly cell outputs into an appropriate bucket.
Args:
prioritized_data_dtype (str): The prioritized cell output data type.
cell_output (NotebookNode): The actual cell output from the notebook.
data (NotebookNode): The data of the cell output.
plotly_flags (List[bool]): True if a Plotly plot was found in the cell outputs
else False.
cell_outputs_to_process (CELL_OUTPUTS_TO_PROCESS): Dictionary containing
aggregated cell output objects.
i (int): Index for the cell output in the list of cell output objects.
Returns:
None: Does not return anything, instead adds values to the
cell_outputs_to_process if applicable.
"""
if prioritized_data_dtype.startswith("image"):
if not plotly_flags[i]:
cell_outputs_to_process["image"].append(
{"index": i, "data": data, "mime_type": prioritized_data_dtype},
)
# Plotly outputs a static image, but we can use the JSON in the cell
# output to create interactive plots using a React component.
if plotly_flags[i]:
data = cell_output["data"]["application/vnd.plotly.v1+json"]
cell_outputs_to_process["plotly"].append({"index": i, "data": data})
def aggregate_plain_output(
prioritized_data_dtype: str,
cell_output: NotebookNode,
data: NotebookNode,
cell_outputs_to_process: CELL_OUTPUTS_TO_PROCESS,
i: int,
) -> None:
"""
Aggregate plain text cell outputs together.
Args:
prioritized_data_dtype (str): The prioritized cell output data type.
cell_output (NotebookNode): The actual cell output from the notebook.
data (NotebookNode): The data of the cell output.
cell_outputs_to_process (CELL_OUTPUTS_TO_PROCESS): Dictionary containing
aggregated cell output objects.
i (int): Index for the cell output in the list of cell output objects.
Returns:
None: Does not return anything, instead adds values to the
cell_outputs_to_process if applicable.
"""
# Ignore error outputs.
if "name" in cell_output and cell_output["name"] == "stderr":
pass
# Ignore matplotlib legend text output.
if prioritized_data_dtype == "text/plain" and "matplotlib" in data:
pass
cell_outputs_to_process["plain"].append({"index": i, "data": data})
def aggregate_output_types(cell_outputs: List[NotebookNode]) -> CELL_OUTPUTS_TO_PROCESS:
"""
Aggregate cell outputs into a dictionary for further processing.
Args:
cell_outputs (List[NotebookNode]): List of cell outputs.
Returns:
CELL_OUTPUTS_TO_PROCESS: Dictionary containing aggregated cell output objects.
"""
# We will use the below cell output data types for prioritizing the output shown in
# the MDX file.
prioritized_cell_output_dtypes, plotly_flags = prioritize_dtypes(cell_outputs)
cell_outputs_to_process = {
"bokeh": [],
"image": [],
"markdown": [],
"pandas": [],
"plain": [],
"plotly": [],
"tqdm": [],
}
for i, cell_output in enumerate(cell_outputs):
prioritized_data_dtype = prioritized_cell_output_dtypes[i][0]
# If there is no `data` key in the cell_output, then it may be an error that
# needs to be handled. Even if it is not an error, the data is stored in a
# different key if no `data` key is found.
data = (
cell_output["data"][prioritized_data_dtype]
if "data" in cell_output
else cell_output["text"]
)
bokeh_check = "bokeh" in prioritized_data_dtype or (
prioritized_data_dtype == "text/html" and "Bokeh Application" in data
)
if bokeh_check:
aggregate_bokeh(
prioritized_data_dtype,
cell_output,
data,
cell_outputs_to_process,
i,
)
image_check = prioritized_data_dtype.startswith("image")
if image_check:
aggregate_images_and_plotly(
prioritized_data_dtype,
cell_output,
data,
plotly_flags,
cell_outputs_to_process,
i,
)
plain_check = prioritized_data_dtype in ["text/plain", "stream"]
if plain_check:
aggregate_plain_output(
prioritized_data_dtype,
cell_output,
data,
cell_outputs_to_process,
i,
)
if prioritized_data_dtype == "text/markdown":
cell_outputs_to_process["markdown"].append({"index": i, "data": data})
if "dataframe" in data:
cell_outputs_to_process["pandas"].append({"index": i, "data": data})
if prioritized_data_dtype == "application/vnd.jupyter.widget-view+json":
data = cell_output["data"]["text/plain"]
cell_outputs_to_process["tqdm"].append({"index": i, "data": data})
return cell_outputs_to_process
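# Example sketch of the aggregate shape (hypothetical indices): a cell that
# prints a line and then displays a DataFrame would produce
#   {"plain": [{"index": 0, "data": "..."}],
#    "pandas": [{"index": 1, "data": "<table ...>"}],
#    "bokeh": [], "image": [], "markdown": [], "plotly": [], "tqdm": []}
# which aggregate_mdx renders per bucket and re-sorts by index.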
def handle_cell_outputs(cell: NotebookNode, plot_data_folder: Path) -> str:
"""
Handle cell outputs and convert to MDX.
Args:
cell (NotebookNode): The cell where the outputs need converting.
plot_data_folder (Path): Path to the folder where plot data should be
stored.
Returns:
str: MDX formatted cell output.
"""
mdx = ""
# Return an empty string if there are no actual cell outputs.
cell_outputs = cell.get("outputs", [])
if not cell_outputs:
return mdx
# We will loop over all cell outputs and bucket them into the appropriate key in the
# dictionary below for further processing. Doing it in this way helps aggregate like
# outputs together e.g. tqdm outputs.
cell_outputs_to_process = aggregate_output_types(cell_outputs)
# Now we process all aggregated cell outputs into a single output for the type.
md = aggregate_mdx(cell_outputs_to_process, plot_data_folder)
return md
def handle_code_cell(cell: NotebookNode, plot_data_folder: Path) -> str:
"""
Handle code cells in Jupyter notebooks and convert them to MDX.
Args:
cell (NotebookNode): A Jupyter notebook cell that contains code.
plot_data_folder (Path): Path to the folder where plot data should be
stored.
Returns:
str: MDX formatted code cell.
"""
cell_input_mdx = handle_cell_input(cell, "python")
cell_output_mdx = handle_cell_outputs(cell, plot_data_folder)
return cell_input_mdx + cell_output_mdx
def transform_notebook(path: Path) -> str:
"""
Transform a notebook located at the given path into MDX.
Args:
path (Path): Path to the Jupyter notebook tutorial.
Returns:
str: MDX formatted string.
"""
filename, assets_folder = create_folders(path)
img_folder = assets_folder / "img"
plot_data_folder = assets_folder / "plot_data"
save_folder = assets_folder.joinpath("..").resolve()
nb = load_notebook(path)
nb_metadata = load_nb_metadata()
mdx = ""
mdx += create_frontmatter(path, nb_metadata)
mdx += create_imports()
mdx += create_buttons(nb_metadata, path.stem)
for cell in nb["cells"]:
cell_type = cell["cell_type"]
# Handle a Markdown cell.
if cell_type == "markdown":
mdx += handle_markdown_cell(cell, img_folder, LIB_DIR)
# Handle a code cell.
if cell_type == "code":
mdx += handle_code_cell(cell, plot_data_folder)
# Write the MDX file to disk.
save_path = save_folder / f"{filename}.mdx"
with save_path.open("w") as f:
f.write(mdx)
# Return the string for debugging purposes.
return mdx
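# End-to-end sketch (hypothetical layout): transform_notebook(Path("tutorials/foo.ipynb"))
# resolves the assets folders via create_folders, renders every markdown and code
# cell in notebook order, and leaves foo.mdx next to the assets folder with its
# plot data and images already written out.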
if __name__ == "__main__":
tutorials_metadata = load_nb_metadata()
print("--------------------------------------------")
print("Converting tutorial notebooks into mdx files")
print("--------------------------------------------")
for _, value in tutorials_metadata.items():
path = (LIB_DIR / value["nb_path"]).resolve()
print(f"{path.stem}")
mdx = transform_notebook(path)
print("")
| beanmachine-main | website/scripts/convert_ipynb_to_mdx.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch.distributions as dist
from torch import tensor
class ToplevelSmokeTest(unittest.TestCase):
def test_toplevel_package_imports(self):
# these decorators should execute without error
@bm.random_variable
def foo(i):
return dist.Bernoulli(0.5)
@bm.functional
def foo_sum(n):
return sum(foo(i) for i in range(n))
@bm.random_variable
def bar():
return dist.Normal(0, 1)
# exercise invocation from top-level package directly
# Compositional Inference
samples = bm.CompositionalInference().infer(
[foo_sum(3)], {foo(0): tensor(0.0)}, 100, 1
)
bm.Diagnostics(samples)
# NUTS
samples = bm.SingleSiteNoUTurnSampler().infer(
[bar()], {foo(0): tensor(0.0)}, 100, 1, num_adaptive_samples=100
)
bm.Diagnostics(samples)
| beanmachine-main | tests/ppl/smoke_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import pytest
import torch.distributions as dist
@pytest.fixture(autouse=True)
def fix_random_seed():
"""Fix the random state for every test in the test suite."""
bm.seed(0)
@pytest.fixture(autouse=True)
def disable_torch_distribution_validation():
"""Disables validation of Torch distribution arguments."""
dist.Distribution.set_default_validate_args(False)
| beanmachine-main | tests/ppl/conftest.py |
beanmachine-main | tests/ppl/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import pytest
import torch
from beanmachine.ppl.experimental.torch_jit_backend import get_backend, TorchJITBackend
from ..inference.inference_test import SampleModel
def test_get_backend():
with pytest.warns(
UserWarning, match="The support of TorchInductor is experimental"
):
# test if switching to inductor triggers the warning
backend = get_backend(nnc_compile=False, experimental_inductor_compile=True)
assert backend is TorchJITBackend.INDUCTOR
backend = get_backend(nnc_compile=True, experimental_inductor_compile=False)
assert backend is TorchJITBackend.NNC
backend = get_backend(nnc_compile=False, experimental_inductor_compile=False)
assert backend is TorchJITBackend.NONE
@pytest.mark.skip(reason="The CPU backend of TorchInductor isn't working in fbcode yet")
def test_inductor_compile():
model = SampleModel()
queries = [model.foo()]
observations = {model.bar(): torch.tensor(0.5)}
num_samples = 30
num_chains = 2
# verify that Inductor can run through
samples = bm.GlobalNoUTurnSampler(experimental_inductor_compile=True).infer(
queries,
observations,
num_samples,
num_adaptive_samples=num_samples,
num_chains=num_chains,
)
# sanity check: make sure that the samples are valid
assert not torch.isnan(samples[model.foo()]).any()
| beanmachine-main | tests/ppl/experimental/torch_jit_backend_test.py |
beanmachine-main | tests/ppl/experimental/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
CompositeRules,
DimensionalRule,
Operator,
SplitRule,
)
@pytest.fixture
def grow_dim():
return 1
@pytest.fixture
def grow_val():
return 2.1
def test_dimensional_rule_addition(grow_dim, grow_val):
lax_rule = SplitRule(
grow_dim=grow_dim, grow_val=grow_val + 10, operator=Operator.le
)
existing_dimensional_rule = DimensionalRule(
grow_dim=grow_dim, min_val=grow_val - 20, max_val=grow_val
)
assert (
existing_dimensional_rule.max_val
== existing_dimensional_rule.add_rule(lax_rule).max_val
)
assert (
existing_dimensional_rule.min_val
== existing_dimensional_rule.add_rule(lax_rule).min_val
)
restrictive_rule_le = SplitRule(
grow_dim=grow_dim, grow_val=grow_val - 10, operator=Operator.le
)
assert (
existing_dimensional_rule.max_val
> existing_dimensional_rule.add_rule(restrictive_rule_le).max_val
)
assert (
existing_dimensional_rule.min_val
== existing_dimensional_rule.add_rule(restrictive_rule_le).min_val
)
restrictive_rule_gt = SplitRule(
grow_dim=grow_dim, grow_val=grow_val - 10, operator=Operator.gt
)
assert (
existing_dimensional_rule.max_val
== existing_dimensional_rule.add_rule(restrictive_rule_gt).max_val
)
assert (
existing_dimensional_rule.min_val
< existing_dimensional_rule.add_rule(restrictive_rule_gt).min_val
)
@pytest.fixture
def all_dims():
return [0, 2]
@pytest.fixture
def all_split_rules(all_dims):
all_rules = []
for dim in all_dims:
all_rules.append(SplitRule(grow_dim=dim, grow_val=5, operator=Operator.le))
return all_rules
@pytest.fixture
def X():
return torch.Tensor([[1.0, 3.0, 7.0], [-1.1, 100, 5]])
def test_composite_rules(all_dims, all_split_rules, X):
composite_rule = CompositeRules(all_dims=all_dims, all_split_rules=all_split_rules)
X_cond = X[composite_rule.condition_on_rules(X)]
for dim in all_dims:
assert torch.all(X_cond[:, dim] > composite_rule.dimensional_rules[dim].min_val)
assert torch.all(
X_cond[:, dim] <= composite_rule.dimensional_rules[dim].max_val
)
invalid_split_rule = SplitRule(
grow_dim=max(all_dims) + 1, grow_val=12, operator=Operator.le
)
with pytest.raises(ValueError):
_ = composite_rule.add_rule(invalid_split_rule)
valid_split_rule = SplitRule(
grow_dim=max(all_dims), grow_val=1000.0, operator=Operator.gt
)
valid_new_composite_rule = composite_rule.add_rule(valid_split_rule)
assert valid_new_composite_rule.most_recent_split_rule() == valid_split_rule
| beanmachine-main | tests/ppl/experimental/bart/bart_split_rule_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.scalar_samplers import (
NoiseStandardDeviation,
)
@pytest.fixture
def X():
return torch.Tensor([[3.0, 1.0], [4.0, 1.0], [1.5, 1.0], [-1.0, 1.0]])
@pytest.fixture
def residual(X):
return X * 0.1
@pytest.fixture
def sigma():
return NoiseStandardDeviation(prior_concentration=0.1, prior_rate=0.2)
def test_sigma_sampling(sigma, X, residual):
prev_val = sigma.val
sample = sigma.sample(X=X, residual=residual)
assert not prev_val == sigma.val
assert sigma.val == sample
| beanmachine-main | tests/ppl/experimental/bart/bart_scalar_sampler_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import (
GrowError,
PruneError,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.mutation import (
GrowMutation,
PruneMutation,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.node import (
LeafNode,
SplitNode,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
CompositeRules,
Operator,
SplitRule,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.tree import Tree
@pytest.fixture
def X():
return torch.Tensor(
[[3.0], [4.0], [1.5], [-1.0]]
) # only r1 containing all positive entries is growable
@pytest.fixture
def l1_non_growable():
return LeafNode(
depth=1,
composite_rules=CompositeRules(
all_dims=[0],
all_split_rules=[SplitRule(grow_dim=0, grow_val=0, operator=Operator.le)],
),
val=-10,
)
@pytest.fixture
def l2_non_growable():
return LeafNode(
depth=2,
composite_rules=CompositeRules(
all_dims=[0],
all_split_rules=[
SplitRule(grow_dim=0, grow_val=1.5, operator=Operator.le),
SplitRule(grow_dim=0, grow_val=0, operator=Operator.gt),
],
),
val=15,
)
@pytest.fixture
def r2_growable():
return LeafNode(
depth=2,
composite_rules=CompositeRules(
all_dims=[0],
all_split_rules=[
SplitRule(grow_dim=0, grow_val=1.5, operator=Operator.gt),
SplitRule(grow_dim=0, grow_val=0, operator=Operator.gt),
],
),
val=15,
)
@pytest.fixture
def r1_grown(r2_growable, l2_non_growable):
return SplitNode(
depth=1,
left_child=l2_non_growable,
right_child=r2_growable,
composite_rules=CompositeRules(
all_dims=[0],
all_split_rules=[SplitRule(grow_dim=0, grow_val=0, operator=Operator.gt)],
),
)
@pytest.fixture
def root(l1_non_growable, r1_grown):
return SplitNode(
depth=0,
left_child=l1_non_growable,
right_child=r1_grown,
composite_rules=CompositeRules(all_dims=[0]),
)
@pytest.fixture
def tree(root, r1_grown, l1_non_growable, r2_growable, l2_non_growable):
"""
root_node
/\
(x1 <= 0)l1 r1 (x1 > 0)
/ \
(x1 <= 1.5) l2 r2 (x1 > 1.5)
    The tree is made such that all positive input gets a positive prediction and vice-versa.
"""
tree_ = Tree(nodes=[root, l1_non_growable, r1_grown, l2_non_growable, r2_growable])
return tree_
def test_num_nodes(tree):
assert tree.num_nodes() == 5
def test_leaf_split_nodes(tree):
    for node in tree.leaf_nodes():
        assert isinstance(node, LeafNode)
    for node in tree.split_nodes():
        assert isinstance(node, SplitNode)
def test_prunable_split_nodes(tree):
for node in tree.prunable_split_nodes():
assert isinstance(node.left_child, LeafNode)
        assert isinstance(node.right_child, LeafNode)
assert len(tree.prunable_split_nodes()) == tree.num_prunable_split_nodes()
def test_growable_leaves(tree, r2_growable, l1_non_growable, l2_non_growable, X):
assert tree.num_growable_leaf_nodes(X) == 1
growable_leaves = tree.growable_leaf_nodes(X)
    assert tree.num_growable_leaf_nodes(X) == len(growable_leaves)
assert r2_growable in growable_leaves
assert l1_non_growable not in growable_leaves
assert l2_non_growable not in growable_leaves
def test_prediction(tree, X):
for x1 in X:
x1 = x1.reshape(1, 1)
assert float(x1) * tree.predict(x1) >= 0
def test_mutate_prune(tree, root, l1_non_growable, r1_grown):
old_tree_len = tree.num_nodes()
pruned_r1 = SplitNode.prune_node(r1_grown)
# pruning an internal node
with pytest.raises(PruneError):
_ = PruneMutation(old_node=root, new_node=l1_non_growable)
mutation = PruneMutation(old_node=r1_grown, new_node=pruned_r1)
tree.mutate(mutation)
assert tree.num_nodes() == old_tree_len - 2
def test_mutate_grow(tree, r2_growable):
old_tree_len = tree.num_nodes()
l3 = LeafNode(
depth=3,
composite_rules=CompositeRules(
all_dims=[0],
all_split_rules=[SplitRule(grow_dim=0, grow_val=3, operator=Operator.le)],
),
val=15,
)
r3 = LeafNode(
depth=3,
composite_rules=CompositeRules(
all_dims=[0],
all_split_rules=[SplitRule(grow_dim=0, grow_val=1.5, operator=Operator.gt)],
),
val=15,
)
r2_grown = SplitNode(
depth=2,
left_child=l3,
right_child=r3,
composite_rules=CompositeRules(
all_dims=[0],
all_split_rules=[SplitRule(grow_dim=0, grow_val=1.5, operator=Operator.gt)],
),
)
# growing an internal node
with pytest.raises(GrowError):
_ = GrowMutation(old_node=r2_grown, new_node=r2_growable)
mutation = GrowMutation(old_node=r2_growable, new_node=r2_grown)
tree.mutate(mutation)
assert tree.num_nodes() == old_tree_len + 2
| beanmachine-main | tests/ppl/experimental/bart/bart_tree_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import deepcopy
import pytest
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import (
PruneError,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.node import (
BaseNode,
LeafNode,
SplitNode,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
CompositeRules,
Operator,
SplitRule,
)
@pytest.fixture
def composite_rule():
all_rules = []
all_dims = [0, 1, 2]
for dim in all_dims:
all_rules.append(SplitRule(grow_dim=dim, grow_val=0, operator=Operator.le))
composite_rule = CompositeRules(all_dims=all_dims, all_split_rules=all_rules)
return composite_rule
@pytest.fixture
def left_rule():
return SplitRule(grow_dim=0, grow_val=-0.5, operator=Operator.le)
@pytest.fixture
def right_rule():
return SplitRule(grow_dim=0, grow_val=-0.5, operator=Operator.gt)
@pytest.fixture
def all_pass_composite_rule():
all_rules = []
all_dims = [0, 1, 2]
for dim in all_dims:
all_rules.append(
SplitRule(grow_dim=dim, grow_val=float("inf"), operator=Operator.le)
)
composite_rule = CompositeRules(all_dims=all_dims, all_split_rules=all_rules)
return composite_rule
@pytest.fixture
def X():
return torch.Tensor([[1.0, 3.0, 7.0], [-1.1, -1, -5]])
def test_conditioning(X, composite_rule):
base_node = BaseNode(depth=0, composite_rules=composite_rule)
assert torch.all(
base_node.data_in_node(X) == X[composite_rule.condition_on_rules(X)]
)
def test_leaf_node_prediction(composite_rule):
val = 10
leaf_node = LeafNode(composite_rules=composite_rule, depth=0, val=val)
assert leaf_node.predict() == val
@pytest.fixture
def leaf_node(composite_rule):
return LeafNode(composite_rules=composite_rule, depth=0)
@pytest.fixture
def loose_leaf(all_pass_composite_rule):
return LeafNode(composite_rules=all_pass_composite_rule, depth=0)
def test_growable_dims(leaf_node, loose_leaf, X):
assert leaf_node.get_num_growable_dims(X) == 0 # only one row of X passes the test
assert loose_leaf.get_num_growable_dims(X) == X.shape[-1] # everything passes
assert len(loose_leaf.get_growable_dims(X)) == loose_leaf.get_num_growable_dims(X)
def test_is_grow(leaf_node, loose_leaf, X):
assert not leaf_node.is_growable(X) # no splittable_dims. Cannot grow.
assert loose_leaf.is_growable(X)
def test_grow_node(leaf_node, left_rule, right_rule, X):
grown_leaf = LeafNode.grow_node(
leaf_node, left_rule=left_rule, right_rule=right_rule
)
assert isinstance(grown_leaf, SplitNode)
assert grown_leaf.left_child is not None
assert grown_leaf.right_child is not None
assert grown_leaf.most_recent_rule() == left_rule
def test_prune_node(leaf_node, composite_rule):
split_node = SplitNode(
left_child=leaf_node,
right_child=deepcopy(leaf_node),
depth=1,
composite_rules=composite_rule,
)
grandfather_node = SplitNode(
left_child=leaf_node,
right_child=split_node,
depth=0,
composite_rules=composite_rule,
)
assert split_node.is_prunable()
assert not grandfather_node.is_prunable()
assert isinstance(SplitNode.prune_node(split_node), LeafNode)
with pytest.raises(PruneError):
SplitNode.prune_node(grandfather_node)
def test_partition_of_split(loose_leaf, X):
grow_val = X[0, 0]
growable_vals = loose_leaf.get_growable_vals(X=X, grow_dim=0)
assert torch.isclose(
torch.tensor(
[loose_leaf.get_partition_of_split(X=X, grow_dim=0, grow_val=grow_val)]
),
torch.mean(
(growable_vals == grow_val.item()).to(torch.float), dtype=torch.float
),
)
| beanmachine-main | tests/ppl/experimental/bart/bart_node_test.py |
beanmachine-main | tests/ppl/experimental/bart/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import (
PruneError,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.grow_prune_tree_proposer import (
GrowPruneTreeProposer,
MutationKind,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.node import (
LeafNode,
SplitNode,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
CompositeRules,
Operator,
SplitRule,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.tree import Tree
@pytest.fixture(autouse=True)
def seed():
torch.manual_seed(5)
@pytest.fixture
def X():
return torch.Tensor([[3.0, 1.0], [4.0, 1.0], [1.5, 1.0], [-1.0, 1.0]])
@pytest.fixture
def root_node(X):
return SplitNode(
depth=0,
composite_rules=CompositeRules(all_dims=list(range(X.shape[-1]))),
)
@pytest.fixture
def single_node_tree(X):
leaf_root = LeafNode(
depth=0,
composite_rules=CompositeRules(all_dims=list(range(X.shape[-1]))),
)
tree_ = Tree(nodes=[leaf_root])
return tree_
@pytest.fixture
def r1_growable(X):
return LeafNode(
depth=1,
composite_rules=CompositeRules(
all_dims=list(range(X.shape[-1])),
all_split_rules=[SplitRule(grow_dim=0, grow_val=0, operator=Operator.gt)],
),
val=-10,
)
@pytest.fixture
def l1_non_growable(X):
return LeafNode(
depth=1,
composite_rules=CompositeRules(
all_dims=list(range(X.shape[-1])),
all_split_rules=[SplitRule(grow_dim=0, grow_val=0, operator=Operator.le)],
),
val=-10,
)
@pytest.fixture
def single_layer_tree(root_node, r1_growable, l1_non_growable):
"""
root_node
/\
(x1 <= 0)l1 r1 (x1 > 0)
    The tree is made such that all positive input gets a positive prediction and vice-versa.
"""
root_node._left_child = l1_non_growable
root_node._right_child = r1_growable
tree_ = Tree(nodes=[root_node, l1_non_growable, r1_growable])
return tree_
@pytest.fixture
def l2_non_growable(X):
return LeafNode(
depth=2,
composite_rules=CompositeRules(
all_dims=list(range(X.shape[-1])),
all_split_rules=[SplitRule(grow_dim=0, grow_val=3, operator=Operator.le)],
),
val=-10,
)
@pytest.fixture
def r2_growable(X):
return LeafNode(
depth=1,
composite_rules=CompositeRules(
all_dims=list(range(X.shape[-1])),
all_split_rules=[SplitRule(grow_dim=0, grow_val=0, operator=Operator.gt)],
),
val=-10,
)
@pytest.fixture
def r1_grown(X):
return SplitNode(
depth=1,
composite_rules=CompositeRules(
all_dims=list(range(X.shape[-1])),
all_split_rules=[SplitRule(grow_dim=0, grow_val=3, operator=Operator.gt)],
),
)
@pytest.fixture
def double_layer_tree(
root_node, r1_grown, l1_non_growable, r2_growable, l2_non_growable
):
"""
root_node
/\
(x1 <= 0)l1 r1 (x1 > 0)
/\
(<=3)l2 r2 (>3)
"""
root_node._left_child = l1_non_growable
root_node._right_child = r1_grown
r1_grown._left_child = l2_non_growable
r1_grown._right_child = r2_growable
tree_ = Tree(
nodes=[root_node, l1_non_growable, r1_grown, l2_non_growable, r2_growable]
)
return tree_
@pytest.fixture
def proposer():
return GrowPruneTreeProposer()
def test_new_mutation(proposer, single_node_tree, X):
assert proposer._get_new_mutation(X=X, tree=single_node_tree) == MutationKind.grow
def test_select_root_to_grow(proposer, single_node_tree, X):
assert (
proposer._select_leaf_to_grow(single_node_tree, X) == single_node_tree._nodes[0]
)
def test_select_leaf_to_grow(proposer, single_layer_tree, X, r1_growable):
assert proposer._select_leaf_to_grow(single_layer_tree, X) == r1_growable
def test_select_dim_to_grow(proposer, single_node_tree, X):
assert proposer._select_grow_dim(leaf_to_grow=single_node_tree._nodes[0], X=X) == 0
def test_select_node_to_prune(proposer, single_node_tree, double_layer_tree, r1_grown):
assert proposer._select_split_node_to_prune(tree=double_layer_tree) == r1_grown
with pytest.raises(PruneError):
_ = proposer._select_split_node_to_prune(tree=single_node_tree)
def test_propose(proposer, single_node_tree, X):
proposed_tree = proposer.propose(
tree=single_node_tree,
X=X,
partial_residual=torch.zeros(X.shape[0], 1),
alpha=0.5,
beta=0.5,
sigma_val=0.01,
leaf_mean_prior_scale=1,
)
assert isinstance(proposed_tree, Tree)
assert abs(proposed_tree.num_nodes() - single_node_tree.num_nodes()) in [
0,
2,
] # 2: grow or prune, 0 for no change
| beanmachine-main | tests/ppl/experimental/bart/bart_tree_proposer_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.bart_model import (
LeafMean,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.grow_from_root_tree_proposer import (
GrowFromRootTreeProposer,
SortedInvariants,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.node import LeafNode
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
CompositeRules,
)
@pytest.fixture(autouse=True)
def seed():
torch.manual_seed(5)
@pytest.fixture
def gfr_proposer():
gfr = GrowFromRootTreeProposer()
gfr.num_cuts = 2
gfr.num_null_cuts = 1
return gfr
@pytest.fixture
def X():
return torch.Tensor([[3.0, 1.0], [4.0, 1.0], [1.5, 1.0], [-1.0, 1.0]])
@pytest.fixture
def w(X):
num_vars = X.shape[-1]
weights = torch.Tensor([1 / num_vars for _ in range(num_vars - 1)])
return weights
def test_sample_variables(gfr_proposer, w):
num_vars_to_sample = max(len(w) - 1, 1)
assert (
len(gfr_proposer._sample_variables(num_vars_to_sample, w)) == num_vars_to_sample
)
impossible_num_vars_to_sample = len(w) + 1
assert len(gfr_proposer._sample_variables(impossible_num_vars_to_sample, w)) == len(
w
)
def test_presort(gfr_proposer, X):
O_ = gfr_proposer._presort(X)
num_observations, num_dims = X.shape
for inp_dim in range(num_dims):
for obs in range(1, num_observations):
assert X[O_[inp_dim, obs - 1], inp_dim] <= X[O_[inp_dim, obs], inp_dim]
def test_get_uniq_elems(gfr_proposer, X):
O_ = gfr_proposer._presort(X)
uniq_vals, val_counts = gfr_proposer._get_uniq_elems(X=X, O_=O_)
num_observations, num_dims = X.shape
for inp_dim in range(num_dims):
dim_val_counts = val_counts[inp_dim]
assert sum(dim_val_counts.values()) == num_observations
for id_, uniq_val in enumerate(uniq_vals[inp_dim]):
assert dim_val_counts[uniq_val] > 0
if id_ > 0:
assert uniq_val >= uniq_vals[inp_dim][id_ - 1]
assert set(uniq_vals[inp_dim]) == {_.item() for _ in X[:, inp_dim]}
@pytest.fixture
def invariants(gfr_proposer, X):
O_ = gfr_proposer._presort(X)
uniq_vals, val_counts = gfr_proposer._get_uniq_elems(X=X, O_=O_)
return SortedInvariants(O_=O_, uniq_vals=uniq_vals, val_counts=val_counts)
def test_select_cutpoints(gfr_proposer, X, invariants):
num_observations, num_dims = X.shape
cutpoints = gfr_proposer._select_cutpoints(
candidate_dims=list(range(num_dims)), uniq_vals=invariants.uniq_vals
)
num_dim_cuts = 0
for point_id, point in enumerate(cutpoints):
assert (
point.cut_val < invariants.uniq_vals[point.dim][-1]
) # no degenerate splits
if point_id > 0 and cutpoints[point_id - 1].dim == point.dim:
assert cutpoints[point_id - 1].cut_val < point.cut_val
num_dim_cuts += 1
elif point_id > 0 and cutpoints[point_id - 1].dim != point.dim:
assert num_dim_cuts <= gfr_proposer.num_cuts
num_dim_cuts = 0
else:
num_dim_cuts += 1
@pytest.fixture
def partial_residual(X):
return torch.ones((len(X), 1)) * 0.2
@pytest.fixture
def sigma_val():
return 0.1
@pytest.fixture
def leaf_sampler():
return LeafMean(prior_loc=0.0, prior_scale=0.1)
@pytest.fixture
def current_node(X):
return LeafNode(
depth=0,
val=0.1,
composite_rules=CompositeRules(all_dims=list(range(X.shape[-1]))),
)
@pytest.fixture
def alpha():
return 0.95
@pytest.fixture
def beta():
return 1.25
@pytest.fixture
def cut_points(gfr_proposer, invariants):
num_dims = invariants.O_.shape[0]
return gfr_proposer._select_cutpoints(
candidate_dims=list(range(num_dims)), uniq_vals=invariants.uniq_vals
)
def test_sample_cut_point(
gfr_proposer,
X,
invariants,
cut_points,
partial_residual,
sigma_val,
leaf_sampler,
current_node,
alpha,
beta,
):
num_observations, num_dims = X.shape
num_trials = 10
all_sampled_cutpoints = []
for _ in range(num_trials):
all_sampled_cutpoints.append(
gfr_proposer._sample_cut_point(
candidate_cut_points=cut_points,
partial_residual=partial_residual,
invariants=invariants,
sigma_val=sigma_val,
leaf_sampler=leaf_sampler,
current_node=current_node,
alpha=alpha,
beta=beta,
)
)
for point in all_sampled_cutpoints:
if point is not None:
assert point in cut_points
def test_sift(
gfr_proposer,
X,
invariants,
cut_points,
partial_residual,
sigma_val,
leaf_sampler,
current_node,
alpha,
beta,
):
cut_point = gfr_proposer._sample_cut_point(
candidate_cut_points=cut_points,
partial_residual=partial_residual,
invariants=invariants,
sigma_val=sigma_val,
leaf_sampler=leaf_sampler,
current_node=current_node,
alpha=alpha,
beta=beta,
)
left_invariants, right_invariants = gfr_proposer._sift(
X=X, cut_point=cut_point, invariants=invariants
)
assert (
invariants.O_.shape[0] == left_invariants.O_.shape[0]
and invariants.O_.shape[0] == right_invariants.O_.shape[0]
) # num dims shouldnt change
assert (
invariants.O_.shape[1]
== left_invariants.O_.shape[1] + right_invariants.O_.shape[1]
)
for dim in range(invariants.O_.shape[0]):
assert set(invariants.uniq_vals[dim]) == set(
left_invariants.uniq_vals[dim]
).union(set(right_invariants.uniq_vals[dim]))
for val in invariants.uniq_vals[dim]:
assert (
invariants.val_counts[dim][val]
== left_invariants.val_counts[dim][val]
+ right_invariants.val_counts[dim][val]
)
def test_propose(
X,
invariants,
cut_points,
partial_residual,
sigma_val,
leaf_sampler,
current_node,
alpha,
beta,
w,
):
proposer = GrowFromRootTreeProposer()
tree_, variable_counts = proposer.propose(
X=X,
partial_residual=partial_residual,
m=X.shape[-1],
w=w,
sigma_val=sigma_val,
leaf_sampler=leaf_sampler,
alpha=alpha,
beta=beta,
root_node=current_node,
num_cuts=2,
num_null_cuts=1,
)
all_leaves = tree_.leaf_nodes()
assert len(all_leaves) > 0
if len(all_leaves) > 0:
assert sum(variable_counts) > 0
assert tree_.predict(X).shape == partial_residual.shape
| beanmachine-main | tests/ppl/experimental/bart/xbart_grow_from_root_proposer_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.bart_model import (
BART,
XBART,
)
@pytest.fixture
def X():
return torch.Tensor([[3.0, 1.0], [4.0, 1.0], [1.5, 1.0], [-1.0, 1.0]])
@pytest.fixture
def y(X):
return X[:, 0] + X[:, 1]
@pytest.fixture
def bart(X, y):
return BART(num_trees=1).fit(X=X, y=y, num_burn=1, num_samples=39)
@pytest.fixture
def X_test():
return torch.Tensor([[3.1, 2.5]])
@pytest.fixture
def y_test(X_test):
return X_test[:, 0] + X_test[:, 1]
def test_predict(X_test, y_test, bart):
y_pred = bart.predict(X_test)
assert len(X_test) == len(y_pred)
assert len(y_test) == len(y_pred)
def test_predict_with_quantiles_bart(X_test, bart):
quantiles = torch.Tensor([0.5])
y_pred, qvals = bart.predict_with_quantiles(X_test, quantiles=quantiles)
posterior_samples = bart.get_posterior_predictive_samples(X_test)
# median for even number of samples is not unique
assert (1 - bart.num_samples % 2) or torch.all(
torch.median(posterior_samples, dim=1)[0] == qvals
)
@pytest.fixture
def xbart(X, y):
return XBART(num_trees=1).fit(X=X, y=y, num_burn=1, num_samples=9)
def test_predict_xbart(X_test, y_test, xbart):
y_pred = xbart.predict(X_test)
assert len(X_test) == len(y_pred)
assert len(y_test) == len(y_pred)
def test_predict_with_quantiles_xbart(X_test, xbart):
quantiles = torch.Tensor([0.5])
y_pred, qvals = xbart.predict_with_quantiles(X_test, quantiles=quantiles)
posterior_samples = xbart.get_posterior_predictive_samples(X_test)
# median for even number of samples is not unique
assert (1 - xbart.num_samples % 2) or torch.all(
torch.median(posterior_samples, dim=1)[0] == qvals
)
| beanmachine-main | tests/ppl/experimental/bart/bart_model_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.experimental.gp import (
bm_sample_from_prior,
make_prior_random_variables,
)
from beanmachine.ppl.experimental.gp.models import BoTorchGP, SimpleGP
from botorch.posteriors.gpytorch import GPyTorchPosterior
from gpytorch import kernels, likelihoods
from gpytorch.means import ConstantMean
from gpytorch.priors import GammaPrior, UniformPrior
class ModelTest(unittest.TestCase):
def setUp(self):
x = torch.randn(3, 1)
y = torch.randn(3)
mean = ConstantMean(constant_prior=UniformPrior(-1, 1))
kernel = kernels.MaternKernel(lengthscale_prior=GammaPrior(0.5, 0.5))
lik = likelihoods.GaussianLikelihood()
self.model = SimpleGP(x, y, mean, kernel, lik)
self.bo_model = BoTorchGP(x, y, mean, kernel, lik)
self.name_to_rv = make_prior_random_variables(self.model)
@bm.random_variable
def y():
sampled_model = bm_sample_from_prior(
self.model.to_pyro_random_module(),
self.name_to_rv,
)
return sampled_model.likelihood(sampled_model(x))
self.y = y
def test_infer(self):
self.model.train()
bm.GlobalNoUTurnSampler().infer(
list(self.name_to_rv.values()), {}, num_samples=2, num_chains=1
)
def test_load_and_predict(self):
self.model.eval()
d = {
"kernel.lengthscale_prior": torch.ones(1),
"mean.mean_prior": torch.tensor(1.0),
}
self.model.bm_load_samples(d)
assert self.model.kernel.lengthscale.item() == 1.0
assert isinstance(self.model(torch.randn(3, 1)), dist.MultivariateNormal)
def test_posterior(self):
self.bo_model.eval()
d = {
"kernel.lengthscale_prior": torch.ones(1),
"mean.mean_prior": torch.tensor(1.0),
}
self.bo_model.bm_load_samples(d)
assert isinstance(self.bo_model.posterior(torch.randn(3, 1)), GPyTorchPosterior)
obs_noise = torch.ones(1, 1)
mvn = self.bo_model.posterior(torch.randn(3, 1), obs_noise)
assert isinstance(mvn, GPyTorchPosterior)
| beanmachine-main | tests/ppl/experimental/gp/models_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import unittest
import beanmachine.ppl as bm
import gpytorch
import torch
from beanmachine.ppl.experimental.gp import (
bm_sample_from_prior,
make_prior_random_variables,
)
from beanmachine.ppl.experimental.gp.models import SimpleGP
from gpytorch import likelihoods
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import PeriodicKernel, ScaleKernel
from gpytorch.priors import UniformPrior
class Regression(SimpleGP):
def __init__(self, x_train, y_train, kernel, likelihood, *args, **kwargs):
mean = gpytorch.means.ConstantMean()
super().__init__(x_train, y_train, mean, kernel, likelihood)
def forward(self, data):
if data.dim() > 2:
data_shape = data.shape[1]
else:
data_shape = data.shape[0]
jitter = torch.eye(data_shape, data_shape)
for _ in range(data.dim() - 1):
jitter = jitter.unsqueeze(0)
mean = self.mean(data)
cov = self.kernel(data) + jitter
if cov.dim() > mean.dim() + 1:
cov = cov.squeeze(0)
return MultivariateNormal(mean, cov)
class InferenceTests(unittest.TestCase):
def test_simple_regression(self):
torch.manual_seed(1)
n_samples = 100
x_train = torch.linspace(0, 1, 10)
y_train = torch.sin(x_train * (2 * math.pi))
kernel = ScaleKernel(
base_kernel=PeriodicKernel(
period_length_prior=UniformPrior(0.5, 1.5),
lengthscale_prior=UniformPrior(0.01, 1.5),
),
outputscale_prior=UniformPrior(0.01, 2.0),
)
likelihood = likelihoods.GaussianLikelihood()
likelihood.noise = 1e-4
gp = Regression(x_train, y_train, kernel, likelihood)
name_to_rv = make_prior_random_variables(gp)
@bm.random_variable
def y():
sampled_model = bm_sample_from_prior(gp.to_pyro_random_module(), name_to_rv)
return sampled_model.likelihood(sampled_model(x_train))
queries = list(name_to_rv.values())
obs = {y(): y_train}
samples = bm.GlobalNoUTurnSampler(nnc_compile=False).infer(
queries, obs, n_samples, num_chains=1
)
# get predictives
x_test = torch.linspace(0, 1, 21).unsqueeze(-1)
y_test = torch.sin(x_test * (2 * math.pi)).squeeze(0)
gp.eval()
s = samples.get_chain(0)
lengthscale_samples = s[name_to_rv["kernel.base_kernel.lengthscale_prior"]]
outputscale_samples = s[name_to_rv["kernel.outputscale_prior"]]
period_length_samples = s[name_to_rv["kernel.base_kernel.period_length_prior"]]
gp.pyro_load_from_samples(
{
"kernel.outputscale_prior": outputscale_samples,
"kernel.base_kernel.lengthscale_prior": lengthscale_samples,
"kernel.base_kernel.period_length_prior": period_length_samples,
}
)
expanded_x_test = x_test.unsqueeze(0).repeat(n_samples, 1, 1)
output = gp.likelihood(gp(expanded_x_test.detach()))
assert (
(y_test.squeeze() - output.mean.squeeze().mean(0)).abs().mean() < 1.0
).item()
| beanmachine-main | tests/ppl/experimental/gp/inference_test.py |
beanmachine-main | tests/ppl/experimental/gp/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch.distributions as dist
from beanmachine.ppl.world.utils import get_default_transforms, initialize_value
def test_get_default_transforms():
bernoulli = dist.Bernoulli(0.1)
transforms = get_default_transforms(bernoulli)
assert dist.transforms.identity_transform == transforms
normal = dist.Normal(0, 1)
transforms = get_default_transforms(normal)
assert dist.transforms.identity_transform == transforms
gamma = dist.Gamma(1, 1)
transforms = get_default_transforms(gamma)
assert transforms.bijective
def test_initialize_value():
distribution = dist.Normal(0, 1)
value = initialize_value(distribution)
assert value.item() == pytest.approx(0, abs=1e-5)
first_sample = initialize_value(distribution, True)
second_sample = initialize_value(distribution, True)
assert first_sample.item() != second_sample.item()
| beanmachine-main | tests/ppl/world/utils_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.world import World
class SampleModel:
@bm.random_variable
def foo(self):
return dist.Uniform(0.0, 1.0)
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), 1.0)
class DiscreteModel:
@bm.random_variable
def foo(self):
return dist.Categorical(torch.ones(3))
@bm.random_variable
def bar(self):
return dist.Normal(self.foo().float(), torch.tensor(1.0))
class DynamicModel:
@bm.random_variable
def foo(self):
return dist.Bernoulli(0.5)
@bm.random_variable
def bar(self, i: int):
return dist.Normal(0.0, 1.0)
@bm.random_variable
def baz(self):
mu = self.bar(int(self.foo()))
return dist.Normal(mu, 1.0)
class ChangeSupportModel:
@bm.random_variable
def foo(self):
return dist.Bernoulli(0.3)
@bm.random_variable
def bar(self):
if self.foo():
return dist.Categorical(logits=torch.rand((3,)))
else:
return dist.Normal(0.0, 1.0)
@bm.random_variable
def baz(self):
return dist.Bernoulli(self.foo())
def test_basic_operations():
model = SampleModel()
observations = {model.bar(): torch.rand(())}
world = World(observations=observations)
assert world.observations == observations
assert len(world.latent_nodes) == 0
assert len(world) == 0
with world:
model.bar() # this will add bar() and its parent foo() to world
assert len(world) == 2
assert model.bar() in world
assert world.latent_nodes == {model.foo()}
# edge connection
assert model.foo() in world.get_variable(model.bar()).parents
assert model.bar() in world.get_variable(model.foo()).children
assert len(world.get_variable(model.bar()).children) == 0
assert len(world.get_variable(model.foo()).parents) == 0
assert world.get_variable(model.foo()).value == world.call(model.foo())
def test_initialization():
model = SampleModel()
with World():
val1 = model.bar()
with World():
val2 = model.bar()
assert val1 != val2
def test_log_prob():
model = SampleModel()
world1 = World(observations={model.foo(): torch.tensor(0.0)})
world1.call(model.bar())
log_prob1 = world1.log_prob()
# set to a value with extremely small probability
world2 = world1.replace({model.bar(): torch.tensor(100.0)})
log_prob2 = world2.log_prob()
assert log_prob1 > log_prob2
def test_enumerate():
model = DiscreteModel()
world = World(observations={model.bar(): torch.tensor(0.0)})
with world:
model.bar()
assert (torch.tensor([0.0, 1.0, 2.0]) == world.enumerate_node(model.foo())).all()
def test_change_parents():
model = DynamicModel()
world = World(initialize_fn=lambda d: torch.zeros_like(d.sample()))
with world:
model.baz()
assert model.foo() in world.get_variable(model.baz()).parents
assert model.bar(0) in world.get_variable(model.baz()).parents
assert model.bar(1) not in world.get_variable(model.baz()).parents
assert model.baz() in world.get_variable(model.bar(0)).children
world2 = world.replace({model.foo(): torch.tensor(1.0)})
assert model.bar(0) not in world2.get_variable(model.baz()).parents
assert model.bar(1) in world2.get_variable(model.baz()).parents
assert model.baz() in world2.get_variable(model.bar(1)).children
assert model.baz() not in world2.get_variable(model.bar(0)).children
def test_distribution_and_log_prob_update():
model = ChangeSupportModel()
with World(observations={model.baz(): torch.tensor(1.0)}) as world:
model.bar()
model.baz()
world = world.replace({model.foo(): torch.tensor(0.0)})
world2 = world.replace({model.foo(): torch.tensor(1.0)})
bar_var = world.get_variable(model.bar())
assert isinstance(bar_var.distribution, dist.Normal)
bar_var2 = world2.get_variable(model.bar())
assert isinstance(bar_var2.distribution, dist.Categorical)
# verify that the children's log prob is recomputed when foo gets updated
baz_var = world.get_variable(model.baz()) # Bernoulli(0.0)
baz_var2 = world2.get_variable(model.baz()) # Bernoulli(1.0)
# recall that baz() is observed to be 1.0
assert baz_var.log_prob < baz_var2.log_prob
| beanmachine-main | tests/ppl/world/world_test.py |
beanmachine-main | tests/ppl/world/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.world.initialize_fn import init_from_prior, init_to_uniform
@pytest.mark.parametrize("init_fn", [init_from_prior, init_to_uniform])
@pytest.mark.parametrize(
"distribution",
[
dist.Uniform(0.0, 1.0),
dist.Normal(0.0, 1.0).expand((3,)),
dist.Bernoulli(0.5),
dist.Exponential(1.0),
dist.Dirichlet(torch.tensor([0.5, 0.5])),
dist.Categorical(logits=torch.randn(5, 10)),
dist.Bernoulli(0.5).expand((3, 5, 7)),
dist.Poisson(rate=2.0),
],
)
def test_initialize_validness(init_fn, distribution):
value = init_fn(distribution)
# make sure values are initialize within the constraint
assert torch.all(distribution.support.check(value))
assert not torch.any(torch.isnan(distribution.log_prob(value)))
assert value.size() == distribution.sample().size()
| beanmachine-main | tests/ppl/world/initialize_fn_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.world.variable import Variable
def test_log_prob():
var1 = Variable(value=torch.zeros(3), distribution=dist.Bernoulli(0.8))
# verify that the cached property `log_prob` is recomputed when we replace the
# fields of a Variable
var2 = var1.replace(value=torch.ones(3))
assert var1.log_prob.sum() < var2.log_prob.sum()
var3 = var1.replace(distribution=dist.Normal(0.0, 1.0))
assert var1.log_prob.sum() < var3.log_prob.sum()
# Expects an error here because support doesn't match
var4 = var1.replace(distribution=dist.Categorical(logits=torch.rand(2, 4)))
with pytest.raises(RuntimeError):
var4.log_prob
var5 = Variable(
value=torch.tensor(10).double(),
distribution=dist.Uniform(
torch.tensor(0.0).double(), torch.tensor(1.0).double()
),
)
# Check that the log prob has the right dtype
assert var5.log_prob.dtype == torch.double
assert torch.isinf(var5.log_prob)
var6 = var5.replace(value=torch.tensor(1))
assert torch.isinf(var6.log_prob)
| beanmachine-main | tests/ppl/world/variable_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from beanmachine.ppl.utils import tensorops
class TensorOpsTest(unittest.TestCase):
def test_gradients(self) -> None:
for type_ in [torch.float32, torch.float64]:
x = torch.randn(3, requires_grad=True, dtype=type_)
prec = torch.Tensor([[1, 0.1, 0], [0.1, 2, 0.5], [0, 0.5, 3]]).to(type_)
mu = torch.randn(3, dtype=type_)
# first gradient is `-(x - mu) @ prec`, second gradient is `- prec`
f = -(x - mu) @ prec @ (x - mu) / 2
grad, hess = tensorops.gradients(f, x)
self.assertTrue(grad.allclose(-(x - mu) @ prec))
self.assertTrue(hess.allclose(-prec))
self.assertEqual(grad.dtype, type_, "gradient dtype must match input")
self.assertEqual(hess.dtype, type_, "hessian dtype must match input")
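    # Worked check for the quadratic used above: with symmetric P and
    # f(x) = -(x - mu)^T P (x - mu) / 2, the gradient is
    #   grad f = -(1/2)(P + P^T)(x - mu) = -P(x - mu)
    # and the Hessian is the constant -P, matching the assertions. The
    # row-vector form -(x - mu) @ P used above is equal because P is symmetric.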
def test_simplex_gradients(self) -> None:
for type_ in [torch.float32, torch.float64]:
x = torch.randn(3, requires_grad=True, dtype=type_)
prec = torch.Tensor([[1, 0.1, 0], [0.1, 2, 0.5], [0, 0.5, 3]]).to(type_)
prec_diag = torch.Tensor([1.0, 1.9, 3.0]).to(type_)
mu = torch.randn(3, dtype=type_)
# first gradient is `-(x - mu) @ prec`, second gradient is `- prec`
f = -(x - mu) @ prec @ (x - mu) / 2
grad, hess = tensorops.simplex_gradients(f, x)
self.assertTrue(grad.allclose(-(x - mu) @ prec))
self.assertTrue(hess.allclose(-prec_diag))
self.assertEqual(grad.dtype, type_, "gradient dtype must match input")
self.assertEqual(hess.dtype, type_, "hessian dtype must match input")
def test_halfspace_gradients(self) -> None:
for type_ in [torch.float32, torch.float64]:
x = torch.randn(3, requires_grad=True, dtype=type_)
prec = torch.Tensor([[1, 0.1, 0], [0.1, 2, 0.5], [0, 0.5, 3]]).to(type_)
prec_diag = torch.Tensor([1.0, 2.0, 3.0]).to(type_)
mu = torch.randn(3, dtype=type_)
# first gradient is `-(x - mu) @ prec`, second gradient is `- prec`
f = -(x - mu) @ prec @ (x - mu) / 2
grad, hess = tensorops.halfspace_gradients(f, x)
self.assertTrue(grad.allclose(-(x - mu) @ prec))
self.assertTrue(hess.allclose(-prec_diag))
self.assertEqual(grad.dtype, type_, "gradient dtype must match input")
self.assertEqual(hess.dtype, type_, "hessian dtype must match input")
def test_gradients_negative(self) -> None:
# output must have one element
x = torch.randn(3, requires_grad=True)
with self.assertRaises(ValueError) as cm:
tensorops.gradients(2 * x, x)
self.assertTrue(
"output tensor must have exactly one element" in str(cm.exception)
)
| beanmachine-main | tests/ppl/utils/tensorops_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from beanmachine.ppl.utils.set_of_tensors import SetOfTensors
from torch import tensor
class SetOfTensorsTest(unittest.TestCase):
def test_set_of_tensors_1(self) -> None:
self.maxDiff = None
# Show that we deduplicate these tensors.
t = [
tensor(1.0),
tensor([]),
tensor([1.0]),
tensor([1.0, 2.0]),
tensor([1.0, 2.0, 3.0, 4.0]),
tensor([[1.0]]),
tensor([[1.0], [2.0]]),
tensor([[1.0, 2.0]]),
tensor([[1.0, 2.0], [3.0, 4.0]]),
tensor(1.0),
tensor([]),
tensor([1.0]),
tensor([1.0, 2.0]),
tensor([1.0, 2.0, 3.0, 4.0]),
tensor([[1.0]]),
tensor([[1.0], [2.0]]),
tensor([[1.0, 2.0]]),
tensor([[1.0, 2.0], [3.0, 4.0]]),
]
s = SetOfTensors(t)
self.assertEqual(9, len(s))
observed = str(s)
expected = """
tensor(1.)
tensor([1., 2., 3., 4.])
tensor([1., 2.])
tensor([1.])
tensor([[1., 2.],
[3., 4.]])
tensor([[1., 2.]])
tensor([[1.],
[2.]])
tensor([[1.]])
tensor([])"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/utils/set_of_tensors_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from beanmachine.ppl.utils.multidictionary import MultiDictionary
class MultiDictionaryTest(unittest.TestCase):
def test_multidictionary(self) -> None:
d = MultiDictionary()
d.add(1, "alpha")
d.add(1, "bravo")
d.add(2, "charlie")
d.add(2, "delta")
self.assertEqual(2, len(d))
self.assertEqual(2, len(d[1]))
self.assertEqual(2, len(d[2]))
self.assertEqual(0, len(d[3]))
self.assertTrue("alpha" in d[1])
self.assertTrue("alpha" not in d[2])
expected = """
{1:{alpha,
bravo}
2:{charlie,
delta}}"""
self.assertEqual(expected.strip(), str(d).strip())
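# A minimal sketch of the container under test, assuming the behavior the
# assertions rely on: keys map to sets of values, and reading a missing key
# yields an empty set without creating an entry (so len(d) stays 2 above).
class _MultiDictSketch:
    def __init__(self):
        self._data = {}

    def add(self, key, value):
        self._data.setdefault(key, set()).add(value)

    def __getitem__(self, key):
        return self._data.get(key, set())

    def __len__(self):
        return len(self._data)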
| beanmachine-main | tests/ppl/utils/multidictionary_test.py |
beanmachine-main | tests/ppl/utils/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for Graph from graph.py"""
import unittest
from beanmachine.ppl.utils.graph import Graph
class SimpleNode(object):
name: str
label: int
def __init__(self, name: str, label: int):
self.name = name
self.label = label
class GraphTest(unittest.TestCase):
def test_graph(self) -> None:
self.maxDiff = None
"""Tests for Graph from graph.py"""
g: Graph[str] = Graph(str, str)
p1 = g.with_plate()
p1.with_edge("a1", "a2").with_edge("a2", "a3")
p2 = p1.with_plate()
p2.with_edge("a0", "a1").with_edge("a3", "a0")
p3 = g.with_plate()
p3.with_edge("b0", "b1").with_edge("b1", "b2").with_edge("b2", "b3")
p3.with_edge("b2", "a3").with_edge("a1", "b3")
g.with_edge("start", "a0").with_edge("start", "b0")
g.with_edge("a3", "end").with_edge("b3", "end")
observed = g.to_dot()
expected = """
digraph "graph" {
a0;
a1;
a2;
a3;
b0;
b1;
b2;
b3;
end;
start;
a0 -> a1;
a1 -> a2;
a1 -> b3;
a2 -> a3;
a3 -> a0;
a3 -> end;
b0 -> b1;
b1 -> b2;
b2 -> a3;
b2 -> b3;
b3 -> end;
start -> a0;
start -> b0;
subgraph cluster__0 {
a1;
a2;
a3;
subgraph cluster__0_0 {
a0;
}
}
subgraph cluster__1 {
b0;
b1;
b2;
b3;
}
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_isomorphism(self) -> None:
self.maxDiff = None
# a1 b1 c1
# | |
# a2 b2
# / \ / \
# a5 s3 b5
# |
# s4
#
# a1 and b1 are isomorphic, a1 and c1 are not
a1 = SimpleNode("a1", 1)
b1 = SimpleNode("b1", 1)
c1 = SimpleNode("c1", 1)
a2 = SimpleNode("a2", 2)
a5 = SimpleNode("a5", 5)
b2 = SimpleNode("b2", 2)
b5 = SimpleNode("b5", 5)
s3 = SimpleNode("s3", 3)
s4 = SimpleNode("s4", 4)
g: Graph[SimpleNode] = Graph(
lambda x: x.name, lambda x: str(x.label), lambda x: str(x.label)
)
g = g.with_edge(a1, a2).with_edge(a2, a5).with_edge(a2, s3)
g = g.with_edge(b1, b2).with_edge(b2, s3).with_edge(b2, b5)
g = g.with_edge(s3, s4)
g = g.with_node(c1)
self.assertTrue(g.are_dags_isomorphic(a1, b1))
self.assertTrue(g.are_dags_isomorphic(a2, b2))
self.assertFalse(g.are_dags_isomorphic(a1, c1))
self.assertFalse(g.are_dags_isomorphic(a1, b2))
reachable = ",".join(sorted(str(n.label) for n in g.reachable(b2)))
self.assertEqual(reachable, "2,3,4,5")
g.merge_isomorphic(a2, b2)
# After merging b2 into a2:
# a1 b1 c1
# \ /
# a2
# / | \
# a5 s3 b5
# |
# s4
observed = g.to_dot()
expected = """
digraph "graph" {
a1[label=1];
a2[label=2];
a5[label=5];
b1[label=1];
b5[label=5];
c1[label=1];
s3[label=3];
s4[label=4];
a1 -> a2;
a2 -> a5;
a2 -> b5;
a2 -> s3;
b1 -> a2;
s3 -> s4;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_merge(self) -> None:
self.maxDiff = None
# s1
# / | \
# a2 b2 c2
# / \ / \ / \
# a3 a4 b3 b4
# | |
# a5 b5
#
# The three "2" nodes are isomorphic.
s1 = SimpleNode("s1", 1)
a2 = SimpleNode("a2", 2)
b2 = SimpleNode("b2", 2)
c2 = SimpleNode("c2", 2)
a3 = SimpleNode("a3", 3)
a4 = SimpleNode("a4", 4)
b3 = SimpleNode("b3", 3)
b4 = SimpleNode("b4", 4)
a5 = SimpleNode("a5", 5)
b5 = SimpleNode("b5", 5)
g: Graph[SimpleNode] = Graph(
lambda x: x.name, lambda x: str(x.label), lambda x: str(x.label)
)
g = g.with_edge(s1, a2).with_edge(s1, b2).with_edge(s1, c2)
g = g.with_edge(a2, a3).with_edge(a2, a4).with_edge(b2, a4)
g = g.with_edge(b2, b3).with_edge(c2, b3).with_edge(c2, b4)
g = g.with_edge(a4, a5).with_edge(b4, b5)
observed = g.to_dot()
expected = """
digraph "graph" {
a2[label=2];
a3[label=3];
a4[label=4];
a5[label=5];
b2[label=2];
b3[label=3];
b4[label=4];
b5[label=5];
c2[label=2];
s1[label=1];
a2 -> a3;
a2 -> a4;
a4 -> a5;
b2 -> a4;
b2 -> b3;
b4 -> b5;
c2 -> b3;
c2 -> b4;
s1 -> a2;
s1 -> b2;
s1 -> c2;
}
"""
self.assertEqual(observed.strip(), expected.strip())
g.merge_isomorphic_many([a2, b2, c2])
observed = g.to_dot()
# s1
# |
# a2
# / | | \
# a3 a4 b3 b4
# | |
# a5 b5
expected = """
digraph "graph" {
a2[label=2];
a3[label=3];
a4[label=4];
a5[label=5];
b3[label=3];
b4[label=4];
b5[label=5];
s1[label=1];
a2 -> a3;
a2 -> a4;
a2 -> b3;
a2 -> b4;
a4 -> a5;
b4 -> b5;
s1 -> a2;
}
"""
self.assertEqual(observed.strip(), expected.strip())
g.merge_isomorphic_children(a2)
# s1
# |
# a2
# / \
# a3 a4
# / \
# a5 b5
# Note that the isomorphic 5 nodes are not recursively merged.
observed = g.to_dot()
expected = """
digraph "graph" {
a2[label=2];
a3[label=3];
a4[label=4];
a5[label=5];
b5[label=5];
s1[label=1];
a2 -> a3;
a2 -> a4;
a4 -> a5;
a4 -> b5;
s1 -> a2;
}
"""
self.assertEqual(observed.strip(), expected.strip())
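# Hedged sketch of the DAG-isomorphism check exercised above: two roots match
# if their labels agree and their children can be paired off into isomorphic
# pairs. `label` and `children` are hypothetical accessors, not the Graph
# class's API; the real implementation also has to handle shared subgraphs.
def _dags_isomorphic(label, children, a, b):
    if label(a) != label(b):
        return False
    remaining = list(children(b))
    for x in children(a):
        match = next(
            (y for y in remaining if _dags_isomorphic(label, children, x, y)),
            None,
        )
        if match is None:
            return False
        remaining.remove(match)
    return not remaining  # every child must be consumed by some pairing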
| beanmachine-main | tests/ppl/utils/graph_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for item_counter.py"""
import unittest
from beanmachine.ppl.utils.item_counter import ItemCounter
class ItemCounterTest(unittest.TestCase):
def test_item_counter(self) -> None:
i = ItemCounter()
self.assertTrue("a" not in i.items)
self.assertTrue("b" not in i.items)
i.add_item("a")
i.add_item("a")
i.add_item("b")
i.add_item("b")
self.assertEqual(i.items["a"], 2)
self.assertEqual(i.items["b"], 2)
i.remove_item("b")
i.remove_item("a")
i.remove_item("a")
self.assertTrue("a" not in i.items)
self.assertEqual(i.items["b"], 1)
| beanmachine-main | tests/ppl/utils/item_counter_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for print_tree from treeprinter.py"""
import unittest
from beanmachine.ppl.utils.treeprinter import print_tree
class TreePrinterTest(unittest.TestCase):
def test_print_tree(self) -> None:
"""Tests for print_tree from treeprinter.py"""
d = {"foo": 2, "bar": {"blah": [2, 3, {"abc": (6, 7, (5, 5, 6))}]}}
observed = print_tree(d, unicode=False)
expected = """dict
+-foo
| +-2
+-bar
+-blah
+-2
+-3
+-dict
+-abc
+-6
+-7
+-tuple
+-5
+-5
+-6
"""
self.assertEqual(observed, expected)
| beanmachine-main | tests/ppl/utils/treeprinter_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for memoize.py"""
import unittest
from beanmachine.ppl.utils.memoize import memoize
count1 = 0
def fib(n):
global count1
count1 = count1 + 1
return 1 if n <= 1 else fib(n - 1) + fib(n - 2)
count2 = 0
@memoize
def fib_mem(n):
global count2
count2 = count2 + 1
return 1 if n <= 1 else fib_mem(n - 1) + fib_mem(n - 2)
class MemoizeTest(unittest.TestCase):
"""Tests for memoize.py"""
def test_memoize(self) -> None:
"""Tests for memoize.py"""
global count1
global count2
f10 = fib(10)
self.assertEqual(f10, 89)
self.assertEqual(count1, 177)
f10 = fib_mem(10)
self.assertEqual(f10, 89)
self.assertEqual(count2, 11)
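# For reference, a hedged sketch of a cache-by-arguments decorator with the
# same effect as `memoize` for the hashable positional arguments used here;
# the library implementation may differ. The standard library's
# functools.lru_cache offers equivalent behavior out of the box.
def _memoize_sketch(f):
    cache = {}

    def wrapper(*args):
        if args not in cache:
            cache[args] = f(*args)  # compute once per distinct argument tuple
        return cache[args]

    return wrapper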
| beanmachine-main | tests/ppl/utils/memoize_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for partition functions from equivalence.py"""
import unittest
from typing import Any, Iterable
from beanmachine.ppl.utils.equivalence import partition_by_kernel, partition_by_relation
def _brace(s: str) -> str:
return "{" + s + "}"
def _comma(s: Iterable[str]) -> str:
return ",".join(s)
def _set_str(items: Iterable[Any]) -> str:
return _brace(_comma(sorted({str(item) for item in items})))
def _set_set_str(results: Iterable[Any]) -> str:
return _set_str([_set_str(eqv) for eqv in results])
class PartitionTest(unittest.TestCase):
def test_partition(self) -> None:
"""Tests for partition_by_kernel and partition_by_relation from equivalence.py"""
def three_kernel(x: int) -> int:
return (x % 3 + 3) % 3
def three_relation(x: int, y: int) -> bool:
return (x - y) % 3 == 0
expected = """{{-1,-4,-7,2,5,8},{-2,-5,-8,1,4,7},{-3,-6,-9,0,3,6,9}}"""
s = set(range(-9, 10))
observed1 = _set_set_str(partition_by_relation(s, three_relation))
observed2 = _set_set_str(partition_by_kernel(s, three_kernel))
self.assertEqual(observed1, expected)
self.assertEqual(observed2, expected)
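# Sketch of kernel-based partitioning, assuming the library returns one
# collection per equivalence class: items mapping to the same kernel value
# land in the same class. The relation-based variant would need something
# like union-find instead.
def _partition_by_kernel_sketch(items, kernel):
    classes = {}
    for item in items:
        classes.setdefault(kernel(item), set()).add(item)
    return list(classes.values())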
| beanmachine-main | tests/ppl/utils/equivalence_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for print_graph from dotbuilder.py"""
import unittest
from typing import Any, Dict
from beanmachine.ppl.utils.dotbuilder import DotBuilder, print_graph
class GraphPrinterTest(unittest.TestCase):
def test_print_tree(self) -> None:
"""Tests for print_graph from dotbuilder.py"""
bar = {"blah": [2, 3, {"abc": (6, 7, (5, 5, 6))}]}
d: Dict[Any, Any] = {"foo": 2, "bar1": bar, "bar2": bar}
d["self"] = d # type: ignore
observed = print_graph([d])
expected = """
digraph "graph" {
N0[label=dict];
N10[label=5];
N1[label=2];
N2[label=dict];
N3[label=list];
N4[label=3];
N5[label=dict];
N6[label=tuple];
N7[label=6];
N8[label=7];
N9[label=tuple];
N0 -> N0[label=self];
N0 -> N1[label=foo];
N0 -> N2[label=bar1];
N0 -> N2[label=bar2];
N2 -> N3[label=blah];
N3 -> N1[label=0];
N3 -> N4[label=1];
N3 -> N5[label=2];
N5 -> N6[label=abc];
N6 -> N7[label=0];
N6 -> N8[label=1];
N6 -> N9[label=2];
N9 -> N10[label=0];
N9 -> N10[label=1];
N9 -> N7[label=2];
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_builder(self) -> None:
self.maxDiff = None
db = DotBuilder("my_graph")
db.with_comment("comment")
db.start_subgraph("my_subgraph", True)
db.with_label("graph_label")
db.with_node("A1", "A")
db.with_node("A2", "A")
db.with_edge("A1", "A2", "edge_label")
db.end_subgraph()
observed = str(db)
expected = """
digraph my_graph {
// comment
subgraph cluster_my_subgraph {
label=graph_label
A1[label=A];
A2[label=A];
A1 -> A2[label=edge_label];
}
}
"""
self.assertEqual(observed.strip(), expected.strip())
| beanmachine-main | tests/ppl/utils/dotbuilder_test.py |
beanmachine-main | tests/ppl/testlib/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for hypothesis_testing.py"""
import unittest
from beanmachine.ppl.testlib.hypothesis_testing import (
inverse_chi2_cdf,
inverse_normal_cdf,
mean_equality_hypothesis_confidence_interval,
mean_equality_hypothesis_test,
variance_equality_hypothesis_confidence_interval,
variance_equality_hypothesis_test,
)
from torch import tensor
class HypothesisTestingTest(unittest.TestCase):
def test_hypothesis_test_inverse_normal_cdf(self) -> None:
"""Minimal test for inverse normal CDF used to calculate z values"""
# Check that the median has the probability we expect
median = inverse_normal_cdf(0.5)
self.assertEqual(
median, 0.0, msg="Unexpected value for median of normal distribution"
)
# Record and check the values we get for z_0.01
expected_z_one_percent = -2.3263478740408408
observed_z_one_percent = inverse_normal_cdf(0.01)
self.assertEqual(
observed_z_one_percent,
expected_z_one_percent,
msg="Expected value for z_0.01",
)
# Record and check the values we get for z_0.99
expected_z_99_percent = 2.3263478740408408
observed_z_99_percent = inverse_normal_cdf(1 - 0.01)
self.assertEqual(
observed_z_99_percent,
expected_z_99_percent,
msg="Expected value for z_0.99",
)
# Record and check the values we get for z_0.005
expected_z_half_percent = -2.575829303548901
observed_z_half_percent = inverse_normal_cdf(0.005)
self.assertEqual(
observed_z_half_percent,
expected_z_half_percent,
msg="Expected value for z_0.005",
)
# This example shows why 1-p can be problematic
# Compare this value to -expected_z_half_percent
expected_z_995_thousandths = 2.5758293035489004
observed_z_995_thousandths = inverse_normal_cdf(0.995)
self.assertTrue(
not (expected_z_995_thousandths == -expected_z_half_percent),
msg="Numerical z_p is usually not exactly -z_(1-p)",
)
self.assertEqual(
observed_z_995_thousandths,
expected_z_995_thousandths,
msg="Expected value for z_0.005",
)
def test_hypothesis_test_mean(self) -> None:
"""Minimal test for mean equality hypothesis test"""
sample_mean = tensor(10)
true_mean = tensor(0)
true_std = tensor(1)
sample_size = tensor(1)
p_value = 0.01
observed_result = mean_equality_hypothesis_test(
sample_mean, true_mean, true_std, sample_size, p_value
)
self.assertFalse(observed_result, msg="Mean is unexpectedly within confidence interval")
sample_mean = tensor(0)
true_mean = tensor(0)
true_std = tensor(1)
sample_size = tensor(1)
p_value = 0.01
observed_result = mean_equality_hypothesis_test(
sample_mean, true_mean, true_std, sample_size, p_value
)
self.assertTrue(observed_result, msg="Mean is not within confidence interval")
# This test case is at the edge of acceptable.
# It should pass because of the = in <= in the
# mean_equality_hypothesis_test method
expected_z_995_thousandths = 2.5758293035489004
sample_mean = tensor(expected_z_995_thousandths)
true_mean = tensor(0)
true_std = tensor(1)
sample_size = tensor(1)
p_value = 0.01
observed_result = mean_equality_hypothesis_test(
sample_mean, true_mean, true_std, sample_size, p_value
)
self.assertTrue(observed_result, msg="Mean is not within confidence interval")
# The following two tests probe the edge of what the test should
# accept. It is strange that the value slightly larger than the
# critical value does not fail.
# TODO: Investigate and explain why this passes when it should be
# just outside the acceptable boundary.
expected_z_995_thousandths = 2.5758293035489004
sample_mean = tensor(expected_z_995_thousandths * 1.00000001)
true_mean = tensor(0)
true_std = tensor(1)
sample_size = tensor(1)
p_value = 0.01
observed_result = mean_equality_hypothesis_test(
sample_mean, true_mean, true_std, sample_size, p_value
)
self.assertTrue(observed_result, msg="Mean is not within confidence interval")
# This one, with a larger multiplier, finally returns False
expected_z_995_thousandths = 2.5758293035489004
sample_mean = tensor(expected_z_995_thousandths * 1.0000001)
true_mean = tensor(0)
true_std = tensor(1)
sample_size = tensor(1)
p_value = 0.01
observed_result = mean_equality_hypothesis_test(
sample_mean, true_mean, true_std, sample_size, p_value
)
self.assertFalse(observed_result, msg="Mean is unexpectedly within confidence interval")
def test_confidence_interval_mean(self) -> None:
"""Minimal test for mean confidence interval"""
sample_mean = tensor(2)
true_mean = tensor(0)
true_std = tensor(1)
sample_size = tensor(1)
p_value = 0.05
lower_bound, upper_bound = mean_equality_hypothesis_confidence_interval(
true_mean, true_std, sample_size, p_value
)
observed_result = lower_bound <= sample_mean <= upper_bound
self.assertFalse(observed_result, msg="Mean is unexpectedly within confidence interval")
sample_mean = tensor(1.95)
true_mean = tensor(0)
true_std = tensor(1)
sample_size = tensor(1)
p_value = 0.05
lower_bound, upper_bound = mean_equality_hypothesis_confidence_interval(
true_mean, true_std, sample_size, p_value
)
observed_result = lower_bound <= sample_mean <= upper_bound
self.assertTrue(observed_result, msg="Mean is not within confidence interval")
def test_hypothesis_test_inverse_chi2_cdf(self) -> None:
"""Minimal test for inverse chi-squared CDF used to calculate chi2 values"""
# Check that the median has the probability we expect.
# A rule of thumb for chi2 is that the median is df - 0.7;
# in this test we pin the more precise value observed in a test run.
median = inverse_chi2_cdf(100, 0.5)
self.assertEqual(
median,
99.33412923598846,
msg="Unexpected value for median of chi square distribution",
)
# Record and check the values we get for chi2_0.01.
# C.M. Thompson's tables from 1941 give 70.0648; the more precise
# value below reflects a test run.
# NB: The test run appears to contradict the least significant
# digit in the table cited above, but not once we account for the
# fact that the p used for the table lookup is 0.990, which
# suggests only 4 digits are valid.
expected_chi2_one_percent = 70.06489492539978
observed_chi2_one_percent = inverse_chi2_cdf(100, 0.01)
self.assertEqual(
observed_chi2_one_percent,
expected_chi2_one_percent,
msg="Unexpected value for chi2_0.01",
)
# Record and check the values we get for chi2_0.99
# Table above predicts 135.807
expected_chi2_99_percent = 135.80672317102676
observed_chi2_99_percent = inverse_chi2_cdf(100, 1 - 0.01)
self.assertEqual(
observed_chi2_99_percent,
expected_chi2_99_percent,
msg="Unexpected value for chi2_0.99",
)
# Record and check the values we get for chi2_0.005
# Table above predicts 67.3276
expected_chi2_half_percent = 67.32756330547916
observed_chi2_half_percent = inverse_chi2_cdf(100, 0.005)
self.assertEqual(
observed_chi2_half_percent,
expected_chi2_half_percent,
msg="Unexpected value for chi2_0.005",
)
def test_hypothesis_test_variance(self) -> None:
"""Minimal test for variance equality hypothesis test"""
# Based on solved example in Scheaffer & McClave, 1986, Pg 300
sample_std = tensor(0.0003) ** 0.5
true_std = tensor(0.0002) ** 0.5
degrees_of_freedom = tensor(9)
alpha = 0.05
observed_result = variance_equality_hypothesis_test(
sample_std, true_std, degrees_of_freedom, alpha
)
self.assertTrue(observed_result, msg="Variance is within confidence interval")
sample_std = tensor(0.002) ** 0.5
true_std = tensor(0.0002) ** 0.5
degrees_of_freedom = tensor(9)
alpha = 0.05
observed_result = variance_equality_hypothesis_test(
sample_std, true_std, degrees_of_freedom, alpha
)
self.assertFalse(
observed_result, msg="Variance is unexpectedly within confidence interval"
)
# Based on lookup of chi-squared table values
# The interval for chi-square at p=0.1 split over both distribution ends is
# approximately [77.9, 124.3]
# First, we check the lower bound
sample_std = tensor(78.0 / 100.0) ** 0.5
true_std = tensor(1.0)
degrees_of_freedom = tensor(100)
alpha = 0.1
observed_result = variance_equality_hypothesis_test(
sample_std, true_std, degrees_of_freedom, alpha
)
self.assertTrue(observed_result, msg="Variance is within confidence interval")
sample_std = tensor(77.0 / 100.0) ** 0.5
true_std = tensor(1.0)
degrees_of_freedom = tensor(100)
alpha = 0.1
observed_result = variance_equality_hypothesis_test(
sample_std, true_std, degrees_of_freedom, alpha
)
self.assertFalse(
observed_result, msg="Variance is unexpectedly within confidence interval"
)
# Second, we check the upper bound
sample_std = tensor(124.0 / 100.0) ** 0.5
true_std = tensor(1.0)
degrees_of_freedom = tensor(100)
alpha = 0.1
observed_result = variance_equality_hypothesis_test(
sample_std, true_std, degrees_of_freedom, alpha
)
self.assertTrue(observed_result, msg="Variance is within confidence interval")
sample_std = tensor(125.0 / 100.0) ** 0.5
true_std = tensor(1.0)
degrees_of_freedom = tensor(100)
alpha = 0.1
observed_result = variance_equality_hypothesis_test(
sample_std, true_std, degrees_of_freedom, alpha
)
self.assertFalse(
observed_result, msg="Variance is unexpectedly within confidence interval"
)
def test_confidence_interval_variance(self) -> None:
"""Minimal test for variance confidence interval"""
true_std = tensor(1.0)
degrees_of_freedom = tensor(100)
alpha = 0.05
observed_interval = variance_equality_hypothesis_confidence_interval(
true_std, degrees_of_freedom, alpha
)
observed_lower, observed_upper = observed_interval
expected_std_lower1 = tensor(0.86)
expected_std_lower2 = tensor(0.87)
expected_std_upper1 = tensor(1.13)
expected_std_upper2 = tensor(1.14)
self.assertLessEqual(expected_std_lower1, observed_lower, "Lower bound too low")
self.assertLessEqual(
observed_lower, expected_std_lower2, "Lower bound too high"
)
self.assertLessEqual(expected_std_upper1, observed_upper, "Upper bound too low")
self.assertLessEqual(
observed_upper, expected_std_upper2, "Upper bound too high"
)
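# A worked sketch of the two-sided mean test exercised above, assuming the
# usual z interval mean +/- z_{1-alpha/2} * sigma / sqrt(n); `norm.ppf` is
# scipy's inverse normal CDF. The boundary value 2.5758... used above is
# exactly z_{0.995}, i.e. the p_value=0.01 case.
from scipy.stats import norm

def _mean_test_sketch(sample_mean, true_mean, true_std, n, alpha):
    z = norm.ppf(1.0 - alpha / 2.0)
    return abs(sample_mean - true_mean) <= z * true_std / n**0.5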
| beanmachine-main | tests/ppl/testlib/hypothesis_testing_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import unittest
import warnings
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
import torch.utils._pytree as pytree
@bm.random_variable
def foo():
return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))
class RVIdentifierTest(unittest.TestCase):
class SampleModel:
@staticmethod
@bm.random_variable
def foo():
return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))
@bm.random_variable
def bar(self, sigma: float):
return dist.Normal(self.foo(), torch.tensor(sigma))
@bm.random_variable
def baz(self):
return dist.Normal(self.foo(), self.bar(1.0))
class SampleModelWithEq:
@bm.random_variable
def foo(self):
return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))
def __eq__(self, other):
return isinstance(other, RVIdentifierTest.SampleModelWithEq)
class SampleModelWithIndex:
@bm.random_variable
def foo(self, u: int):
return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))
def test_indexed_model_rv_identifier(self):
model = self.SampleModelWithIndex()
# RVs indexed using primitives should not show a user warning
with warnings.catch_warnings():
warnings.simplefilter("error")
model.foo(1)
with self.assertWarns(UserWarning) as context:
model.foo(torch.tensor(1))
self.assertEqual(
"PyTorch tensors are hashed by memory address instead of value. "
"Therefore, it is not recommended to use tensors as indices of random variables.",
str(context.warning),
msg="RVs indexed using tensor should show the correct user warning",
)
def test_pickle_unbound_rv_identifier(self):
original_foo_key = foo()
foo_bytes = pickle.dumps(foo())
reloaded_foo_key = pickle.loads(foo_bytes)
# reloaded RVIdentifier should be equivalent to the original copy
self.assertEqual(original_foo_key, reloaded_foo_key)
self.assertEqual(reloaded_foo_key, foo())
# In fact, when unpickling, it will recover the reference to the decorated
# function
self.assertIs(reloaded_foo_key.wrapper, foo)
# ^ this requires the function to be available when unpickling
def test_pickle_rv_with_same_name(self):
rv_bytes = pickle.dumps((foo(), self.SampleModel.foo()))
foo_key_1, foo_key_2 = pickle.loads(rv_bytes)
self.assertEqual(foo(), foo_key_1)
self.assertEqual(self.SampleModel.foo(), foo_key_2)
# the two 'foo' functions with same name are not equivalent
self.assertNotEqual(foo_key_1, foo_key_2)
def test_pickle_bound_rv_identifier(self):
model = self.SampleModel()
bar_key = model.bar(3.0)
# we should dump the model and RVIdentifier together if we want to recover the
# reference
model_and_rv_bytes = pickle.dumps((model, bar_key))
reloaded_model, reloaded_bar_key = pickle.loads(model_and_rv_bytes)
# We should be able to use the reloaded model to generate new RVIdentifier that
# are equivalent to the unpickled ones
self.assertEqual(reloaded_model.bar(3.0), reloaded_bar_key)
# However, notice that the reloaded model is a copy of the original model with
# the same value, so unless __eq__ is defined on the model, Python will compare
# object by address (so the reloaded model & identifier are not equal to the
# original ones)
self.assertNotEqual(reloaded_model, model)
self.assertNotEqual(bar_key, reloaded_bar_key)
def test_pickle_bound_rv_in_model_with_eq_operator(self):
model = self.SampleModelWithEq()
foo_key = model.foo()
model_and_rv_bytes = pickle.dumps((model, foo_key))
reloaded_model, reloaded_foo_key = pickle.loads(model_and_rv_bytes)
self.assertEqual(reloaded_model, model)
self.assertEqual(foo_key, reloaded_foo_key)
self.assertEqual(model.foo(), reloaded_foo_key)
# Though instead of defining __eq__ and maintaining multiple copies of the
# model, it might be better to just use the unpickled model in a new session, i.e.
del model # mock the case where model is not defined in the new session yet
model, bar_key = pickle.loads(model_and_rv_bytes)
self.assertEqual(model.foo(), foo_key)
# For global-scope random variables, the function definitions have to be
# available when unpickling. Similarly, for class-scope random variables, the
# class definition also needs to be available.
def test_pickle_multiple_models(self):
model1 = self.SampleModel()
model2 = self.SampleModel()
self.assertNotEqual(model1.baz(), model2.baz())
rv_set = {model1.baz(), model2.baz(), model2.bar(1.5)}
# the following shows how one might serialize the models and values together
serialized_bytes = pickle.dumps(
{"model1": model1, "model2": model2, "values_to_keep": rv_set}
)
# notice that we can also dump the two models separately as long as they don't
# cross reference each other
# delete current variables and "start a new session"
del model1
del model2
del rv_set
restored_state = pickle.loads(serialized_bytes)
model1 = restored_state.get("model1")
model2 = restored_state.get("model2")
rv_set = restored_state.get("values_to_keep")
self.assertNotEqual(model1.baz(), model2.baz())
self.assertIn(model1.baz(), rv_set)
self.assertIn(model2.baz(), rv_set)
self.assertNotIn(model1.bar(1.5), rv_set)
self.assertIn(model2.bar(1.5), rv_set)
def test_sorting_rv_identifier(self):
model = self.SampleModel()
observations = {
model.foo(): torch.tensor(1.0),
model.bar(0.5): torch.tensor(1.0),
model.baz(): torch.tensor(1.0),
}
# make sure the following doesn't raise
sorted(observations.keys())
pytree.tree_flatten(observations)
| beanmachine-main | tests/ppl/model/rv_identifier_test.py |
beanmachine-main | tests/ppl/model/__init__.py |
|
beanmachine-main | tests/ppl/diagnostics/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Dict
import beanmachine.ppl as bm
import beanmachine.ppl.diagnostics.common_statistics as common_statistics
import numpy as np
import pandas as pd
import torch
import torch.distributions as dist
from beanmachine.ppl.diagnostics.diagnostics import Diagnostics
from statsmodels.tsa.stattools import acf
diri_dis = dist.Dirichlet(
torch.tensor([[1.0, 2.0, 3.0], [2.0, 1.0, 3.0], [2.0, 3.0, 1.0]])
)
beta_dis = dist.Beta(torch.tensor([1.0, 2.0, 3.0]), torch.tensor([9.0, 8.0, 7.0]))
normal_dis = dist.Normal(torch.tensor([0.0, 1.0, 2.0]), torch.tensor([0.5, 1.0, 1.5]))
@bm.random_variable
def diri(i, j):
return diri_dis
@bm.random_variable
def beta(i):
return beta_dis
@bm.random_variable
def normal():
return normal_dis
@bm.random_variable
def foo():
return dist.Normal(0, 1)
@bm.random_variable
def bar():
return dist.Normal(torch.randn(3, 1, 2), torch.ones(3, 1, 2))
def dist_summary_stats() -> Dict[str, Dict[str, torch.Tensor]]:
exact_mean = {
"beta": beta_dis.mean.reshape(-1),
"diri": diri_dis.mean.reshape(-1),
"normal": normal_dis.mean.reshape(-1),
}
exact_std = {
"beta": torch.sqrt(beta_dis.variance.reshape(-1)),
"diri": torch.sqrt(diri_dis.variance.reshape(-1)),
"normal": torch.sqrt(normal_dis.variance.reshape(-1)),
}
exact_CI_2_5 = {"normal": normal_dis.mean - 1.96 * torch.sqrt(normal_dis.variance)}
exact_CI_50 = {"normal": normal_dis.mean}
exact_CI_97_5 = {"normal": normal_dis.mean + 1.96 * torch.sqrt(normal_dis.variance)}
exact_stats = {
"avg": exact_mean,
"std": exact_std,
"2.5%": exact_CI_2_5,
"50%": exact_CI_50,
"97.5%": exact_CI_97_5,
}
return exact_stats
class DiagnosticsTest(unittest.TestCase):
def test_basic_diagnostics(self):
def _inference_evaluation(summary: pd.DataFrame):
exact_stats = dist_summary_stats()
for col in summary.columns:
if col not in exact_stats:
continue
for dis, res in exact_stats[col].items():
query_res = summary.loc[summary.index.str.contains(f"^{dis}")]
for i, val in enumerate(query_res[col].values):
self.assertAlmostEqual(
val,
res[i].item(),
msg=f"query {query_res.index[i]} for {col}",
delta=0.5,
)
def _test_plot_object(diag, query, query_samples):
plot_object = diag.plot([query])
trace_object = diag.trace([query])
index = 0
num_samples = query_samples[0].numel()
# test the trace plot over the first chain of beta(0)
for i in range(num_samples):
assert all(
a == b
for a, b in zip(
plot_object[0]["data"][index]["y"], query_samples[:, i]
)
), f"plot object for {diag._stringify_query(query)} is not correct"
assert all(
a == b
for a, b in zip(
trace_object[0]["data"][index]["y"], query_samples[:, i]
)
), f"trace object for {diag._stringify_query(query)} {i} is not correct"
index += 2
def _test_autocorr_object(diag, query, query_samples):
autocorr_object = diag.autocorr([query])
index = 0
num_samples = query_samples[0].numel()
# test the autocorr results over the first chain of beta(0)
for i in range(num_samples):
expected_acf = acf(
query_samples[:, i].detach().numpy(),
True,
nlags=num_samples - 1,
fft=False,
)
for ns in range(num_samples):
self.assertAlmostEqual(
autocorr_object[0]["data"][index]["y"][ns],
expected_acf[ns],
msg=f"autocorr data for {diag._stringify_query(query)}\
is not correct",
delta=0.3,
)
index += 1
np.random.seed(123)
torch.manual_seed(123)
mh = bm.SingleSiteAncestralMetropolisHastings()
query_list = [beta(0), diri(1, 5), normal()]
num_chains = 2
samples = mh.infer(query_list, {}, 200, num_chains)
out_df = Diagnostics(samples).summary()
_inference_evaluation(out_df)
out_df = Diagnostics(samples).summary([diri(1, 5), beta(0)])
_inference_evaluation(out_df)
out_df = Diagnostics(samples).summary(query_list=[diri(1, 5)], chain=1)
_inference_evaluation(out_df)
self.assertRaises(ValueError, Diagnostics(samples).summary, [diri(1, 3)])
self.assertRaises(ValueError, Diagnostics(samples).summary, [diri(1, 5), foo()])
query = beta(0)
query_samples = samples[query][0]
_test_plot_object(Diagnostics(samples), query, query_samples)
_test_autocorr_object(Diagnostics(samples), query, query_samples)
def test_r_hat_one_chain(self):
mh = bm.SingleSiteAncestralMetropolisHastings()
samples = mh.infer([normal()], {}, 5, 1)
diagnostics = Diagnostics(samples)
with self.assertWarns(UserWarning):
results = diagnostics.split_r_hat([normal()])
self.assertTrue(results.empty)
def test_r_hat_column(self):
mh = bm.SingleSiteAncestralMetropolisHastings()
samples = mh.infer([normal()], {}, 5, 2)
diagnostics = Diagnostics(samples)
out_df = diagnostics.summary()
self.assertTrue("r_hat" in out_df.columns)
out_df = diagnostics.summary(chain=0)
self.assertTrue("r_hat" not in out_df.columns)
def test_r_hat_no_column(self):
mh = bm.SingleSiteAncestralMetropolisHastings()
samples = mh.infer([normal()], {}, 5, 1)
out_df = Diagnostics(samples).summary()
self.assertTrue("r_hat" not in out_df.columns)
def test_r_hat(self):
samples = torch.tensor([[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0]])
self.assertAlmostEqual(common_statistics.r_hat(samples), 2.3558, delta=0.001)
self.assertAlmostEqual(
common_statistics.split_r_hat(samples), 3.7193, delta=0.001
)
def test_r_hat_additional_dimension(self):
samples = torch.tensor(
[
[[0.0, 2.0], [2.0, 4.0], [4.0, 8.0], [6.0, 0.0]],
[[8.0, 12.0], [10.0, 6.0], [12.0, 1.0], [14.0, 2.0]],
[[16.0, -5.0], [18.0, 4.0], [20.0, 2.0], [22.0, 4.0]],
]
)
dim1, dim2 = common_statistics.r_hat(samples)
self.assertAlmostEqual(dim1, 3.2171, delta=0.001)
self.assertAlmostEqual(dim2, 0.9849, delta=0.001)
dim1, dim2 = common_statistics.split_r_hat(samples)
self.assertAlmostEqual(dim1, 5.3385, delta=0.001)
self.assertAlmostEqual(dim2, 1.0687, delta=0.001)
def test_effective_sample_size(self):
samples = torch.tensor(
[[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]], dtype=torch.double
)
n_eff = common_statistics.effective_sample_size(samples)
self.assertAlmostEqual(n_eff, 2.6114, delta=0.001)
def test_effective_sample_size_additional_dimension(self):
samples = torch.tensor(
[
[[0.0, 2.0], [2.0, 4.0], [4.0, 8.0], [6.0, 0.0]],
[[8.0, 12.0], [10.0, 6.0], [12.0, 1.0], [14.0, 2.0]],
[[16.0, -5.0], [18.0, 4.0], [20.0, 2.0], [22.0, 4.0]],
]
)
dim1, dim2 = common_statistics.effective_sample_size(samples)
self.assertAlmostEqual(dim1, 1.9605, delta=0.001)
self.assertAlmostEqual(dim2, 15.1438, delta=0.001)
def test_effective_sample_size_columns(self):
mh = bm.SingleSiteAncestralMetropolisHastings()
samples = mh.infer([normal()], {}, 5, 2)
out_df = Diagnostics(samples).summary()
self.assertTrue("n_eff" in out_df.columns)
def test_singleton_dims(self):
mh = bm.SingleSiteAncestralMetropolisHastings()
obs = {bar(): torch.ones(3, 1, 2)}
samples = mh.infer([bar()], obs, 5, 2)
diagnostics = Diagnostics(samples)
out_df = diagnostics.summary()
self.assertTrue("r_hat" in out_df.columns)
| beanmachine-main | tests/ppl/diagnostics/diagnostics_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class SingleSiteNewtonianMonteCarloConjugateTest(
unittest.TestCase, AbstractConjugateTests
):
# TODO: Decrease num_samples; needing num_samples>=2000 to get n_eff>=30 is
# unreasonable. It currently fails for num_samples=1000 because the
# hessian (for the transform proposer) is extremely close to 0.
def test_beta_binomial_conjugate_run(self):
nw = bm.SingleSiteNewtonianMonteCarlo()
self.beta_binomial_conjugate_run(nw, num_samples=2000)
def test_gamma_gamma_conjugate_run(self):
nw_transform = bm.SingleSiteNewtonianMonteCarlo()
self.gamma_gamma_conjugate_run(nw_transform, num_samples=200)
def test_gamma_normal_conjugate_run(self):
nw = bm.SingleSiteNewtonianMonteCarlo()
self.gamma_normal_conjugate_run(nw, num_samples=600)
def test_normal_normal_conjugate_run(self):
nw = bm.SingleSiteNewtonianMonteCarlo()
self.normal_normal_conjugate_run(nw, num_samples=500)
def test_dirichlet_categorical_conjugate_run(self):
nw = bm.SingleSiteNewtonianMonteCarlo()
self.dirichlet_categorical_conjugate_run(nw, num_samples=2000)
| beanmachine-main | tests/ppl/inference/single_site_newtonian_monte_carlo_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import unittest
import beanmachine.ppl as bm
import numpy as np
import torch
import torch.distributions as dist
import xarray as xr
from beanmachine.ppl.inference.monte_carlo_samples import merge_dicts, MonteCarloSamples
class MonteCarloSamplesTest(unittest.TestCase):
class SampleModel(object):
@bm.random_variable
def foo(self):
return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), torch.tensor(1.0))
def test_default_four_chains(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
mcs = mh.infer([foo_key], {}, 10)
self.assertEqual(mcs[foo_key].shape, torch.zeros(4, 10).shape)
self.assertEqual(mcs.get_variable(foo_key).shape, torch.zeros(4, 10).shape)
self.assertEqual(mcs.get_chain(3)[foo_key].shape, torch.zeros(10).shape)
self.assertEqual(mcs.num_chains, 4)
self.assertCountEqual(mcs.keys(), [foo_key])
mcs = mh.infer([foo_key], {}, 7, num_adaptive_samples=3)
self.assertEqual(mcs.num_adaptive_samples, 3)
self.assertEqual(mcs[foo_key].shape, torch.zeros(4, 7).shape)
self.assertEqual(mcs.get_variable(foo_key).shape, torch.zeros(4, 7).shape)
self.assertEqual(
mcs.get_variable(foo_key, True).shape, torch.zeros(4, 10).shape
)
self.assertEqual(mcs.get_chain(3)[foo_key].shape, torch.zeros(7).shape)
self.assertEqual(mcs.num_chains, 4)
self.assertCountEqual(mcs.keys(), [foo_key])
def test_one_chain(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
bar_key = model.bar()
mcs = mh.infer([foo_key, bar_key], {}, 10, 1)
self.assertEqual(mcs[foo_key].shape, torch.zeros(1, 10).shape)
self.assertEqual(mcs.get_variable(foo_key).shape, torch.zeros(1, 10).shape)
self.assertEqual(mcs.get_chain()[foo_key].shape, torch.zeros(10).shape)
self.assertEqual(mcs.num_chains, 1)
self.assertCountEqual(mcs.keys(), [foo_key, bar_key])
mcs = mh.infer([foo_key, bar_key], {}, 7, 1, num_adaptive_samples=3)
self.assertEqual(mcs.num_adaptive_samples, 3)
self.assertEqual(mcs[foo_key].shape, torch.zeros(1, 7).shape)
self.assertEqual(mcs.get_variable(foo_key).shape, torch.zeros(1, 7).shape)
self.assertEqual(
mcs.get_variable(foo_key, True).shape, torch.zeros(1, 10).shape
)
self.assertEqual(mcs.get_chain()[foo_key].shape, torch.zeros(7).shape)
self.assertEqual(mcs.num_chains, 1)
self.assertCountEqual(mcs.keys(), [foo_key, bar_key])
def test_chain_exceptions(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
mcs = mh.infer([foo_key], {}, 10)
with self.assertRaisesRegex(IndexError, r"Please specify a valid chain"):
mcs.get_chain(-1)
with self.assertRaisesRegex(IndexError, r"Please specify a valid chain"):
mcs.get_chain(4)
with self.assertRaisesRegex(
ValueError,
r"The current MonteCarloSamples object has already"
r" been restricted to a single chain",
):
one_chain = mcs.get_chain()
one_chain.get_chain()
def test_num_adaptive_samples(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
mcs = mh.infer([foo_key], {}, 10, num_adaptive_samples=3)
self.assertEqual(mcs[foo_key].shape, torch.zeros(4, 10).shape)
self.assertEqual(mcs.get_variable(foo_key).shape, torch.zeros(4, 10).shape)
self.assertEqual(
mcs.get_variable(foo_key, include_adapt_steps=True).shape,
torch.zeros(4, 13).shape,
)
self.assertEqual(mcs.get_num_samples(), 10)
self.assertEqual(mcs.get_num_samples(include_adapt_steps=True), 13)
def test_dump_and_restore_samples(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
samples = mh.infer([foo_key], {}, num_samples=10, num_chains=2)
self.assertEqual(samples[foo_key].shape, (2, 10))
dumped = pickle.dumps((model, samples))
# delete local variables and pretend that we are starting from a new session
del model
del mh
del foo_key
del samples
# reload from dumped bytes
reloaded_model, reloaded_samples = pickle.loads(dumped)
# check the values still exist and have the correct shape
self.assertEqual(reloaded_samples[reloaded_model.foo()].shape, (2, 10))
def test_get_rv_with_default(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
samples = mh.infer([foo_key], {}, num_samples=10, num_chains=2)
self.assertIn(model.foo(), samples)
self.assertIsInstance(samples.get(model.foo()), torch.Tensor)
self.assertIsNone(samples.get(model.bar()))
self.assertEqual(samples.get(model.foo(), chain=0).shape, (10,))
def test_merge_dicts(self):
model = self.SampleModel()
chain_lists = [{model.foo(): torch.rand(3)}, {model.foo(): torch.rand(3)}]
rv_dict = merge_dicts(chain_lists)
self.assertIn(model.foo(), rv_dict)
self.assertEqual(rv_dict.get(model.foo()).shape, (2, 3))
chain_lists.append({model.bar(): torch.rand(3)})
with self.assertRaises(ValueError):
merge_dicts(chain_lists)
def test_type_conversion(self):
model = self.SampleModel()
samples = MonteCarloSamples(
[{model.foo(): torch.rand(5), model.bar(): torch.rand(5)}],
num_adaptive_samples=3,
)
xr_dataset = samples.to_xarray()
self.assertIsInstance(xr_dataset, xr.Dataset)
self.assertIn(model.foo(), xr_dataset)
assert np.allclose(samples[model.bar()].numpy(), xr_dataset[model.bar()])
xr_dataset = samples.to_xarray(include_adapt_steps=True)
self.assertEqual(xr_dataset[model.foo()].shape, (1, 5))
inference_data = samples.to_inference_data()
self.assertIn(model.foo(), inference_data.posterior)
def test_get_variable(self):
model = self.SampleModel()
samples = MonteCarloSamples(
[{model.foo(): torch.arange(10)}], num_adaptive_samples=3
).get_chain(0)
self.assertTrue(
torch.all(samples.get_variable(model.foo()) == torch.arange(3, 10))
)
self.assertTrue(
torch.all(samples.get_variable(model.foo(), True) == torch.arange(10))
)
def test_get_log_likehoods(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
bar_key = model.bar()
mcs = mh.infer(
[foo_key],
{bar_key: torch.tensor(4.0)},
num_samples=5,
num_chains=2,
)
self.assertTrue(hasattr(mcs, "log_likelihoods"))
self.assertIn(bar_key, mcs.log_likelihoods)
self.assertTrue(hasattr(mcs, "adaptive_log_likelihoods"))
self.assertIn(bar_key, mcs.adaptive_log_likelihoods)
self.assertEqual(
mcs.get_log_likelihoods(bar_key).shape, torch.zeros(2, 5).shape
)
mcs = mcs.get_chain(0)
self.assertEqual(mcs.get_log_likelihoods(bar_key).shape, torch.zeros(5).shape)
mcs = mh.infer(
[foo_key],
{bar_key: torch.tensor(4.0)},
num_samples=5,
num_chains=2,
num_adaptive_samples=3,
)
self.assertEqual(
mcs.get_log_likelihoods(bar_key).shape, torch.zeros(2, 5).shape
)
self.assertEqual(
mcs.adaptive_log_likelihoods[bar_key].shape, torch.zeros(2, 3).shape
)
self.assertEqual(
mcs.get_chain(0).get_log_likelihoods(bar_key).shape, torch.zeros(5).shape
)
self.assertEqual(
mcs.get_log_likelihoods(bar_key, True).shape, torch.zeros(2, 8).shape
)
self.assertEqual(
mcs.get_chain(0).adaptive_log_likelihoods[bar_key].shape,
torch.zeros(1, 3).shape,
)
def test_thinning(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
samples = mh.infer([model.foo()], {}, num_samples=20, num_chains=1)
self.assertEqual(samples.get(model.foo(), chain=0).shape, (20,))
self.assertEqual(samples.get(model.foo(), chain=0, thinning=4).shape, (5,))
def test_add_group(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
samples = mh.infer([model.foo()], {}, num_samples=20, num_chains=1)
bar_samples = MonteCarloSamples(samples.samples, default_namespace="bar")
bar_samples.add_groups(samples)
self.assertEqual(samples.observations, bar_samples.observations)
self.assertEqual(samples.log_likelihoods, bar_samples.log_likelihoods)
self.assertIn("posterior", bar_samples.namespaces)
def test_to_inference_data(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
samples = mh.infer([model.foo()], {}, num_samples=10, num_chains=1)
az_xarray = samples.to_inference_data()
self.assertNotIn("warmup_posterior", az_xarray)
samples = mh.infer(
[model.foo()], {}, num_samples=10, num_adaptive_samples=2, num_chains=1
)
az_xarray = samples.to_inference_data(include_adapt_steps=True)
self.assertIn("warmup_posterior", az_xarray)
| beanmachine-main | tests/ppl/inference/monte_carlo_samples_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class SingleSiteUniformMetropolisHastingsConjugateTest(
unittest.TestCase, AbstractConjugateTests
):
def setUp(self):
self.mh = bm.SingleSiteUniformMetropolisHastings()
def test_beta_binomial_conjugate_run(self):
self.beta_binomial_conjugate_run(self.mh)
def test_gamma_gamma_conjugate_run(self):
self.gamma_gamma_conjugate_run(self.mh, random_seed=123)
def test_gamma_normal_conjugate_run(self):
self.gamma_normal_conjugate_run(self.mh, num_samples=7500)
def test_normal_normal_conjugate_run(self):
self.normal_normal_conjugate_run(self.mh, num_samples=5000)
def test_dirichlet_categorical_conjugate_run(self):
self.dirichlet_categorical_conjugate_run(self.mh, num_samples=5000)
| beanmachine-main | tests/ppl/inference/single_site_uniform_mh_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class SingleSiteAdaptiveRandomWalkConjugateTest(
unittest.TestCase, AbstractConjugateTests
):
def setUp(self):
self.mh = bm.SingleSiteRandomWalk(step_size=5.0)
def test_beta_binomial_conjugate_run(self):
self.mh = bm.SingleSiteRandomWalk(step_size=1.0)
self.beta_binomial_conjugate_run(
self.mh, num_samples=3000, num_adaptive_samples=1600
)
@unittest.skip("Known to fail. Investigating in T77865889.")
def test_gamma_gamma_conjugate_run(self):
self.mh = bm.SingleSiteRandomWalk(step_size=3.0)
self.gamma_gamma_conjugate_run(
self.mh, num_samples=5000, num_adaptive_samples=7000
)
def test_gamma_normal_conjugate_run(self):
self.mh = bm.SingleSiteRandomWalk(step_size=5.0)
self.gamma_normal_conjugate_run(
self.mh, num_samples=6000, num_adaptive_samples=5000
)
@unittest.skip("Known to fail. Investigating in T77865889.")
def test_normal_normal_conjugate_run(self):
self.normal_normal_conjugate_run(
self.mh, num_samples=2000, num_adaptive_samples=2000
)
@unittest.skip("Known to fail. Investigating in T77865889.")
def test_dirichlet_categorical_conjugate_run(self):
self.dirichlet_categorical_conjugate_run(
self.mh, num_samples=2000, num_adaptive_samples=2000
)
| beanmachine-main | tests/ppl/inference/single_site_random_walk_adaptive_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
class SampleModel:
@bm.random_variable
def foo(self):
return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), torch.tensor(1.0))
class ReproducibleModel:
@bm.random_variable
def K_minus_one(self):
return dist.Poisson(rate=2.0)
@bm.functional
def K(self):
return self.K_minus_one() + 1
@bm.random_variable
def mu(self):
return dist.Normal(0, 1)
def test_single_site_ancestral_mh():
model = SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
bar_key = model.bar()
sampler = mh.sampler(
[model.foo()], {model.bar(): torch.tensor(0.0)}, num_samples=10
)
for world in sampler:
assert foo_key in world
assert bar_key in world
assert foo_key in world.get_variable(bar_key).parents
assert bar_key in world.get_variable(foo_key).children
def test_single_site_ancestral_mh_reproducible_results():
model = ReproducibleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
queries = [model.mu()]
observations = {}
torch.manual_seed(42)
samples = mh.infer(queries, observations, num_samples=5, num_chains=1)
run_1 = samples.get_variable(model.mu()).clone()
torch.manual_seed(42)
samples = mh.infer(queries, observations, num_samples=5, num_chains=1)
run_2 = samples.get_variable(model.mu()).clone()
assert run_1.allclose(run_2)
torch.manual_seed(43)
samples = mh.infer(queries, observations, num_samples=5, num_chains=1)
run_3 = samples.get_variable(model.mu()).clone()
assert not run_1.allclose(run_3)
| beanmachine-main | tests/ppl/inference/single_site_ancestral_mh_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class SingleSiteNoUTurnConjugateTest(unittest.TestCase, AbstractConjugateTests):
def setUp(self):
self.nuts = bm.SingleSiteNoUTurnSampler()
def test_beta_binomial_conjugate_run(self):
self.beta_binomial_conjugate_run(
self.nuts, num_samples=300, num_adaptive_samples=300
)
def test_gamma_gamma_conjugate_run(self):
self.gamma_gamma_conjugate_run(
self.nuts, num_samples=300, num_adaptive_samples=300
)
def test_gamma_normal_conjugate_run(self):
self.gamma_normal_conjugate_run(
self.nuts, num_samples=300, num_adaptive_samples=300
)
def test_normal_normal_conjugate_run(self):
self.normal_normal_conjugate_run(
self.nuts, num_samples=300, num_adaptive_samples=300
)
def test_dirichlet_categorical_conjugate_run(self):
self.dirichlet_categorical_conjugate_run(
self.nuts, num_samples=300, num_adaptive_samples=300
)
| beanmachine-main | tests/ppl/inference/single_site_no_u_turn_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
@bm.random_variable
def foo():
return dist.Normal(0.0, 1.0)
def test_set_random_seed():
def sample_with_seed(seed):
bm.seed(seed)
return bm.SingleSiteAncestralMetropolisHastings().infer(
[foo()], {}, num_samples=20, num_chains=1
)
samples1 = sample_with_seed(123)
samples2 = sample_with_seed(123)
assert torch.allclose(samples1[foo()], samples2[foo()])
def test_detach_samples():
"""Test to ensure samples are detached from torch computation graphs."""
queries = [foo()]
samples = bm.SingleSiteAncestralMetropolisHastings().infer(
queries=queries,
observations={},
num_samples=20,
num_chains=1,
)
rv_data = samples[foo()]
idata = samples.to_inference_data()
assert hasattr(rv_data, "detach")
assert not hasattr(idata["posterior"][foo()], "detach")
| beanmachine-main | tests/ppl/inference/utils_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
class SampleModel:
@bm.random_variable
def foo(self):
return dist.Normal(0.0, 1.0)
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), 1.0)
@pytest.mark.parametrize(
"algorithm",
[
bm.GlobalNoUTurnSampler(nnc_compile=True),
bm.GlobalHamiltonianMonteCarlo(trajectory_length=1.0, nnc_compile=True),
],
)
def test_nnc_compile(algorithm):
model = SampleModel()
queries = [model.foo()]
observations = {model.bar(): torch.tensor(0.5)}
num_samples = 30
num_chains = 2
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# verify that NNC can run through
samples = algorithm.infer(
queries,
observations,
num_samples,
num_adaptive_samples=num_samples,
num_chains=num_chains,
)
# sanity check: make sure that the samples are valid
assert not torch.isnan(samples[model.foo()]).any()
| beanmachine-main | tests/ppl/inference/nnc_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import sys
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer
from beanmachine.ppl.world import init_from_prior, World
class SampleModel:
@bm.random_variable
def foo(self):
return dist.Normal(0.0, 1.0)
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), 1.0)
@bm.functional
def baz(self):
return self.bar() * 2.0
class SampleDoubleModel:
@bm.random_variable
def foo(self):
return dist.Normal(torch.tensor(0.0).double(), torch.tensor(1.0).double())
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), torch.tensor(1.0).double())
@pytest.mark.parametrize("multiprocess", [False, True])
def test_inference(multiprocess):
if multiprocess and sys.platform.startswith("win"):
pytest.skip(
"Windows does not support fork-based multiprocessing (which is necessary "
"for running parallel inference within pytest."
)
model = SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
queries = [model.foo(), model.baz()]
observations = {model.bar(): torch.tensor(0.5)}
num_samples = 30
num_chains = 2
samples = mh.infer(
queries,
observations,
num_samples,
num_adaptive_samples=num_samples,
num_chains=num_chains,
run_in_parallel=multiprocess,
mp_context="fork",
)
assert model.foo() in samples
assert isinstance(samples[model.foo()], torch.Tensor)
assert samples[model.foo()].shape == (num_chains, num_samples)
assert samples.get_num_samples(include_adapt_steps=True) == num_samples * 2
# make sure that the RNG state for each chain is different
assert not torch.equal(
samples.get_chain(0)[model.foo()], samples.get_chain(1)[model.foo()]
)
def test_get_proposers():
world = World()
model = SampleModel()
world.call(model.bar())
nuts = bm.GlobalNoUTurnSampler()
proposers = nuts.get_proposers(world, world.latent_nodes, 10)
assert all(isinstance(proposer, BaseProposer) for proposer in proposers)
def test_initialize_world():
model = SampleModel()
world = World.initialize_world([model.bar()], {})
assert model.foo() in world
assert model.bar() in world
def test_initialize_from_prior():
model = SampleModel()
queries = [model.foo()]
samples_from_prior = []
for _ in range(10000):
world = World.initialize_world(queries, {}, initialize_fn=init_from_prior)
val = world.get(model.foo())
samples_from_prior.append(val.item())
assert samples_from_prior[0] != samples_from_prior[1]
assert math.isclose(sum(samples_from_prior) / 10000.0, 0.0, abs_tol=1e-2)
def test_initialization_resampling():
mh = bm.SingleSiteAncestralMetropolisHastings()
@bm.random_variable
def foo():
return dist.Uniform(3.0, 5.0)
# verify that the method re-sample as expected
retries = 0
def init_after_three_tries(d: dist.Distribution):
nonlocal retries
retries += 1
return torch.tensor(float("nan")) if retries < 3 else d.sample()
sampler = mh.sampler(
[foo()], {}, num_samples=10, initialize_fn=init_after_three_tries
)
for world in sampler:
assert not torch.isinf(world.log_prob()) and not torch.isnan(world.log_prob())
# an extreme case where the init value is always out of the support
def init_to_zero(d: dist.Distribution):
return torch.zeros_like(d.sample())
with pytest.raises(ValueError, match="Cannot find a valid initialization"):
mh.infer([foo()], {}, num_samples=10, initialize_fn=init_to_zero)
@pytest.mark.parametrize(
"algorithm",
[
bm.GlobalNoUTurnSampler(full_mass_matrix=False),
bm.GlobalNoUTurnSampler(full_mass_matrix=True),
bm.GlobalHamiltonianMonteCarlo(trajectory_length=1.0),
bm.SingleSiteAncestralMetropolisHastings(),
bm.SingleSiteNewtonianMonteCarlo(),
bm.SingleSiteUniformMetropolisHastings(),
],
)
def test_inference_with_double_dtype(algorithm):
model = SampleDoubleModel()
queries = [model.foo()]
bar_val = torch.tensor(0.5).double()
# make sure that the inference can run successfully
samples = algorithm.infer(
queries,
{model.bar(): bar_val},
num_samples=20,
num_chains=1,
)
assert samples[model.foo()].dtype == bar_val.dtype
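# Hedged sketch of the retry-until-valid initialization behavior probed in
# test_initialization_resampling: keep drawing from `initialize_fn` until the
# value lands in the distribution's support, up to a retry cap. Names here
# are illustrative, not the library's internals.
def _init_with_retries(d, initialize_fn, max_retries=100):
    for _ in range(max_retries):
        value = initialize_fn(d)
        if not torch.isnan(value).any() and d.support.check(value).all():
            return value
    raise ValueError("Cannot find a valid initialization")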
| beanmachine-main | tests/ppl/inference/inference_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
class SingleSiteUniformMetropolisHastingsTest(unittest.TestCase):
class SampleBernoulliModel(object):
@bm.random_variable
def foo(self):
return dist.Beta(torch.tensor(2.0), torch.tensor(2.0))
@bm.random_variable
def bar(self):
return dist.Bernoulli(self.foo())
class SampleCategoricalModel(object):
@bm.random_variable
def foo(self):
return dist.Dirichlet(torch.tensor([0.5, 0.5]))
@bm.random_variable
def bar(self):
return dist.Categorical(self.foo())
def test_single_site_uniform_mh_with_bernoulli(self):
model = self.SampleBernoulliModel()
mh = bm.SingleSiteUniformMetropolisHastings()
foo_key = model.foo()
bar_key = model.bar()
sampler = mh.sampler([foo_key], {bar_key: torch.tensor(0.0)}, num_samples=5)
for world in sampler:
self.assertTrue(foo_key in world)
self.assertTrue(bar_key in world)
self.assertTrue(foo_key in world.get_variable(bar_key).parents)
self.assertTrue(bar_key in world.get_variable(foo_key).children)
def test_single_site_uniform_mh_with_categorical(self):
model = self.SampleCategoricalModel()
mh = bm.SingleSiteUniformMetropolisHastings()
foo_key = model.foo()
bar_key = model.bar()
sampler = mh.sampler([foo_key], {bar_key: torch.tensor(0.0)}, num_samples=5)
for world in sampler:
self.assertTrue(foo_key in world)
self.assertTrue(bar_key in world)
self.assertTrue(foo_key in world.get_variable(bar_key).parents)
self.assertTrue(bar_key in world.get_variable(foo_key).children)
| beanmachine-main | tests/ppl/inference/single_site_uniform_mh_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from typing import Optional
import beanmachine.ppl as bm
import numpy
import pytest
import scipy.stats
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.vi import ADVI, MAP, VariationalInfer
from beanmachine.ppl.inference.vi.gradient_estimator import monte_carlo_approximate_sf
from beanmachine.ppl.inference.vi.variational_world import VariationalWorld
from beanmachine.ppl.world import init_from_prior, RVDict
from torch import optim
from torch.distributions import constraints
from torch.distributions.utils import _standard_normal
cpu_device = torch.device("cpu")
class NealsFunnel(dist.Distribution):
"""
Neal's funnel.
p(x,y) = N(y|0,3) N(x|0,exp(y/2))
"""
support = constraints.real
def __init__(self, validate_args=None):
d = 2
batch_shape, event_shape = torch.Size([]), (d,)
super(NealsFunnel, self).__init__(
batch_shape, event_shape, validate_args=validate_args
)
def rsample(self, sample_shape=None):
if not sample_shape:
sample_shape = torch.Size((1,))
eps = _standard_normal(
(sample_shape[0], 2), dtype=torch.float, device=torch.device("cpu")
)
z = torch.zeros(eps.shape)
z[..., 1] = torch.tensor(3.0) * eps[..., 1]
z[..., 0] = torch.exp(z[..., 1] / 2.0) * eps[..., 0]
return z
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
x = value[..., 0]
y = value[..., 1]
log_prob = dist.Normal(0, 3).log_prob(y)
log_prob += dist.Normal(0, torch.exp(y / 2)).log_prob(x)
return log_prob
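# Sanity check for the density above: at (x, y) = (0, 0) the log density is
#   log N(0 | 0, 3) + log N(0 | 0, exp(0)) = -log(3) - log(2 * pi) ~= -2.94,
# which NealsFunnel().log_prob(torch.zeros(2)) should reproduce.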
class BayesianRobustLinearRegression:
def __init__(self, n, d):
self.n = n
self.d = d
self.X_train = torch.randn(n, d)
self.beta_truth = torch.randn(d + 1, 1)
noise = dist.StudentT(df=4.0).sample((n, 1))
self.y_train = (
torch.cat((self.X_train, torch.ones(n, 1)), -1).mm(self.beta_truth) + noise
)
@bm.random_variable
def beta(self):
return dist.Independent(
dist.StudentT(df=4.0 * torch.ones(self.d + 1)),
1,
)
@bm.random_variable
def X(self):
return dist.Normal(0, 1) # dummy
@bm.random_variable
def y(self):
X_with_ones = torch.cat((self.X(), torch.ones(self.X().shape[0], 1)), -1)
b = self.beta().squeeze()
if b.dim() == 1:
b = b.unsqueeze(0)
mu = X_with_ones.mm(b.T)
return dist.Independent(
dist.StudentT(df=4.0, loc=mu, scale=1),
1,
)
class NormalNormal:
def __init__(
self,
mean_0: float = 0.0,
variance_0: float = 1.0,
variance_x: float = 1.0,
device: Optional[torch.device] = cpu_device,
):
self.device = device
self.mean_0 = mean_0
self.variance_0 = variance_0
self.variance_x = variance_x
@bm.random_variable
def mu(self):
return dist.Normal(
torch.zeros(1).to(self.device), 10 * torch.ones(1).to(self.device)
)
@bm.random_variable
def x(self, i):
return dist.Normal(self.mu(), torch.ones(1).to(self.device))
def conjugate_posterior(self, observations: RVDict) -> dist.Normal:
# Normal-Normal conjugate prior formula (https://en.wikipedia.org/wiki/Conjugate_prior#When_likelihood_function_is_a_continuous_distribution)
expected_variance = 1 / (
(1 / self.variance_0) + (len(observations) / self.variance_x)
)
expected_std = numpy.sqrt(expected_variance)
expected_mean = expected_variance * (
(self.mean_0 / self.variance_0)
+ (sum(observations.values()) / self.variance_x)
)
return dist.Normal(expected_mean, expected_std)
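# Worked instance of the update above, assuming two observations of 1.0 with
# mean_0 = 0, variance_0 = 1, variance_x = 1:
#   posterior variance = 1 / (1/1 + 2/1) = 1/3
#   posterior mean = (1/3) * (0/1 + (1.0 + 1.0)/1) = 2/3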
class LogScaleNormal:
@bm.param
def phi(self):
return torch.zeros(2) # mean, log std
@bm.random_variable
def q_mu(self):
params = self.phi()
return dist.Normal(params[0], params[1].exp())
class BinaryGaussianMixture:
@bm.random_variable
def h(self, i):
return dist.Bernoulli(0.5)
@bm.random_variable
def x(self, i):
return dist.Normal(self.h(i).float(), 0.1)
class TestAutoGuide:
@pytest.mark.parametrize("auto_guide_inference", [ADVI, MAP])
def test_can_use_functionals(self, auto_guide_inference):
test_rv = bm.random_variable(lambda: dist.Normal(0, 1))
test_functional = bm.functional(lambda: test_rv() ** 2)
auto_guide = auto_guide_inference(
queries=[test_rv(), test_functional()],
observations={},
)
world = auto_guide.infer(num_steps=10)
assert world.call(test_functional()) is not None
@pytest.mark.parametrize("auto_guide_inference", [ADVI, MAP])
def test_neals_funnel(self, auto_guide_inference):
nf = bm.random_variable(NealsFunnel)
auto_guide = auto_guide_inference(
queries=[nf()],
observations={},
optimizer=lambda params: torch.optim.Adam(params, lr=1e-1),
)
world = auto_guide.infer(
num_steps=100,
)
if auto_guide_inference == ADVI:
# compare 1D marginals of empirical distributions using 2-sample K-S test
nf_samples = NealsFunnel().sample((20,)).squeeze().numpy()
vi_samples = (
world.get_guide_distribution(nf())
.sample((20,))
.detach()
.squeeze()
.numpy()
)
assert (
scipy.stats.ks_2samp(nf_samples[:, 0], vi_samples[:, 0]).pvalue >= 0.05
)
assert (
scipy.stats.ks_2samp(nf_samples[:, 1], vi_samples[:, 1]).pvalue >= 0.05
)
else:
vi_samples = world.get_guide_distribution(nf()).v.detach().squeeze().numpy()
map_truth = [0, -4.5]
assert numpy.isclose(map_truth, vi_samples, atol=0.05).all().item()
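# The MAP point (0, -4.5) follows from maximizing the funnel's log density:
# with x = 0, d/dy [-y^2 / 18 - y / 2] = -y / 9 - 1/2 = 0 gives y = -4.5.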
@pytest.mark.parametrize("auto_guide_inference", [ADVI, MAP])
def test_normal_normal(self, auto_guide_inference):
model = NormalNormal()
auto_guide = auto_guide_inference(
queries=[model.mu()],
observations={
model.x(1): torch.tensor(9.0),
model.x(2): torch.tensor(10.0),
},
optimizer=lambda params: torch.optim.Adam(params, lr=1e0),
)
world = auto_guide.infer(
num_steps=100,
)
mu_approx = world.get_guide_distribution(model.mu())
sample_mean = mu_approx.sample((100,)).mean()
assert sample_mean > 5.0
if auto_guide_inference == ADVI:
sample_var = mu_approx.sample((100,)).var()
assert sample_var > 0.1
@pytest.mark.parametrize("auto_guide_inference", [ADVI, MAP])
def test_brlr(self, auto_guide_inference):
brlr = BayesianRobustLinearRegression(n=100, d=7)
auto_guide = auto_guide_inference(
queries=[brlr.beta()],
observations={
brlr.X(): brlr.X_train,
brlr.y(): brlr.y_train,
},
optimizer=lambda params: torch.optim.Adam(params, lr=1e-1),
)
world = auto_guide.infer(
num_steps=100,
)
beta_samples = world.get_guide_distribution(brlr.beta()).sample((100,))
for i in range(beta_samples.shape[1]):
assert torch.norm(beta_samples[:, i].mean() - brlr.beta_truth[i]) < 0.2
@pytest.mark.parametrize(
"auto_guide_inference, expected", [(ADVI, 1.0), (MAP, 0.0)]
)
def test_constrained_positive_reals(self, auto_guide_inference, expected):
exp = dist.Exponential(torch.tensor([1.0]))
positive_rv = bm.random_variable(lambda: exp)
auto_guide = auto_guide_inference(queries=[positive_rv()], observations={})
world = auto_guide.infer(num_steps=100)
assert (
abs(
world.get_guide_distribution(positive_rv()).sample((100,)).mean().item()
- expected
)
<= 0.2
)
@pytest.mark.parametrize("auto_guide_inference", [ADVI, MAP])
def test_constrained_interval(self, auto_guide_inference):
beta = dist.Beta(torch.tensor([1.0]), torch.tensor([1.0]))
interval_rv = bm.random_variable(lambda: beta)
auto_guide = auto_guide_inference(
queries=[interval_rv()],
observations={},
)
world = auto_guide.infer(num_steps=100)
assert (
abs(
world.get_guide_distribution(interval_rv()).sample((100,)).mean().item()
- beta.mean
)
<= 0.2
)
@pytest.mark.parametrize("auto_guide_inference", [ADVI, MAP])
def test_dirichlet(self, auto_guide_inference):
dirichlet = dist.Dirichlet(2 * torch.ones(2))
alpha = bm.random_variable(lambda: dirichlet)
auto_guide = auto_guide_inference([alpha()], {})
world = auto_guide.infer(num_steps=100)
map_truth = torch.tensor([0.5, 0.5])
vi_estimate = world.get_guide_distribution(alpha()).sample((100,)).mean(dim=0)
assert vi_estimate.isclose(map_truth, atol=0.1).all().item()
class TestStochasticVariationalInfer:
@pytest.fixture(autouse=True)
def set_seed(self):
bm.seed(41)
def test_normal_normal_guide(self):
normal_normal_model = NormalNormal()
log_scale_normal_model = LogScaleNormal()
world = VariationalInfer(
queries_to_guides={normal_normal_model.mu(): log_scale_normal_model.q_mu()},
observations={
normal_normal_model.x(1): torch.tensor(9.0),
normal_normal_model.x(2): torch.tensor(10.0),
},
optimizer=lambda params: torch.optim.Adam(params, lr=1e0),
).infer(
num_steps=100,
)
mu_approx = world.get_variable(log_scale_normal_model.q_mu()).distribution
sample_mean = mu_approx.sample((100, 1)).mean()
assert sample_mean > 5.0
sample_var = mu_approx.sample((100, 1)).var()
assert sample_var > 0.1
def test_normal_normal_guide_step(self):
normal_normal_model = NormalNormal()
log_scale_normal_model = LogScaleNormal()
# 100 steps, each 1 iteration
world = VariationalInfer(
queries_to_guides={
normal_normal_model.mu(): log_scale_normal_model.q_mu(),
},
observations={
normal_normal_model.x(1): torch.tensor(9.0),
normal_normal_model.x(2): torch.tensor(10.0),
},
optimizer=lambda params: torch.optim.Adam(params, lr=1e0),
).infer(num_steps=100)
mu_approx = world.get_variable(log_scale_normal_model.q_mu()).distribution
sample_mean = mu_approx.sample((100, 1)).mean()
assert sample_mean > 5.0
sample_var = mu_approx.sample((100, 1)).var()
assert sample_var > 0.1
def test_conditional_guide(self):
@bm.random_variable
def mu():
return dist.Normal(torch.zeros(1), torch.ones(1))
@bm.random_variable
def alpha():
return dist.Normal(torch.zeros(1), torch.ones(1))
@bm.random_variable
def x(i):
return dist.Normal(mu() + alpha(), torch.ones(1))
@bm.param
def phi_mu():
return torch.zeros(2) # mean, log std
@bm.random_variable
def q_mu():
params = phi_mu()
return dist.Normal(params[0] - alpha(), params[1].exp())
@bm.param
def phi_alpha():
return torch.zeros(2) # mean, log std
@bm.random_variable
def q_alpha():
params = phi_alpha()
return dist.Normal(params[0], params[1].exp())
world = VariationalInfer(
queries_to_guides={
mu(): q_mu(),
alpha(): q_alpha(),
},
observations={
x(1): torch.tensor(9.0),
x(2): torch.tensor(10.0),
},
optimizer=lambda params: torch.optim.Adam(params, lr=1e0),
).infer(
num_steps=100,
)
vi = VariationalInfer(
queries_to_guides={
mu(): q_mu(),
alpha(): q_alpha(),
},
observations={
x(1): torch.tensor(9.0),
x(2): torch.tensor(10.0),
},
optimizer=lambda params: torch.optim.Adam(params, lr=1e-1),
)
vi.infer(num_steps=100)
world = VariationalWorld(
params=vi.params,
observations={
**{alpha(): torch.tensor(10.0)},
**vi.observations,
},
)
mu_approx, _ = world._run_node(q_mu())
sample_mean_alpha_10 = mu_approx.sample((100, 1)).mean()
world = VariationalWorld(
params=vi.params,
observations={
**{alpha(): torch.tensor(-10.0)},
**vi.observations,
},
)
mu_approx, _ = world._run_node(q_mu())
sample_mean_alpha_neg_10 = mu_approx.sample((100, 1)).mean()
assert sample_mean_alpha_neg_10 > sample_mean_alpha_10
def test_discrete_mixture(self):
model = BinaryGaussianMixture()
N = 10
with bm.world.World.initialize_world(
itertools.chain.from_iterable([model.x(i), model.h(i)] for i in range(N)),
initialize_fn=init_from_prior,
):
data = torch.tensor([[model.x(i), model.h(i)] for i in range(N)])
@bm.param
def phi(i):
return torch.tensor(0.5, requires_grad=True)
@bm.random_variable
def q_h(i):
return dist.Bernoulli(logits=phi(i))
vi = VariationalInfer(
queries_to_guides={model.h(i): q_h(i) for i in range(N)},
observations={model.x(i): data[i, 0] for i in range(N)},
optimizer=lambda p: optim.Adam(p, lr=5e-1),
)
world = vi.infer(
num_steps=30, num_samples=50, mc_approx=monte_carlo_approximate_sf
)
accuracy = (
(
(
torch.stack(
[
world.get_guide_distribution(model.h(i)).probs
for i in range(N)
]
)
> 0.5
)
== data[:, 1]
)
.float()
.mean()
)
assert accuracy.float().item() > 0.80
def test_logistic_regression(self):
n, d = 500, 2
X = torch.randn(n, d)
W = torch.randn(d)
@bm.random_variable
def y():
return dist.Independent(
dist.Bernoulli(probs=torch.sigmoid(X @ W)),
1,
)
@bm.param
def w():
return torch.randn(d)
@bm.random_variable
def q_y():
weights = w()
data = X
p = torch.sigmoid(data @ weights)
return dist.Independent(
dist.Bernoulli(probs=p),
1,
)
world = VariationalInfer(
queries_to_guides={y(): q_y()},
observations={},
optimizer=lambda params: torch.optim.Adam(params, lr=3e-2),
).infer(
num_steps=4000,
num_samples=1,
# NOTE: since y/q_y are discrete and not reparameterizable, we must
# use the score function estimator
mc_approx=monte_carlo_approximate_sf,
)
l2_error = (world.get_param(w()) - W).norm()
assert l2_error < 0.5
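# The score-function (REINFORCE) estimator used above relies on the identity
#   grad_phi E_{q_phi}[f(z)] = E_{q_phi}[f(z) * grad_phi log q_phi(z)]
# (for f not itself depending on phi), which only needs log q to be
# differentiable, so it applies to the discrete Bernoulli guide here where
# the reparameterization trick does not.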
def test_subsample(self):
# mu ~ N(0, 10) and x | mu ~ N(mu, 1)
num_total = 3
normal_normal_model = NormalNormal(mean_0=1, variance_0=100, variance_x=1)
log_scale_normal_model = LogScaleNormal()
total_observations = {
normal_normal_model.x(i): torch.tensor(1.0) for i in range(num_total)
}
expected_mean = normal_normal_model.conjugate_posterior(total_observations).mean
expected_stddev = normal_normal_model.conjugate_posterior(
total_observations
).stddev
for num_samples in range(1, num_total):
world = VariationalInfer(
queries_to_guides={
normal_normal_model.mu(): log_scale_normal_model.q_mu(),
},
observations={
normal_normal_model.x(i): torch.tensor(1.0)
for i in range(num_samples)
},
optimizer=lambda params: torch.optim.Adam(params, lr=5e-2),
).infer(
num_steps=50,
subsample_factor=num_samples / num_total,
num_samples=10,
)
mu_approx = world.get_guide_distribution(normal_normal_model.mu())
assert (mu_approx.mean - expected_mean).norm() < 0.05
assert (mu_approx.stddev - expected_stddev).norm() < 0.05
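# Note on the check above: with subsample_factor = num_samples / num_total the
# subsampled log likelihood is presumably rescaled by its reciprocal inside the
# ELBO, so the guide still targets the full-data posterior; test_subsample_fail
# below shows that leaving the factor at 1.0 biases the fit.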
def test_subsample_fail(self):
# mu ~ N(0, 10) and x | mu ~ N(mu, 1)
num_total = 3
normal_normal_model = NormalNormal(mean_0=1, variance_0=100, variance_x=1)
log_scale_normal_model = LogScaleNormal()
total_observations = {
normal_normal_model.x(i): torch.tensor(1.0) for i in range(num_total)
}
expected_mean = normal_normal_model.conjugate_posterior(total_observations).mean
expected_stddev = normal_normal_model.conjugate_posterior(
total_observations
).stddev
for num_samples in range(1, num_total):
world = VariationalInfer(
queries_to_guides={
normal_normal_model.mu(): log_scale_normal_model.q_mu(),
},
observations={
normal_normal_model.x(i): torch.tensor(1.0)
for i in range(num_samples)
},
optimizer=lambda params: torch.optim.Adam(params, lr=5e-2),
).infer(
num_steps=50,
subsample_factor=1.0,
num_samples=10,
)
mu_approx = world.get_guide_distribution(normal_normal_model.mu())
assert (mu_approx.mean - expected_mean).norm() > 0.05 or (
mu_approx.stddev - expected_stddev
).norm() > 0.05
| beanmachine-main | tests/ppl/inference/vi_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.vi import VariationalInfer
cpu_device = torch.device("cpu")
class NormalNormal:
def __init__(self, device: Optional[torch.device] = cpu_device):
self.device = device
@bm.random_variable
def mu(self):
return dist.Normal(
torch.zeros(1).to(self.device), 10 * torch.ones(1).to(self.device)
)
@bm.random_variable
def x(self, i):
return dist.Normal(self.mu(), torch.ones(1).to(self.device))
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="requires GPU access to train the model"
)
def test_normal_normal_guide_step_gpu():
device = torch.device("cuda:0")
model = NormalNormal(device=device)
@bm.param
def phi():
return torch.zeros(2).to(device) # mean, log std
@bm.random_variable
def q_mu():
params = phi()
return dist.Normal(params[0], params[1].exp())
world = VariationalInfer(
queries_to_guides={model.mu(): q_mu()},
observations={
model.x(1): torch.tensor(9.0),
model.x(2): torch.tensor(10.0),
},
optimizer=lambda params: torch.optim.Adam(params, lr=1e-1),
device=device,
).infer(
num_steps=1000,
)
mu_approx = world.get_variable(q_mu()).distribution
assert (mu_approx.mean - 9.6).norm() < 1.0
assert (mu_approx.stddev - 0.7).norm() < 0.3
| beanmachine-main | tests/ppl/inference/vi_gpu_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.examples.conjugate_models import (
BetaBinomialModel,
CategoricalDirichletModel,
GammaNormalModel,
NormalNormalModel,
)
# A distribution which apparently takes values on the full number line,
# but in reality it only returns zero when sampled from.
class RealSupportDist(dist.Distribution):
has_enumerate_support = False
support = dist.constraints.real
has_rsample = True
arg_constraints = {}
# Ancestral sampling will only return zero.
def rsample(self, sample_shape):
return torch.zeros(sample_shape)
# Not a properly defined PDF on the full support, but allows MCMC to explore.
def log_prob(self, value):
return torch.zeros(value.shape)
# A distribution which apparently takes values on the non-negative number line,
# but in reality it only returns 1 when sampled from.
class HalfRealSupportDist(dist.Distribution):
has_enumerate_support = False
support = dist.constraints.greater_than(lower_bound=0.0)
has_rsample = True
arg_constraints = {}
# Ancestral sampling will only return one.
def rsample(self, sample_shape):
return torch.ones(sample_shape)
# Not a properly defined PDF on the full support, but allows MCMC to explore.
def log_prob(self, value):
return torch.zeros(value.shape)
# A distribution which apparently takes values on an interval of the number line,
# but in reality it only returns 3 when sampled from.
class IntervalRealSupportDist(dist.Distribution):
has_enumerate_support = False
support = dist.constraints.interval(lower_bound=2.0, upper_bound=20.0)
has_rsample = True
arg_constraints = {}
# Ancestral sampling will only return three.
def rsample(self, sample_shape):
return 3 * torch.ones(sample_shape)
# Not a properly defined PDF on the full support, but allows MCMC to explore.
def log_prob(self, value):
return torch.zeros(value.shape)
# A distribution which apparently takes values on the non-negative integers,
# but in reality it only returns zero when sampled from.
class IntegerSupportDist(dist.Distribution):
has_enumerate_support = False
support = dist.constraints.integer_interval(0.0, 100.0)
has_rsample = True
arg_constraints = {}
# Ancestral sampling will only return zero.
def rsample(self, sample_shape):
return torch.zeros(sample_shape)
# Not a properly defined PDF on the full support, but allows MCMC to explore.
def log_prob(self, value):
return torch.zeros(value.shape)
class SingleSiteRandomWalkTest(unittest.TestCase):
"""
These tests exercise the control flow which branches
based on node_distribution.support
"""
class RealSupportModel(object):
@bm.random_variable
def p(self):
return RealSupportDist()
@bm.random_variable
def q(self):
return dist.Normal(self.p(), torch.tensor(1.0))
class HalfRealSupportModel(object):
@bm.random_variable
def p(self):
return HalfRealSupportDist()
@bm.random_variable
def q(self):
return dist.Normal(self.p(), torch.tensor(1.0))
class IntervalRealSupportModel(object):
def __init__(self):
self.lower_bound = IntervalRealSupportDist().support.lower_bound
self.upper_bound = IntervalRealSupportDist().support.upper_bound
@bm.random_variable
def p(self):
return IntervalRealSupportDist()
@bm.random_variable
def q(self):
return dist.Normal(self.p(), torch.tensor(1.0))
class IntegerSupportModel(object):
@bm.random_variable
def p(self):
return IntegerSupportDist()
@bm.random_variable
def q(self):
return dist.Normal(self.p(), torch.tensor(1.0))
def test_single_site_random_walk_full_support(self):
model = self.RealSupportModel()
mh = bm.SingleSiteRandomWalk()
p_key = model.p()
queries = [p_key]
observations = {model.q(): torch.tensor(1.0)}
predictions = mh.infer(queries, observations, 100)
predictions = predictions.get_chain()[p_key]
"""
If the ancestral sampler is used, then every sample
drawn from the chain will be 0. This is true by
the construction of the rsample function.
Conversely, normal noise != 0 w.p. 1, giving some sample which != 0.
For RealSupportModel, we expect the RW sampler to be used.
"""
self.assertIn(False, [0 == pred for pred in predictions])
def test_single_site_random_walk_half_support(self):
model = self.HalfRealSupportModel()
mh = bm.SingleSiteRandomWalk()
p_key = model.p()
queries = [p_key]
observations = {model.q(): torch.tensor(100.0)}
predictions = mh.infer(queries, observations, 100)
predictions = predictions.get_chain()[p_key]
# Discard the first sample, it may not be drawn from the node's distribution
predictions = predictions[1:]
"""
If the ancestral sampler is used, then every sample
drawn from the chain will be 1. This is true by
the construction of the rsample function.
If RW is correctly reached by control flow, then rsample will
draw from a Gamma distribution.
"""
self.assertIn(False, [pred == 1 for pred in predictions])
def test_single_site_random_walk_interval_support(self):
lower_bound = IntervalRealSupportDist().support.lower_bound
upper_bound = IntervalRealSupportDist().support.upper_bound
# Test for a single item of evidence
def inner_fnc(evidence: torch.Tensor):
model = self.IntervalRealSupportModel()
mh = bm.SingleSiteRandomWalk()
p_key = model.p()
queries = [p_key]
observations = {model.q(): evidence.detach().clone()}
predictions = mh.infer(queries, observations, 20)
predictions = predictions.get_chain()[p_key]
"""
All generated samples should remain in the correct support
if the transform is computed properly
"""
self.assertNotIn(
False, [lower_bound <= pred <= upper_bound for pred in predictions]
)
# We're mostly interested in the boundary cases
evidences = torch.cat(
(
torch.linspace(lower_bound + 0.1, lower_bound + 1, 4),
torch.linspace(upper_bound - 1, upper_bound - 0.1, 4),
)
)
for e in evidences:
inner_fnc(e)
"""
Adaptive
"""
def test_single_site_adaptive_random_walk(self):
model = NormalNormalModel(
mu=torch.tensor(0.0), std=torch.tensor(1.0), sigma=torch.ones(1)
)
mh = bm.SingleSiteRandomWalk(step_size=4)
p_key = model.normal_p()
queries = [p_key]
observations = {model.normal(): torch.tensor(100.0)}
predictions = mh.infer(queries, observations, 100, num_adaptive_samples=30)
predictions = predictions.get_chain()[p_key]
self.assertIn(True, [45 < pred < 55 for pred in predictions])
"""
These tests test for quick approximate convergence in conjugate models.
"""
def test_single_site_random_walk_rate(self):
model = NormalNormalModel(
mu=torch.zeros(1), std=torch.ones(1), sigma=torch.ones(1)
)
mh = bm.SingleSiteRandomWalk(step_size=10)
p_key = model.normal_p()
queries = [p_key]
observations = {model.normal(): torch.tensor(100.0)}
predictions = mh.infer(queries, observations, 100)
predictions = predictions.get_chain()[p_key]
self.assertIn(True, [45 < pred < 55 for pred in predictions])
def test_single_site_random_walk_rate_vector(self):
model = NormalNormalModel(
mu=torch.zeros(2), std=torch.ones(2), sigma=torch.ones(2)
)
mh = bm.SingleSiteRandomWalk(step_size=10)
p_key = model.normal_p()
queries = [p_key]
observations = {model.normal(): torch.tensor([100.0, -100.0])}
predictions = mh.infer(queries, observations, 100)
predictions = predictions.get_chain()[p_key]
self.assertIn(True, [45 < pred[0] < 55 for pred in predictions])
self.assertIn(True, [-55 < pred[1] < -45 for pred in predictions])
def test_single_site_random_walk_half_support_rate(self):
model = GammaNormalModel(
shape=torch.ones(1), rate=torch.ones(1), mu=torch.ones(1)
)
mh = bm.SingleSiteRandomWalk(step_size=4.0)
p_key = model.gamma()
queries = [p_key]
observations = {model.normal(): torch.tensor([100.0])}
predictions = mh.infer(queries, observations, 100)
predictions = predictions.get_chain()[p_key]
"""
Our single piece of evidence is the observed value 100.
100 is a very large observation w.r.t. our model of mu = 1. This
implies that the normal distribution has very high variance, so samples
from the Gamma distribution will have very small values in expectation.
For RWMH with large step size, we expect to see this in < 100 steps.
"""
self.assertIn(True, [pred < 0.01 for pred in predictions])
def test_single_site_random_walk_interval_support_rate(self):
model = BetaBinomialModel(
alpha=torch.ones(1) * 2.0, beta=torch.ones(1), n=torch.ones(1) * 10.0
)
mh = bm.SingleSiteRandomWalk(step_size=0.3)
p_key = model.theta()
queries = [p_key]
observations = {model.x(): torch.tensor([10.0])}
predictions = mh.infer(queries, observations, 50)
predictions = predictions.get_chain()[p_key]
"""
Our single piece of evidence is the observed value 10.
This is a large observation w.r.t. our model. This
implies that the Binomial distribution has a very large parameter p, so
samples from the Beta distribution will have similarly large values in
expectation. For RWMH with small step size, we expect to accept enough
proposals to reach this value in < 50 steps.
"""
self.assertIn(True, [pred > 0.9 for pred in predictions])
def test_single_site_random_walk_simplex_support_rate(self):
model = CategoricalDirichletModel(alpha=torch.tensor([1.0, 10.0]))
mh = bm.SingleSiteRandomWalk(step_size=1.0)
p_key = model.dirichlet()
queries = [p_key]
observations = {model.categorical(): torch.tensor([1.0, 1.0, 1.0])}
predictions = mh.infer(queries, observations, 50)
predictions = predictions.get_chain()[p_key]
"""
Our single piece of evidence is the observed value 1.
This is a large observation w.r.t. the simplex, which has interval [0,1].
Based on our model, we expect that this evidence is drawn from
category 1 rather than category 0. So pred[0] << pred[1] typically.
"""
self.assertIn(True, [pred[0] < 0.1 for pred in predictions])
| beanmachine-main | tests/ppl/inference/single_site_random_walk_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
class IntegrationTest(unittest.TestCase):
class LogisticRegressionModel(object):
@bm.random_variable
def theta_0(self):
return dist.Normal(0.0, 1.0)
@bm.random_variable
def theta_1(self):
return dist.Normal(0.0, torch.ones(3))
@bm.random_variable
def y(self, X):
logits = (X * self.theta_1() + self.theta_0()).sum(-1)
return dist.Bernoulli(logits=logits)
def test_logistic_regression(self):
torch.manual_seed(1)
true_coefs = torch.tensor([1.0, 2.0, 3.0])
true_intercept = torch.tensor(1.0)
X = torch.randn(3000, 3)
Y = dist.Bernoulli(logits=(X * true_coefs + true_intercept).sum(-1)).sample()
model = self.LogisticRegressionModel()
nw = bm.SingleSiteNewtonianMonteCarlo()
samples_nw = nw.infer(
queries=[model.theta_1(), model.theta_0()],
observations={model.y(X): Y},
num_samples=1000,
num_chains=1,
)
coefs_mean = samples_nw[model.theta_1()].view(-1, 3).mean(0)
intercept_mean = samples_nw[model.theta_0()].view(-1).mean(0)
self.assertTrue(torch.isclose(coefs_mean, true_coefs, atol=0.15).all())
self.assertTrue(torch.isclose(intercept_mean, true_intercept, atol=0.15).all())
| beanmachine-main | tests/ppl/inference/inference_integration_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
from unittest.mock import patch
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.nuts_proposer import NUTSProposer
from beanmachine.ppl.inference.proposer.sequential_proposer import SequentialProposer
from beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (
SingleSiteAncestralProposer,
)
from beanmachine.ppl.inference.proposer.single_site_uniform_proposer import (
SingleSiteUniformProposer,
)
from beanmachine.ppl.world import World
class SampleModel:
@bm.random_variable
def foo(self, i: int):
return dist.Beta(2.0, 2.0)
@bm.random_variable
def bar(self, i: int):
return dist.Bernoulli(self.foo(i))
@bm.random_variable
def baz(self):
return dist.Normal(self.bar(0) + self.bar(1), 1.0)
class ChangingSupportSameShapeModel:
# the support of `component` is changing, but (because we indexed alpha
# by k) all random_variables have the same shape
@bm.random_variable
def K(self):
return dist.Poisson(rate=2.0)
@bm.random_variable
def alpha(self, k):
return dist.Dirichlet(torch.ones(k))
@bm.random_variable
def component(self, i):
alpha = self.alpha(self.K().int().item() + 2)
return dist.Categorical(alpha)
class ChangingShapeModel:
# here since we did not index alpha, its shape in each world is changing
@bm.random_variable
def K(self):
return dist.Poisson(rate=2.0)
@bm.random_variable
def alpha(self):
return dist.Dirichlet(torch.ones(self.K().int().item() + 2))
@bm.random_variable
def component(self, i):
return dist.Categorical(self.alpha())
def test_inference_config():
model = SampleModel()
nuts = bm.GlobalNoUTurnSampler()
compositional = bm.CompositionalInference({model.foo: nuts})
queries = [model.foo(0), model.foo(1)]
observations = {model.baz(): torch.tensor(2.0)}
# verify that inference can run without error
compositional.infer(queries, observations, num_chains=1, num_samples=10)
# verify that proposers are spawned correctly
world = World.initialize_world(queries, observations)
with patch.object(nuts, "get_proposers", wraps=nuts.get_proposers) as mock:
proposers = compositional.get_proposers(
world, target_rvs=world.latent_nodes, num_adaptive_sample=0
)
# NUTS should receive {foo(0), foo(1)} as its target rvs
mock.assert_called_once_with(world, {model.foo(0), model.foo(1)}, 0)
# there should be one NUTS proposer for both foo(0) and foo(1), one ancestral MH
# proposer for bar(0), and another ancestral MH proposer for bar(1)
assert len(proposers) == 3
# TODO: find a way to validate the proposer instead of relying on the order of
# return value
assert isinstance(proposers[0], NUTSProposer)
assert proposers[0]._target_rvs == {model.foo(0), model.foo(1)}
# the rest of nodes are updated by default proposers (uniform proposer for bernoulli)
assert isinstance(proposers[1], SingleSiteUniformProposer)
assert isinstance(proposers[2], SingleSiteUniformProposer)
assert {proposers[1].node, proposers[2].node} == {model.bar(0), model.bar(1)}
# test overriding default kwarg
compositional = bm.CompositionalInference(
{
model.foo: bm.GlobalNoUTurnSampler(),
...: bm.SingleSiteAncestralMetropolisHastings(),
}
)
compositional.infer(queries, observations, num_chains=1, num_samples=2)
world = World.initialize_world(queries, observations)
proposers = compositional.get_proposers(
world, target_rvs=world.latent_nodes, num_adaptive_sample=0
)
assert isinstance(proposers[0], NUTSProposer)
assert isinstance(proposers[1], SingleSiteAncestralProposer)
assert isinstance(proposers[2], SingleSiteAncestralProposer)
assert {proposers[1].node, proposers[2].node} == {model.bar(0), model.bar(1)}
def test_config_inference_with_tuple_of_rv():
model = SampleModel()
nuts = bm.GlobalNoUTurnSampler()
compositional = bm.CompositionalInference({(model.foo, model.baz): nuts})
world = World.initialize_world([model.baz()], {})
with patch.object(nuts, "get_proposers", wraps=nuts.get_proposers) as mock:
compositional.get_proposers(
world, target_rvs=world.latent_nodes, num_adaptive_sample=10
)
# NUTS should receive {foo(0), foo(1), model.baz()} as its target rvs
mock.assert_called_once_with(
world, {model.foo(0), model.foo(1), model.baz()}, 10
)
def test_config_inference_with_tuple_of_inference():
model = SampleModel()
compositional = bm.CompositionalInference(
{
(model.foo, model.bar): (
bm.SingleSiteAncestralMetropolisHastings(),
bm.SingleSiteUniformMetropolisHastings(),
),
model.baz: bm.GlobalNoUTurnSampler(),
}
)
# verify that inference can run without error
compositional.infer([model.baz()], {}, num_chains=1, num_samples=10)
# examine the proposer types
world = World.initialize_world([model.baz()], {})
proposers = compositional.get_proposers(
world, target_rvs=world.latent_nodes, num_adaptive_sample=10
)
assert len(proposers) == 2
sequential_proposer = proposers[int(isinstance(proposers[0], NUTSProposer))]
assert isinstance(sequential_proposer, SequentialProposer)
assert len(sequential_proposer.proposers) == 4
proposer_count = Counter(map(type, sequential_proposer.proposers))
assert proposer_count[SingleSiteAncestralProposer] == 2
def test_nested_compositional_inference():
model = SampleModel()
ancestral_mh = bm.SingleSiteAncestralMetropolisHastings()
compositional = bm.CompositionalInference(
{
(model.foo, model.bar): bm.CompositionalInference(
{
model.foo: bm.GlobalNoUTurnSampler(),
# this ancestral mh class is never going to be invoked
model.baz: ancestral_mh,
}
)
}
)
with patch.object(
ancestral_mh, "get_proposers", wraps=ancestral_mh.get_proposers
) as mock:
# verify that inference can run without error
compositional.infer([model.baz()], {}, num_chains=1, num_samples=10)
# the ancestral_mh instance shouldn't been invoked at all
mock.assert_not_called()
def test_block_inference_with_default_algorithm():
model = SampleModel()
# block foo and baz together, but uses the default inference
compositional = bm.CompositionalInference({(model.foo, model.bar, model.baz): ...})
# make sure that things can run without failure
queries = [model.baz()]
observations = {}
compositional.infer(queries, observations, num_chains=1, num_samples=10)
# check to see if proposers are indeed blocked together
world = World.initialize_world(queries, observations)
proposers = compositional.get_proposers(world, world.latent_nodes, 0)
assert len(proposers) == 1
assert isinstance(proposers[0], SequentialProposer)
@pytest.mark.xfail(
raises=RuntimeError,
reason="Need to redesign how change in support is being handled in block inference",
)
def test_block_inference_changing_support():
model = ChangingSupportSameShapeModel()
queries = [model.K()] + [model.component(j) for j in range(3)]
compositional = bm.CompositionalInference(
{
(model.K, model.component): bm.SingleSiteAncestralMetropolisHastings(),
...: bm.SingleSiteNewtonianMonteCarlo(),
},
)
sampler = compositional.sampler(queries, {}, num_samples=10, num_adaptive_samples=5)
old_world = next(sampler)
for world in sampler: # this should run without failing
# since it's actually possible to sample two identical values, we need
# to check for tensor identity
if world[model.K()] is not old_world[model.K()]:
# if one of the node in a block is updated, the rest of the nodes should
# also been updated
for i in range(3):
assert world[model.component(i)] is not old_world[model.component(i)]
else:
# just as a sanity check to show that the tensor identity check is doing
# what we expected
assert world[model.component(0)] is old_world[model.component(0)]
old_world = world
# disable NNC because changing support => non-static model
compositional = bm.CompositionalInference(
{(model.K, model.component): bm.SingleSiteAncestralMetropolisHastings()},
nnc_compile=False,
)
sampler = compositional.sampler(queries, {})
with pytest.raises(KeyError):
world = next(sampler)
# since the support of Poisson is all natural numbers, it's possible that we
# sample a new value of K that's 1 greater than the current one...
K_val = world.call(model.K())
new_world = world.replace({model.K(): K_val + 1})
# Since NUTS only supports static models, this is going to raise an error
# TODO: this error is thrown in hmc_utils when fetching
# transforms but should be checked earlier in the model
sampler.send(new_world)
def test_block_inference_changing_shape():
model = ChangingShapeModel()
queries = [model.K()] + [model.component(j) for j in range(3)]
# disable NNC because changing shape => non-static model
compositional = bm.CompositionalInference(nnc_compile=False)
# cannot perform inference since the shape of alpha can change if the value
# of K changes
with pytest.raises(RuntimeError):
compositional.infer(queries, {}, num_samples=10, num_chains=1)
def test_default_num_adaptive_samples():
model = SampleModel()
num_samples = 100
compositional = bm.CompositionalInference(
{
model.bar: bm.SingleSiteAncestralMetropolisHastings(),
...: bm.SingleSiteRandomWalk(),
}
)
# none of the methods in compositional requires adaptation, so default to 0
assert compositional._get_default_num_adaptive_samples(num_samples) == 0
compositional = bm.CompositionalInference(
{
model.foo: bm.GlobalNoUTurnSampler(),
model.bar: bm.SingleSiteAncestralMetropolisHastings(),
}
)
# default to num_samples // 2 due to NUTS' default
assert (
compositional._get_default_num_adaptive_samples(num_samples) == num_samples // 2
)
| beanmachine-main | tests/ppl/inference/compositional_infer_test.py |
| beanmachine-main | tests/ppl/inference/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
class SampleModel:
@bm.random_variable
def foo(self):
return dist.Normal(0.0, 1.0)
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), 1.0)
def test_sampler():
model = SampleModel()
nuts = bm.GlobalNoUTurnSampler()
queries = [model.foo()]
observations = {model.bar(): torch.tensor(0.5)}
num_samples = 10
sampler = nuts.sampler(queries, observations, num_samples, num_adaptive_samples=0)
worlds = list(sampler)
assert len(worlds) == num_samples
for world in worlds:
assert model.foo() in world
with world:
assert isinstance(model.foo(), torch.Tensor)
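# Note: inside a `with world` context, calling a random variable returns its
# sampled value in that world (hence the tensor check above) rather than an
# RVIdentifier.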
def test_two_samplers():
model = SampleModel()
queries = [model.foo()]
observations = {model.bar(): torch.tensor(0.5)}
nuts_sampler = bm.GlobalNoUTurnSampler().sampler(queries, observations)
hmc_sampler = bm.GlobalHamiltonianMonteCarlo(1.0).sampler(queries, observations)
world = next(nuts_sampler)
# it's possible to use multiple samplers interchangeably to update the worlds (or
# in general, pass a new world to a sampler and continue inference with existing
# hyperparameters)
for _ in range(3):
world = hmc_sampler.send(world)
world = nuts_sampler.send(world)
assert model.foo() in world
assert model.bar() in world
| beanmachine-main | tests/ppl/inference/sampler_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class SingleSiteRandomWalkConjugateTest(unittest.TestCase, AbstractConjugateTests):
def setUp(self):
self.mh = bm.SingleSiteRandomWalk(step_size=1.0)
def test_beta_binomial_conjugate_run(self):
mh = bm.SingleSiteRandomWalk(step_size=0.3)
self.beta_binomial_conjugate_run(mh, num_samples=5000)
def test_gamma_gamma_conjugate_run(self):
self.gamma_gamma_conjugate_run(self.mh, num_samples=10000)
def test_gamma_normal_conjugate_run(self):
self.gamma_normal_conjugate_run(self.mh, num_samples=10000)
def test_normal_normal_conjugate_run(self):
mh = bm.SingleSiteRandomWalk(step_size=1.5)
self.normal_normal_conjugate_run(mh, num_samples=1000)
def test_dirichlet_categorical_conjugate_run(self):
self.dirichlet_categorical_conjugate_run(self.mh, num_samples=10000)
| beanmachine-main | tests/ppl/inference/single_site_random_walk_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
@bm.random_variable
def f():
return 123 # BAD, needs to be a distribution
@bm.random_variable
def g(n):
pass
@bm.functional
def h():
return 123 # BAD; needs to be a tensor
@bm.random_variable
def flip():
return dist.Bernoulli(0.5)
class ErrorDist(torch.distributions.Distribution):
arg_constraints = {}
support = torch.distributions.constraints.real
def __init__(self):
self.counter = 0
super().__init__()
def sample(self):
self.counter += 1
if self.counter == 20:
# throw error
torch.linalg.cholesky(torch.zeros(3, 3))
return torch.randn(1)
def log_prob(self, value):
self.counter += 1
if self.counter == 5:
torch.linalg.cholesky(torch.zeros(3, 3))
return -(value**2)
@bm.random_variable
def bad():
return ErrorDist()
def test_inference_error_reporting():
mh = bm.SingleSiteAncestralMetropolisHastings()
with pytest.raises(TypeError) as ex:
mh.infer(None, {}, 10)
assert (
str(ex.value)
== "Parameter 'queries' is required to be a list but is of type NoneType."
)
with pytest.raises(TypeError) as ex:
mh.infer([], 123, 10)
assert (
str(ex.value)
== "Parameter 'observations' is required to be a dictionary but is of type int."
)
# Should be f():
with pytest.raises(TypeError) as ex:
mh.infer([f], {}, 10)
assert (
str(ex.value)
== "A query is required to be a random variable but is of type function."
)
# Should be f():
with pytest.raises(TypeError) as ex:
mh.infer([f()], {f: torch.tensor(True)}, 10)
assert (
str(ex.value)
== "An observation is required to be a random variable but is of type function."
)
# Should be a tensor
with pytest.raises(TypeError) as ex:
mh.infer([f()], {f(): 123.0}, 10)
assert (
str(ex.value)
== "An observed value is required to be a tensor but is of type float."
)
# You can't make inferences on rv-of-rv
with pytest.raises(TypeError) as ex:
mh.infer([g(f())], {}, 10)
assert str(ex.value) == "The arguments to a query must not be random variables."
# You can't make inferences on rv-of-rv
with pytest.raises(TypeError) as ex:
mh.infer([f()], {g(f()): torch.tensor(123)}, 10)
assert (
str(ex.value) == "The arguments to an observation must not be random variables."
)
# SSAMH requires that observations be of random variables, not
# functionals
with pytest.raises(TypeError) as ex:
mh.infer([f()], {h(): torch.tensor(123)}, 10)
assert (
str(ex.value)
== "An observation must observe a random_variable, not a functional."
)
# A functional is required to return a tensor.
with pytest.raises(TypeError) as ex:
mh.infer([h()], {}, 10)
assert str(ex.value) == "The value returned by a queried function must be a tensor."
# A random_variable is required to return a distribution
with pytest.raises(TypeError) as ex:
mh.infer([f()], {}, 10)
assert str(ex.value) == "A random_variable is required to return a distribution."
# The lookup key to the samples object is required to be an RVID.
with pytest.raises(TypeError) as ex:
mh.infer([flip()], {}, 10)[flip]
assert (
str(ex.value)
== "The key is required to be a random variable but is of type function."
)
def test_handle_cholesky_error():
mh = bm.SingleSiteAncestralMetropolisHastings()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning
samples = mh.infer([bad()], {}, 20, num_chains=1)
# Verify that the warning is triggered
assert len(w) == 1
assert "Proposal rejected" in str(w[-1])
# Verify that the inference finishes with the right number of samples
assert samples[bad()].shape == (1, 20, 1)
def test_cholesky_error_nuts_adaptation():
nuts = bm.SingleSiteNoUTurnSampler()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning
samples = nuts.infer([bad()], {}, 20, num_chains=1, num_adaptive_samples=30)
# Verify that the warning is triggered
assert len(w) == 1
assert "Numerical error" in str(w[-1])
# Verify that the inference finishes with the right number of samples
assert samples[bad()].shape == (1, 20, 1)
| beanmachine-main | tests/ppl/inference/inference_error_reporting_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
class PredictiveTest(unittest.TestCase):
@bm.random_variable
def prior(self):
return dist.Uniform(torch.tensor(0.0), torch.tensor(1.0))
@bm.random_variable
def likelihood(self):
return dist.Bernoulli(self.prior())
@bm.random_variable
def likelihood_i(self, i):
return dist.Bernoulli(self.prior())
@bm.random_variable
def prior_1(self):
return dist.Uniform(torch.tensor([0.0]), torch.tensor([1.0]))
@bm.random_variable
def likelihood_1(self):
return dist.Bernoulli(self.prior_1())
@bm.random_variable
def likelihood_dynamic(self, i):
if self.likelihood_i(i).item() > 0:
return dist.Normal(torch.zeros(1), torch.ones(1))
else:
return dist.Normal(5.0 * torch.ones(1), torch.ones(1))
@bm.random_variable
def prior_2(self):
return dist.Uniform(torch.zeros(1, 2), torch.ones(1, 2))
@bm.random_variable
def likelihood_2(self, i):
return dist.Bernoulli(self.prior_2())
@bm.random_variable
def likelihood_2_vec(self, i):
return dist.Bernoulli(self.prior_2())
@bm.random_variable
def likelihood_reg(self, x):
return dist.Normal(self.prior() * x, torch.tensor(1.0))
def test_prior_predictive(self):
queries = [self.prior(), self.likelihood()]
predictives = bm.simulate(queries, num_samples=10)
assert predictives[self.prior()].shape == (1, 10)
assert predictives[self.likelihood()].shape == (1, 10)
def test_posterior_predictive(self):
obs = {
self.likelihood_i(0): torch.tensor(1.0),
self.likelihood_i(1): torch.tensor(0.0),
}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior()], obs, num_samples=10, num_chains=2
)
assert post_samples[self.prior()].shape == (2, 10)
predictives = bm.simulate(list(obs.keys()), post_samples, vectorized=True)
assert predictives[self.likelihood_i(0)].shape == (2, 10)
assert predictives[self.likelihood_i(1)].shape == (2, 10)
def test_posterior_predictive_seq(self):
obs = {
self.likelihood_i(0): torch.tensor(1.0),
self.likelihood_i(1): torch.tensor(0.0),
}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior()], obs, num_samples=10, num_chains=2
)
assert post_samples[self.prior()].shape == (2, 10)
predictives = bm.simulate(list(obs.keys()), post_samples, vectorized=False)
assert predictives[self.likelihood_i(0)].shape == (2, 10)
assert predictives[self.likelihood_i(1)].shape == (2, 10)
def test_predictive_dynamic(self):
obs = {
self.likelihood_dynamic(0): torch.tensor([0.9]),
self.likelihood_dynamic(1): torch.tensor([4.9]),
}
# only query one of the variables
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior()], obs, num_samples=10, num_chains=2
)
assert post_samples[self.prior()].shape == (2, 10)
predictives = bm.simulate(list(obs.keys()), post_samples, vectorized=False)
assert predictives[self.likelihood_dynamic(0)].shape == (2, 10)
assert predictives[self.likelihood_dynamic(1)].shape == (2, 10)
def test_predictive_data(self):
x = torch.randn(4)
y = torch.randn(4) + 2.0
obs = {self.likelihood_reg(x): y}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior()], obs, num_samples=10, num_chains=2
)
assert post_samples[self.prior()].shape == (2, 10)
test_x = torch.randn(4, 1, 1)
test_query = self.likelihood_reg(test_x)
predictives = bm.simulate([test_query], post_samples, vectorized=True)
assert predictives[test_query].shape == (4, 2, 10)
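# The leading 4 above comes from test_x's batch dimension: the (4, 1, 1) input
# broadcasts against the (num_chains, num_samples) = (2, 10) posterior draws.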
def test_posterior_predictive_1d(self):
obs = {self.likelihood_1(): torch.tensor([1.0])}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior_1()], obs, num_samples=10, num_chains=1
)
assert post_samples[self.prior_1()].shape == (1, 10, 1)
predictives = bm.simulate(list(obs.keys()), post_samples, vectorized=True)
shape = predictives[self.likelihood_1()].shape
assert shape == (1, 10, 1)
def test_multi_chain_infer_predictive_2d(self):
torch.manual_seed(10)
obs = {
self.likelihood_2(0): torch.tensor([[1.0, 1.0]]),
self.likelihood_2(1): torch.tensor([[0.0, 1.0]]),
}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior_2()], obs, num_samples=10, num_chains=2
)
assert post_samples[self.prior_2()].shape == (2, 10, 1, 2)
predictives = bm.simulate(list(obs.keys()), post_samples, vectorized=True)
predictive_0 = predictives[self.likelihood_2(0)]
predictive_1 = predictives[self.likelihood_2(1)]
assert predictive_0.shape == (2, 10, 1, 2)
assert predictive_1.shape == (2, 10, 1, 2)
assert (predictive_1 - predictive_0).sum().item() != 0
def test_empirical(self):
obs = {
self.likelihood_i(0): torch.tensor(1.0),
self.likelihood_i(1): torch.tensor(0.0),
self.likelihood_i(2): torch.tensor(0.0),
}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior()], obs, num_samples=10, num_chains=4
)
empirical = bm.empirical([self.prior()], post_samples, num_samples=26)
assert empirical[self.prior()].shape == (1, 26)
predictives = bm.simulate(list(obs.keys()), post_samples, vectorized=True)
empirical = bm.empirical(list(obs.keys()), predictives, num_samples=27)
assert len(empirical) == 3
assert empirical[self.likelihood_i(0)].shape == (1, 27)
assert empirical[self.likelihood_i(1)].shape == (1, 27)
def test_return_inference_data(self):
torch.manual_seed(10)
obs = {
self.likelihood_2(0): torch.tensor([[1.0, 1.0]]),
self.likelihood_2(1): torch.tensor([[0.0, 1.0]]),
}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior_2()], obs, num_samples=10, num_chains=2
)
assert post_samples[self.prior_2()].shape == (2, 10, 1, 2)
predictives = bm.simulate(
list(obs.keys()),
post_samples,
vectorized=True,
).to_inference_data()
assert "posterior" in predictives
assert "observed_data" in predictives
assert "log_likelihood" in predictives
assert "posterior_predictive" in predictives
assert predictives.posterior_predictive[self.likelihood_2(0)].shape == (
2,
10,
1,
2,
)
assert predictives.posterior_predictive[self.likelihood_2(1)].shape == (
2,
10,
1,
2,
)
def test_posterior_dict(self):
obs = {
self.likelihood_i(0): torch.tensor(1.0),
self.likelihood_i(1): torch.tensor(0.0),
}
posterior = {self.prior(): torch.tensor([0.5, 0.5])}
predictives_dict = bm.simulate(list(obs.keys()), posterior)
assert predictives_dict[self.likelihood_i(0)].shape == (1, 2)
assert predictives_dict[self.likelihood_i(1)].shape == (1, 2)
def test_posterior_dict_predictive(self):
obs = {
self.likelihood_i(0): torch.tensor(1.0),
self.likelihood_i(1): torch.tensor(0.0),
}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior()], obs, num_samples=10, num_chains=1
)
assert post_samples[self.prior()].shape == (1, 10)
post_samples_dict = dict(post_samples)
predictives_dict = bm.simulate(list(obs.keys()), post_samples_dict)
assert predictives_dict[self.likelihood_i(0)].shape == (1, 10)
assert predictives_dict[self.likelihood_i(1)].shape == (1, 10)
| beanmachine-main | tests/ppl/inference/predictive_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from sys import float_info
import torch.distributions as dist
from beanmachine.ppl.testlib.hypothesis_testing import (
inverse_normal_cdf,
mean_equality_hypothesis_confidence_interval,
mean_equality_hypothesis_test,
)
from numpy import sqrt
from torch import manual_seed, mean, tensor
class HypothesisTestingTest(unittest.TestCase):
"""This class tests the hypothesis test codes."""
# Uniformly distributed random numbers
def random(self, min_bound, max_bound):
# TODO: Consider replacing with: (max_bound - min_bound) * torch.rand(size) + min_bound
# where size = (max_bound+min_bound).size()
return dist.uniform.Uniform(min_bound, max_bound).sample()
# Determining the range of floating point values we will explore
def float_exponent_range(self, safety_factor=10):
"""Provided exponents for range of floating point
numbers we are willing to test. The parameter
safety_factor should always be greater than 1, and
is used to avoid pressing extreme values."""
exp_min = float_info.min_10_exp / safety_factor
exp_max = float_info.max_10_exp / safety_factor
return exp_min, exp_max
# Determining the range of distribution of means and stds we will explore
def random_mean_and_std(self, exp_min, exp_max):
"""Generates a mean and std from a `reasonable` range
of possible test values. Please note that this generator
is by no means `exhaustive`. The purpose of the method
is simply to provide a base set of values for checking
our basic hypothesis tests."""
exp_mean = self.random(exp_min, exp_max)
exp_std = exp_mean + self.random(-3, 3)
true_mean = self.random(-1, 1) * 10**exp_mean
true_std = self.random(0, 1) * 10**exp_std
return true_mean, true_std
# Main procedure for testing the hypothesis test
# It works by checking the significance level (alpha) semantics
# of the mean equality hypothesis test.
def run_mean_equality_hypothesis_test_on_synthetic_samples(
self, samples, sample_size, alpha, random_seed=42
):
"""Generates as many samples as provided by the parameter of that
name, and performs the mean_equality_hypothesis_test
on each of these samples. Since we use the mean and standard
deviation of the distribution, which are known, the hypothesis
test *should* fail at a rate of alpha. In order for this to be
checked, we return the observed_alpha rate. In addition, we check
that the hypothesis_test and confidence_interval methods are consistent,
and assert that they never disagree."""
manual_seed(random_seed)
accepted_test = 0
exp_min, exp_max = self.float_exponent_range()
for _ in range(0, samples):
true_mean, true_std = self.random_mean_and_std(exp_min, exp_max)
d = dist.normal.Normal(loc=true_mean, scale=true_std)
sample_size = tensor([sample_size])
r = d.sample(sample_size)
sample_mean = mean(r)
# Record hypothesis_test_behavior for this single sample
accept_test = mean_equality_hypothesis_test(
sample_mean, true_mean, true_std, sample_size, alpha
)
if accept_test:
accepted_test += 1
# Compare hypothesis_test to confidence_interval
lower_bound, upper_bound = mean_equality_hypothesis_confidence_interval(
true_mean, true_std, sample_size, alpha
)
below_upper = (lower_bound <= sample_mean).all()
above_lower = (sample_mean <= upper_bound).all()
accept_interval = below_upper and above_lower
# accept_interval = min(lower_bound <= sample_mean <= upper_bound).item()
self.assertFalse(
accept_test and not accept_interval, "Interval can be too small"
)
self.assertFalse(
accept_interval and not accept_test, "Interval can be too big"
)
observed_alpha = 1 - accepted_test / samples
return observed_alpha
# Test function for the hypothesis test. Normal operation is to
# take no arguments. Auditing can be done by changing the random_seed.
# An audit would pass if the test returns False for only an alpha
# fraction of the random_seeds on average. Since this is a stochastic
# correctness criteria, we use alpha_meta for this (meta-)test.
def test_mean_equality_hypothesis_test(
self, runs=1000, samples=100, alpha=0.01, alpha_meta=0.01, random_seed=42
):
"""Check that the hypothesis tests are working as expected,
that is, their promised alpha is about the same as the rate at which
they fail. The idea here is that we run a series of checks, and treat
this as a binomial distribution.
Note, the alpha_meta for this test should not be confused with the
alpha of the individual tests.
Yes, this method is using hypothesis testing to test our hypothesis
testing method. We call this a meta-test.
Note:
1) We do the meta-test multiple times (runs)
2) Each meta-test is a Bernoulli trial. The probability of failure
should be exactly alpha.
3) We check that the total runs of the meta-test have an observed
failure rate that is equal to alpha. We do this by checking
that it falls within the alpha_meta CI.
"""
observed_alphas = [
self.run_mean_equality_hypothesis_test_on_synthetic_samples(
samples=samples,
sample_size=100,
alpha=alpha,
random_seed=(random_seed + i) * i,
)
for i in range(0, runs)
]
# Meta-test
true_mean = alpha # For binomial meta-test distribution
true_std = sqrt(alpha * (1 - alpha))
bound = inverse_normal_cdf(1 - alpha_meta / 2)
binomial_results = [
-bound <= (observed_alpha - true_mean) * sqrt(samples) / true_std <= bound
for observed_alpha in observed_alphas
]
# Notice that the meta-test gives us a series of booleans. How do we interpret
# those? That's why we need the meta-meta-test.
# Meta-meta-test.
true_mean = (
1 - alpha_meta
) # So, we'll use alpha_meta for both the meta- and meta-meta- test
true_std = sqrt(alpha_meta * (1 - alpha_meta))
observed_mean = sum(binomial_results) / runs
bound = inverse_normal_cdf(
1 - alpha_meta / 2
) # Yes, this is the same as for meta-test
meta_meta_test = (
-bound <= (observed_mean - true_mean) * sqrt(runs) / true_std <= bound
)
message = (
"true_mean = "
+ str(true_mean)
+ ", observed_mean = "
+ str(observed_mean)
+ ", adjusted_bound = "
+ str(bound * true_std / sqrt(runs))
)
self.assertTrue(
meta_meta_test,
"Unable to confirm significance level (alpha) semantics: " + message,
)
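    # A minimal audit sketch (hypothetical, illustrative only; it uses only the
    # helper defined above): the helper can be exercised directly to inspect
    # one run's observed rejection rate, which should hover around alpha.
    #
    #     observed = self.run_mean_equality_hypothesis_test_on_synthetic_samples(
    #         samples=100, sample_size=100, alpha=0.01, random_seed=123
    #     )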
| beanmachine-main | tests/ppl/inference/hypothesis_testing_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.inference import SingleSiteNewtonianMonteCarlo
from beanmachine.ppl.inference.proposer.nmc import (
SingleSiteHalfSpaceNMCProposer,
SingleSiteRealSpaceNMCProposer,
SingleSiteSimplexSpaceNMCProposer,
)
from beanmachine.ppl.world.utils import BetaDimensionTransform
from beanmachine.ppl.world.world import World
from torch import tensor
class SingleSiteNewtonianMonteCarloTest(unittest.TestCase):
class SampleNormalModel:
@bm.random_variable
def foo(self):
return dist.Normal(tensor(2.0), tensor(2.0))
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), torch.tensor(1.0))
class SampleTransformModel:
@bm.random_variable
def realspace(self):
return dist.Normal(tensor(0.0), tensor(1.0))
@bm.random_variable
def halfspace(self):
return dist.Gamma(tensor(2.0), tensor(2.0))
@bm.random_variable
def simplex(self):
return dist.Dirichlet(tensor([0.1, 0.9]))
@bm.random_variable
def interval(self):
return dist.Uniform(tensor(1.0), tensor(3.0))
@bm.random_variable
def beta(self):
return dist.Beta(tensor(1.0), tensor(1.0))
class SampleShapeModel:
@bm.random_variable
def realspace(self):
return dist.Normal(torch.zeros(2, 4), tensor(1.0))
@bm.random_variable
def halfspace(self):
return dist.Gamma(torch.zeros(1, 2, 4) + tensor(2.0), tensor(2.0))
@bm.random_variable
def simplex(self):
return dist.Dirichlet(tensor([0.1, 0.9]))
@bm.random_variable
def interval(self):
return dist.Uniform(tensor(1.0), tensor(3.0))
@bm.random_variable
def beta(self):
return dist.Beta(tensor([1.0, 2.0, 3.0]), tensor([1.0, 2.0, 3.0]))
class SampleIndependentShapeModel:
@bm.random_variable
def realspace(self):
return dist.Independent(dist.Normal(torch.zeros(2, 4), tensor(1.0)), 1)
@bm.random_variable
def halfspace(self):
return dist.Independent(
dist.Gamma(torch.zeros(1, 2, 4) + tensor(2.0), tensor(2.0)), 1
)
@bm.random_variable
def simplex(self):
return dist.Independent(dist.Dirichlet(tensor([[0.1, 0.9], [0.1, 0.9]])), 1)
@bm.random_variable
def interval(self):
return dist.Independent(
dist.Uniform(tensor([1.0, 1.0]), tensor([3.0, 3.0])), 1
)
@bm.random_variable
def beta(self):
return dist.Independent(
dist.Beta(tensor([1.0, 2.0, 3.0]), tensor([1.0, 2.0, 3.0])), 1
)
class SampleStudentTModel:
@bm.random_variable
def x(self):
return dist.StudentT(df=2.0)
def test_single_site_newtonian_monte_carlo_student_t(self):
model = self.SampleStudentTModel()
samples = (
bm.SingleSiteNewtonianMonteCarlo()
.infer(
queries=[model.x()],
observations={},
num_samples=1_000,
num_chains=1,
)
.get_chain(0)[model.x()]
)
self.assertTrue((samples.abs() > 2.0).any())
def test_single_site_newtonian_monte_carlo_no_transform(self):
model = self.SampleTransformModel()
nw = bm.SingleSiteNewtonianMonteCarlo()
real_key = model.realspace()
half_key = model.halfspace()
simplex_key = model.simplex()
interval_key = model.interval()
beta_key = model.beta()
queries = [
model.realspace(),
model.halfspace(),
model.simplex(),
model.interval(),
model.beta(),
]
observations = {}
world = World.initialize_world(queries, observations)
self.assertTrue(real_key in world)
self.assertTrue(half_key in world)
self.assertTrue(simplex_key in world)
self.assertTrue(interval_key in world)
self.assertTrue(beta_key in world)
# trigger proposer initialization
nw.get_proposers(world, world.latent_nodes, 0)
# test that resulting shapes of proposed values are correct
proposer = nw._proposers[real_key]
proposed_value = proposer.propose(world)[0][real_key]
self.assertIsInstance(
proposer,
SingleSiteRealSpaceNMCProposer,
)
self.assertEqual(proposed_value.shape, torch.Size([]))
proposer = nw._proposers[half_key]
proposed_value = proposer.propose(world)[0][half_key]
self.assertIsInstance(
proposer,
SingleSiteHalfSpaceNMCProposer,
)
self.assertEqual(proposed_value.shape, torch.Size([]))
proposer = nw._proposers[simplex_key]
proposed_value = proposer.propose(world)[0][simplex_key]
self.assertIsInstance(
proposer,
SingleSiteSimplexSpaceNMCProposer,
)
        self.assertEqual(proposed_value.shape, torch.Size([2]))
proposer = nw._proposers[interval_key]
proposed_value = proposer.propose(world)[0][interval_key]
self.assertEqual(proposed_value.shape, torch.Size([]))
proposer = nw._proposers[beta_key]
proposed_value = proposer.propose(world)[0][beta_key]
self.assertIsInstance(proposer, SingleSiteSimplexSpaceNMCProposer)
self.assertEqual(proposed_value.shape, torch.Size([]))
self.assertEqual(
proposer._transform,
BetaDimensionTransform(),
)
def test_single_site_newtonian_monte_carlo_transform_shape(self):
model = self.SampleShapeModel()
nw = SingleSiteNewtonianMonteCarlo()
real_key = model.realspace()
half_key = model.halfspace()
simplex_key = model.simplex()
interval_key = model.interval()
beta_key = model.beta()
queries = [
model.realspace(),
model.halfspace(),
model.simplex(),
model.interval(),
model.beta(),
]
observations = {}
world = World.initialize_world(queries, observations)
self.assertTrue(real_key in world)
self.assertTrue(half_key in world)
self.assertTrue(simplex_key in world)
self.assertTrue(interval_key in world)
self.assertTrue(beta_key in world)
# trigger proposer initialization
nw.get_proposers(world, world.latent_nodes, 0)
# test that resulting shapes of proposed values are correct
proposer = nw._proposers[real_key]
proposed_value = proposer.propose(world)[0][real_key]
self.assertEqual(proposed_value.shape, torch.Size([2, 4]))
proposer = nw._proposers[half_key]
proposed_value = proposer.propose(world)[0][half_key]
self.assertEqual(proposed_value.shape, torch.Size([1, 2, 4]))
proposer = nw._proposers[simplex_key]
proposed_value = proposer.propose(world)[0][simplex_key]
self.assertEqual(proposed_value.shape, torch.Size([2]))
proposer = nw._proposers[interval_key]
proposed_value = proposer.propose(world)[0][interval_key]
self.assertEqual(proposed_value.shape, torch.Size([]))
proposer = nw._proposers[beta_key]
proposed_value = proposer.propose(world)[0][beta_key]
self.assertEqual(proposed_value.shape, torch.Size([3]))
def test_single_site_newtonian_monte_carlo_no_transform_independent_shape(self):
model = self.SampleIndependentShapeModel()
nw = bm.SingleSiteNewtonianMonteCarlo()
real_key = model.realspace()
half_key = model.halfspace()
simplex_key = model.simplex()
interval_key = model.interval()
beta_key = model.beta()
queries = [
real_key,
half_key,
simplex_key,
interval_key,
beta_key,
]
observations = {}
world = World.initialize_world(queries, observations)
# trigger proposer initialization
nw.get_proposers(world, world.latent_nodes, 0)
# test that resulting shapes of proposed values are correct
proposer = nw._proposers[real_key]
proposed_value = proposer.propose(world)[0][real_key]
self.assertIsInstance(
proposer,
SingleSiteRealSpaceNMCProposer,
)
self.assertEqual(proposed_value.shape, torch.Size([2, 4]))
proposer = nw._proposers[half_key]
proposed_value = proposer.propose(world)[0][half_key]
self.assertIsInstance(
proposer,
SingleSiteHalfSpaceNMCProposer,
)
self.assertEqual(proposed_value.shape, torch.Size([1, 2, 4]))
proposer = nw._proposers[simplex_key]
proposed_value = proposer.propose(world)[0][simplex_key]
self.assertIsInstance(
proposer,
SingleSiteSimplexSpaceNMCProposer,
)
self.assertEqual(proposed_value.shape, torch.Size([2, 2]))
proposer = nw._proposers[interval_key]
proposed_value = proposer.propose(world)[0][interval_key]
self.assertEqual(proposed_value.shape, torch.Size([2]))
proposer = nw._proposers[beta_key]
proposed_value = proposer.propose(world)[0][beta_key]
self.assertIsInstance(
proposer,
SingleSiteSimplexSpaceNMCProposer,
)
self.assertEqual(proposed_value.shape, torch.Size([3]))
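    # A minimal extra check (a sketch, not from the original suite; it reuses
    # only names already imported above): proposer selection is keyed on the
    # distribution's support, so a world over a single half-space variable
    # should map to the half-space proposer.
    def test_single_site_newtonian_monte_carlo_half_space_selection(self):
        model = self.SampleTransformModel()
        nw = bm.SingleSiteNewtonianMonteCarlo()
        half_key = model.halfspace()
        world = World.initialize_world([half_key], {})
        # trigger proposer initialization
        nw.get_proposers(world, world.latent_nodes, 0)
        self.assertIsInstance(
            nw._proposers[half_key], SingleSiteHalfSpaceNMCProposer
        )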
| beanmachine-main | tests/ppl/inference/single_site_newtonian_monte_carlo_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class SingleSiteHamiltonianMonteCarloConjugateTest(
unittest.TestCase, AbstractConjugateTests
):
def test_beta_binomial_conjugate_run(self):
hmc = bm.SingleSiteHamiltonianMonteCarlo(0.5, 0.05)
self.beta_binomial_conjugate_run(hmc, num_samples=500, num_adaptive_samples=500)
def test_gamma_gamma_conjugate_run(self):
hmc = bm.SingleSiteHamiltonianMonteCarlo(0.5, 0.05)
self.gamma_gamma_conjugate_run(hmc, num_samples=500, num_adaptive_samples=500)
def test_gamma_normal_conjugate_run(self):
hmc = bm.SingleSiteHamiltonianMonteCarlo(0.5, 0.05)
self.gamma_normal_conjugate_run(hmc, num_samples=500, num_adaptive_samples=500)
def test_normal_normal_conjugate_run(self):
hmc = bm.SingleSiteHamiltonianMonteCarlo(1.0, 0.05)
self.normal_normal_conjugate_run(hmc, num_samples=500, num_adaptive_samples=500)
@unittest.skip("Known to fail. Investigating in T77865889.")
def test_dirichlet_categorical_conjugate_run(self):
hmc = bm.SingleSiteHamiltonianMonteCarlo(0.1, 0.01)
self.dirichlet_categorical_conjugate_run(
hmc, num_samples=500, num_adaptive_samples=500
)
| beanmachine-main | tests/ppl/inference/single_site_hamiltonian_monte_carlo_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class SingleSiteAncestralMetropolisHastingsConjugateTest(
unittest.TestCase, AbstractConjugateTests
):
def setUp(self):
self.mh = bm.SingleSiteAncestralMetropolisHastings()
def test_beta_binomial_conjugate_run(self):
self.beta_binomial_conjugate_run(self.mh)
def test_gamma_gamma_conjugate_run(self):
self.gamma_gamma_conjugate_run(self.mh, random_seed=123)
def test_gamma_normal_conjugate_run(self):
self.gamma_normal_conjugate_run(self.mh, num_samples=20000)
def test_normal_normal_conjugate_run(self):
self.normal_normal_conjugate_run(self.mh, num_samples=5000)
def test_dirichlet_categorical_conjugate_run(self):
self.dirichlet_categorical_conjugate_run(self.mh, num_samples=10000)
| beanmachine-main | tests/ppl/inference/single_site_ancestral_mh_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from beanmachine.ppl.inference.compositional_infer import CompositionalInference
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class CompositionalInferenceConjugateTest(unittest.TestCase, AbstractConjugateTests):
def setUp(self):
self.mh = CompositionalInference()
def test_beta_binomial_conjugate_run(self):
self.beta_binomial_conjugate_run(self.mh)
def test_gamma_gamma_conjugate_run(self):
self.gamma_gamma_conjugate_run(self.mh)
def test_gamma_normal_conjugate_run(self):
self.gamma_normal_conjugate_run(self.mh)
def test_normal_normal_conjugate_run(self):
self.normal_normal_conjugate_run(self.mh)
def test_dirichlet_categorical_conjugate_run(self):
self.dirichlet_categorical_conjugate_run(self.mh)
| beanmachine-main | tests/ppl/inference/compositional_infer_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.hmc_proposer import HMCProposer
from beanmachine.ppl.world import World
@bm.random_variable
def foo():
return dist.Uniform(0.0, 1.0)
@bm.random_variable
def bar():
return dist.Normal(foo(), 1.0)
@pytest.fixture
def world():
w = World()
w.call(bar())
return w
@pytest.fixture
def hmc(world):
hmc_proposer = HMCProposer(world, world.latent_nodes, 10, trajectory_length=1.0)
return hmc_proposer
def test_potential_grads(hmc):
pe, pe_grad = hmc._potential_grads(hmc._positions)
assert isinstance(pe, torch.Tensor)
assert pe.numel() == 1
assert isinstance(pe_grad, torch.Tensor)
assert pe_grad.shape == hmc._positions.shape
def test_kinetic_grads(hmc):
momentums = hmc._initialize_momentums(hmc._positions)
ke = hmc._kinetic_energy(momentums, hmc._mass_inv)
assert isinstance(ke, torch.Tensor)
assert ke.numel() == 1
ke_grad = hmc._kinetic_grads(momentums, hmc._mass_inv)
assert isinstance(ke_grad, torch.Tensor)
assert ke_grad.shape == hmc._positions.shape
def test_leapfrog_step(hmc):
step_size = torch.tensor(0.0)
momentums = hmc._initialize_momentums(hmc._positions)
new_positions, new_momentums, pe, pe_grad = hmc._leapfrog_step(
hmc._positions, momentums, step_size, hmc._mass_inv
)
assert torch.allclose(momentums, new_momentums)
assert torch.allclose(hmc._positions, new_positions)
@pytest.mark.parametrize(
    # forcing the step_size to be 0 for HMC/NUTS
"algorithm",
[
bm.GlobalNoUTurnSampler(initial_step_size=0.0),
bm.GlobalHamiltonianMonteCarlo(trajectory_length=1.0, initial_step_size=0.0),
],
)
def test_step_size_exception(algorithm):
queries = [foo()]
observations = {bar(): torch.tensor(0.5)}
with pytest.raises(ValueError):
algorithm.infer(
queries,
observations,
num_samples=20,
num_chains=1,
)
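# A further sketch (hypothetical, mirroring test_leapfrog_step above): with a
# nonzero step size the leapfrog update changes positions and momentums, but
# it must preserve their shapes.
def test_leapfrog_step_shapes(hmc):
    step_size = torch.tensor(0.1)
    momentums = hmc._initialize_momentums(hmc._positions)
    new_positions, new_momentums, pe, pe_grad = hmc._leapfrog_step(
        hmc._positions, momentums, step_size, hmc._mass_inv
    )
    assert new_positions.shape == hmc._positions.shape
    assert new_momentums.shape == momentums.shape
    assert pe_grad.shape == hmc._positions.shape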
| beanmachine-main | tests/ppl/inference/proposer/hmc_proposer_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit test for NormalEig class"""
import unittest
import torch
from beanmachine.ppl.inference.proposer.normal_eig import NormalEig
from torch.distributions.multivariate_normal import MultivariateNormal
class NormalEigTest(unittest.TestCase):
def test_normal_eig(self) -> None:
covar = torch.Tensor([[1, 0.1, 0], [0.1, 2, 0.5], [0, 0.5, 3]])
evals, evecs = torch.linalg.eigh(covar)
mean = torch.Tensor([1.0, 3.5, -1.2])
# we want to test that both distributions are identical
ref_dist = MultivariateNormal(mean, covar)
test_dist = NormalEig(mean, evals, evecs)
# density at the mean should be equal
self.assertAlmostEqual(
ref_dist.log_prob(mean).item(), test_dist.log_prob(mean).item(), 2
)
# density at a random sample should also be equal
val = test_dist.sample()
self.assertEqual(val.shape, torch.Size([3]))
self.assertAlmostEqual(
ref_dist.log_prob(val).item(), test_dist.log_prob(val).item(), 2
)
# test that the empirical mean is correct
emp_mean = sum(test_dist.sample() for _ in range(10000)) / 10000
self.assertTrue(((mean - emp_mean).abs() < 0.1).all())
# test that the empirical covariance is correct
        def outerprod(x):
            # outer product x x^T (torch.outer supersedes the deprecated torch.ger)
            return torch.outer(x, x)
emp_covar = (
sum(outerprod(test_dist.sample() - mean) for _ in range(2000)) / 2000
)
self.assertTrue(((covar - emp_covar).abs() < 0.2).all())
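    # An extra sanity check (a sketch, not from the original file; it relies
    # only on torch.linalg.eigh semantics): the eigendecomposition feeding
    # NormalEig should reconstruct the original covariance matrix.
    def test_eigh_reconstructs_covariance(self) -> None:
        covar = torch.Tensor([[1, 0.1, 0], [0.1, 2, 0.5], [0, 0.5, 3]])
        evals, evecs = torch.linalg.eigh(covar)
        reconstructed = evecs @ torch.diag(evals) @ evecs.T
        self.assertTrue(torch.allclose(covar, reconstructed, atol=1e-5))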
| beanmachine-main | tests/ppl/inference/proposer/normal_eig_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from beanmachine.ppl.inference.proposer.utils import DictToVecConverter
def test_dict_to_vec_conversion():
d = {"a": torch.ones((2, 5)), "b": torch.rand(5), "c": torch.tensor(3.0)}
converter = DictToVecConverter(example_dict=d)
v = converter.to_vec(d)
assert len(v) == 16 # 2x5 + 5 + 1
# applying exp on the flatten tensor is equivalent to applying it to each
# of the tensor in the dictionary
d_exp = converter.to_dict(torch.exp(v))
for key in d:
assert torch.allclose(torch.exp(d[key]), d_exp[key])
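# A complementary sketch (not in the original file; it uses only the public
# to_vec/to_dict API exercised above): converting to a vector and straight
# back should reproduce every tensor exactly.
def test_dict_to_vec_round_trip():
    d = {"a": torch.ones((2, 5)), "b": torch.rand(5), "c": torch.tensor(3.0)}
    converter = DictToVecConverter(example_dict=d)
    d_round_trip = converter.to_dict(converter.to_vec(d))
    for key in d:
        assert torch.allclose(d[key], d_round_trip[key])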
| beanmachine-main | tests/ppl/inference/proposer/utils_test.py |
| beanmachine-main | tests/ppl/inference/proposer/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.nuts_proposer import (
_Tree,
_TreeArgs,
_TreeNode,
NUTSProposer,
)
from beanmachine.ppl.world import World
@bm.random_variable
def foo():
return dist.Beta(2.0, 2.0)
@bm.random_variable
def bar():
return dist.Bernoulli(foo())
@pytest.fixture
def nuts():
world = World(observations={bar(): torch.tensor(0.8)})
world.call(bar())
nuts_proposer = NUTSProposer(world, world.latent_nodes, 10)
return nuts_proposer
@pytest.fixture
def tree_node(nuts):
momentums = nuts._initialize_momentums(nuts._positions)
return _TreeNode(
positions=nuts._positions, momentums=momentums, pe_grad=nuts._pe_grad
)
@pytest.fixture
def tree_args(tree_node, nuts):
initial_energy = nuts._hamiltonian(
nuts._positions,
tree_node.momentums,
nuts._mass_inv,
nuts._pe,
)
return _TreeArgs(
log_slice=-initial_energy,
direction=torch.tensor(1),
step_size=nuts.step_size,
initial_energy=initial_energy,
mass_inv=nuts._mass_inv,
)
def test_base_tree(tree_node, tree_args, nuts):
nuts._multinomial_sampling = False
tree_args = tree_args._replace(
log_slice=torch.log1p(-torch.rand(())) - tree_args.initial_energy
)
tree = nuts._build_tree_base_case(root=tree_node, args=tree_args)
assert isinstance(tree, _Tree)
assert torch.isclose(tree.log_weight, torch.tensor(float("-inf"))) or torch.isclose(
tree.log_weight, torch.tensor(0.0)
)
assert tree.left == tree.right
def test_base_tree_multinomial(tree_node, tree_args, nuts):
tree = nuts._build_tree_base_case(root=tree_node, args=tree_args)
assert isinstance(tree, _Tree)
# in multinomial sampling, trees are weighted by their accept prob
assert torch.isclose(
torch.clamp(tree.log_weight.exp(), max=1.0), tree.sum_accept_prob
)
def test_build_tree(tree_node, tree_args, nuts):
tree_depth = 3
tree = nuts._build_tree(root=tree_node, tree_depth=tree_depth, args=tree_args)
assert isinstance(tree, _Tree)
assert tree.turned_or_diverged or (tree.left is not tree.right)
assert tree.turned_or_diverged or tree.num_proposals == 2**tree_depth
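# A further sketch (hypothetical; it assumes tree_depth=0 falls through to the
# base case, which the 2**tree_depth proposal count above suggests): a
# depth-zero tree should propose exactly one new point.
def test_build_tree_depth_zero(tree_node, tree_args, nuts):
    tree = nuts._build_tree(root=tree_node, tree_depth=0, args=tree_args)
    assert isinstance(tree, _Tree)
    assert tree.turned_or_diverged or tree.num_proposals == 1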
| beanmachine-main | tests/ppl/inference/proposer/nuts_proposer_test.py |