python_code | repo_name | file_path
---|---|---|
import json
import numpy as np
import random
from os.path import isfile, isdir
from os.path import join as pjoin
import torch
from torch.utils.data import Dataset
#########
# Node typing: checking the type of a specific sub-tree (dict value)
#########
def is_span(val):
try:
a, (b, c) = val
return all([type(v) == int for v in [a, b, c]])
except (ValueError, TypeError):
return False
def is_span_list(val):
res = type(val) == list and len(val) > 0 and all([is_span(v) for v in val])
return res
def is_cat(val):
return type(val) == str or val is True or val is False
def is_cat_list(val):
res = (type(val) == list) and len(val) > 0 and all([is_cat(v) for v in val])
return res
def is_int(val):
return type(val) == dict
def is_int_list(val):
res = (type(val) == list) and len(val) > 0 and all([is_int(v) for v in val])
return res
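# Examples of the three node kinds these predicates distinguish (values are
# hypothetical, invented for illustration):
#   is_span([0, (1, 2)])          -> True   # sentence index + token range
#   is_cat("BUILD"); is_cat(True) -> True   # categorical leaf values
#   is_int({"has_size": "big"})   -> True   # internal node: a nested dict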
#########
# Make grammar from dataset. Starts with empty full_tree
# then add all nodes found in the dataset
#########
# if new_tree is outside of what the grammar can handle, modifies the grammar
# also counts the number of occurrences of each node
def add_tree(full_tree, new_tree, vocounts, nw=1):
for k, v in new_tree.items():
if k not in full_tree:
full_tree[k] = {"name": k, "children": {}, "values": {}, "count": 0}
full_tree[k]["count"] += nw
if is_cat(v):
full_tree[k]["values"][v] = full_tree[k]["values"].get(v, 0) + nw
w = "C:" + k + "|" + str(v)
vocounts[w] = vocounts.get(w, 0) + nw
elif is_int(v):
ws = "IB:" + k
we = "IE:" + k
vocounts[ws] = vocounts.get(ws, 0) + nw
vocounts[we] = vocounts.get(we, 0) + nw
add_tree(full_tree[k]["children"], v, vocounts, nw)
elif is_int_list(v):
ws = "ILB:" + k
wi = "IL&:" + k
we = "ILE:" + k
vocounts[ws] = vocounts.get(ws, 0) + nw
vocounts[wi] = vocounts.get(wi, 0) + nw
vocounts[we] = vocounts.get(we, 0) + nw
for c in v:
add_tree(full_tree[k]["children"], c, vocounts, nw)
elif is_span(v) or is_span_list(v):
w = "S:" + k
ws = "BE:" + k
vocounts[w] = vocounts.get(w, 0) + nw
vocounts[ws] = vocounts.get(ws, 0) + nw
# starts with an empty grammar and adds trees from the dataset
def make_full_tree(trees_weight_ls):
res = {}
vocounts = {}
for trees, weight in trees_weight_ls:
for dlg, tr in trees:
add_tree(res, tr, vocounts, weight)
tree_i2w = [k for k, v in sorted(vocounts.items(), key=lambda x: x[1], reverse=True)] + [
"BE:span"
]
return res, tree_i2w
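# Hedged usage sketch (toy data invented for illustration; this helper is not
# part of the original module and is never called):
def _example_make_full_tree():
    toy_trees = [("build a house", {"dialogue_type": "HUMAN_GIVE_COMMAND"})]
    grammar, tree_i2w = make_full_tree([(toy_trees, 1.0)])
    # tree_i2w == ["C:dialogue_type|HUMAN_GIVE_COMMAND", "BE:span"]
    return grammar, tree_i2w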
#########
# Linearize and de-linearize trees
#########
# transforms tree into sequence of (token, start_span, end_span)
# idx_map maps the span ids before and after tokenization
def tree_to_seq(full_tree, tree, idx_map=None):
res = []
sorted_keys = sorted(
[k for k in tree.keys() if k in full_tree],
key=lambda x: full_tree[x]["count"],
reverse=True,
) + sorted([k for k, v in tree.items() if k not in full_tree])
for k in sorted_keys:
if is_cat(tree[k]):
res += [("C:" + k + "|" + str(tree[k]), -1, -1)]
elif is_span(tree[k]):
a, (b, c) = tree[k]
# res += [('S:' + k, idx_map[a][b][0], idx_map[a][c][1])]
res += [("S:" + k, -1, -1)]
res += [("BE:" + k, idx_map[a][b][0], idx_map[a][c][1])]
elif is_int(tree[k]):
res += (
[("IB:" + k, -1, -1)]
+ tree_to_seq(full_tree.get(k, {"children": {}})["children"], tree[k], idx_map)
+ [("IE:" + k, -1, -1)]
)
elif is_int_list(tree[k]):
res += [("ILB:" + k, -1, -1)]
for c in tree[k]:
res += tree_to_seq(full_tree.get(k, {"children": {}})["children"], c, idx_map) + [
("IL&:" + k, -1, -1)
]
res = res[:-1] + [("ILE:" + k, -1, -1)]
else:
raise NotImplementedError
return res
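# Illustrative linearization (hypothetical tree; no span nodes, so idx_map is unused):
# tree_to_seq(full_tree, {"action": {"has_name": "BUILD"}}) yields
# [("IB:action", -1, -1), ("C:has_name|BUILD", -1, -1), ("IE:action", -1, -1)]
# and seq_to_tree below inverts this mapping.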
# selects the sub-sequence (span of the output sequence) corresponding to each sub-tree so we can apply seq_to_tree recursively
def select_spans(seq):
spans = [-1 for _ in seq]
active = {}
unopened = False
for i, (w, b_id, e_id) in enumerate(seq):
if w.startswith("IB:") or w.startswith("ILB:"):
active[w] = active.get(w, {})
active[w][i] = 0
for s_idx in active[w]:
active[w][s_idx] += 1
elif w.startswith("IE:") or w.startswith("ILE:"):
ws = w.replace("E:", "B:")
if ws not in active:
# closing an unopened bracket
unopened = True
else:
closed = []
for s_idx in active[ws]:
active[ws][s_idx] -= 1
if active[ws][s_idx] <= 0:
closed += [s_idx]
spans[s_idx] = i
for s_idx in closed:
del active[ws][s_idx]
# check whether all brackets have been closed
well_formed = (sum([len(ctr_dict) for ws, ctr_dict in active.items()]) == 0) and not unopened
for ws in active:
for s_idx in active[ws]:
spans[s_idx] = len(seq)
# create a dictionary of left bracket > right bracket
span_dict = {}
for s_idx, e_idx in enumerate(spans):
if e_idx > 0:
span_dict[s_idx] = e_idx
return (span_dict, well_formed)
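# e.g. select_spans([("IB:a", -1, -1), ("C:x|1", -1, -1), ("IE:a", -1, -1)])
# returns ({0: 2}, True): the bracket opened at index 0 closes at index 2,
# and the sequence is well formed.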
# transforms sequence back into tree of nested dictionaries
# span_dict identifies the sub-sequences corresponding to sub-trees
def seq_to_tree(full_tree, seq, idx_rev_map=None, span_dct=None, start_id=0):
res = {}
if span_dct is None:
span_dict, well_formed = select_spans(seq)
else:
span_dict = span_dct
well_formed = True
idx = 0
while idx < len(seq):
if ":" not in seq[idx][0]:
idx += 1
continue
t, w = seq[idx][0].split(":")
# categorical node
if t == "C":
cat, val = w.split("|")
res[cat] = val
idx += 1
# span node
elif t == "S":
if idx + 1 < len(seq):
b_pre = seq[idx + 1][1]
e_pre = seq[idx + 1][2]
l_idx, b_idx = idx_rev_map[b_pre]
_, e_idx = idx_rev_map[e_pre]
res[w] = [l_idx, [b_idx, e_idx]]
else:
res[w] = [-1, [-1, -1]]
# idx += 1
idx += 2
# internal node
elif t == "IB":
sub_full_tree = full_tree.get(w, {"children": {}})["children"]
sub_span = (idx + 1, span_dict[start_id + idx] - start_id)
sub_seq = seq[sub_span[0] : sub_span[1]]
res[w] = seq_to_tree(
sub_full_tree, sub_seq, idx_rev_map, span_dict, start_id=start_id + sub_span[0]
)[0]
idx = sub_span[1]
# internal node list
elif t == "ILB":
sub_full_tree = full_tree.get(w, {"children": {}})["children"]
sub_span = (idx + 1, span_dict[start_id + idx] - start_id)
pre_sub_seq = seq[sub_span[0] : sub_span[1]]
# split sub-sequence by list items
sub_seq_ls_idx = (
[-1]
+ [i for i, sw in enumerate(pre_sub_seq) if sw[0] == "IL&:" + w]
+ [len(pre_sub_seq)]
)
sub_span_ls = [
(sub_span[0] + sub_seq_ls_idx[i] + 1, sub_span[0] + sub_seq_ls_idx[i + 1])
for i in range(len(sub_seq_ls_idx) - 1)
]
# read sub-trees
res[w] = []
for s_sub_span in sub_span_ls:
sub_seq = seq[s_sub_span[0] : s_sub_span[1]]
res[w] += [
seq_to_tree(
sub_full_tree,
sub_seq,
idx_rev_map,
span_dict,
start_id=start_id + s_sub_span[0],
)[0]
]
idx = sub_span[1]
# failure case??? TODO: raise error
else:
idx += 1
return (res, well_formed)
# returns empty tree if ta and tb are the same tree
def compare_tree(ta, tb):
res = {}
# internal node
if is_int(ta) or is_int_list(ta):
if is_int_list(ta):
ta = ta[0]
tb = tb[0]
for a in ta:
if a in tb:
comp = compare_tree(ta[a], tb[a])
if len(comp) > 0:
res[a] = comp
else:
res[a] = (ta[a], "")
for b in tb:
if b not in ta:
res[b] = ("", tb[b])
elif ta != tb:
res = (ta, tb)
return res
##################
# torch Dataset
##################
# helper function to align word indices before and after applying BPE
def align_post_tok(pre_tok, post_tok, seen_toks=0):
i, j, ci, cj = [0] * 4
idx_map = [[seen_toks, seen_toks] for _ in range(len(pre_tok.split()))]
while ci < len(pre_tok) and cj < len(post_tok):
if pre_tok[ci] == post_tok[cj]:
if pre_tok[ci] == " ":
i += 1
j += 1
if i > 0:
idx_map[i - 1][1] = j - 1 + seen_toks
idx_map[i][0] = j + seen_toks
ci += 1
cj += 1
elif post_tok[cj] == " ":
j += 1
cj += 1
elif pre_tok[ci] == " ":
i += 1
if i > 0:
idx_map[i - 1][0] = j - 1 + seen_toks
idx_map[i][1] = j + seen_toks
ci += 1
else:
cj += 1
idx_map[i][-1] = j + seen_toks
return idx_map
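# e.g. with a WordPiece-style split (illustrative, default seen_toks=0):
# align_post_tok("hello there", "hell ##o there") returns [[0, 1], [2, 2]]:
# word 0 spans post-BPE tokens 0-1 ("hell", "##o"), word 1 maps to token 2.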
# applies BPE to input and creates mapping of span indices before and after BPE
def tokenize_mapidx(text, tokenizer):
# re-order lines: last chat in multi-chat is first in the list
# rev_lines = [line.strip() for line in text.split('<SEP>')]
# text_lines = [rev_lines[i - 1] for i in range(len(rev_lines), 0, -1)]
text_lines = [line.strip() for line in text.split("<SEP>")]
# tokenize text and linearize tree
seen_toks = 1
idx_maps = [[] for _ in text_lines]
res_toks = ["[CLS]"]
for lid, line in enumerate(text_lines):
tok_line = tokenizer.tokenize(line)
tok_join = " ".join(tok_line)
idx_maps[-1 - lid] = align_post_tok(line, tok_join, seen_toks)
res_toks += tok_line[:] + ["[SEP]"]
seen_toks += len(tok_line) + 1
return (" ".join(res_toks), idx_maps)
# takes raw text and tree, returns BPE-ed text and linearized tree
def tokenize_linearize(text, tree, tokenizer, full_tree, word_noise=0.0):
tok_text, idx_maps = tokenize_mapidx(text, tokenizer)
tokenized = " ".join(
[
"[UNK]" if w not in ["[CLS]", "[SEP]"] and random.random() < word_noise else w
for w in tok_text.split()
]
)
lin_tree = tree_to_seq(full_tree, tree, idx_maps)
return (tokenized, lin_tree)
# torch Dataset for the CAIP format, applies BPE and linearizes trees on-the-fly
class CAIPDataset(Dataset):
"""
CAIP: CraftAssist Instruction Parsing
"""
def __init__(
self,
tokenizer,
args,
prefix="train",
dtype="templated",
sampling=False,
word_noise=0.0,
full_tree_voc=None,
):
assert isdir(args.data_dir)
self.tokenizer = tokenizer
# We load the (input, tree) pairs for all data types and
# initialize the hard examples buffer
self.data = {}
self.sampling = sampling
self.word_noise = word_noise
dtype_samples = json.loads(args.dtype_samples)
self.dtype = dtype
self.dtypes = [t for t, p in dtype_samples]
self.sample_probas = np.array([p for t, p in dtype_samples])
self.sample_probas /= self.sample_probas.sum()
if prefix == "train":
for k in self.dtypes:
fname = pjoin(args.data_dir, prefix, k + ".json")
if isfile(fname):
print("loading", fname)
self.data[k] = json.load(open(fname))
else:
self.data[k] = []
self.hard_buffer_size = 1024
self.hard_buffer_counter = 0
else:
fname = pjoin(args.data_dir, prefix, dtype + ".json")
if isfile(fname):
print("loading", fname)
self.data[dtype] = json.load(open(fname))
else:
self.data[dtype] = []
# load meta-tree and tree vocabulary
if full_tree_voc is None:
print("making tree")
ftr, tr_i2w = make_full_tree(
[
(self.data["humanbot"], 3e5),
(self.data["prompts"], 1e5),
(self.data["templated"][:100000], 1),
]
)
self.full_tree = ftr
else:
full_tree, tr_i2w = full_tree_voc
self.full_tree = full_tree
spec_tokens = ["[PAD]", "unused", "[UNK]", "[CLS]", "[SEP]", "[MASK]", "<S>", "</S>"]
self.tree_voc = spec_tokens[:] + tr_i2w
self.tree_idxs = dict([(w, i) for i, w in enumerate(self.tree_voc)])
self.dataset_length = max([len(v) for v in self.data.values()])
if args.examples_per_epoch > 0:
self.dataset_length = min(self.dataset_length, args.examples_per_epoch)
def __len__(self):
return self.dataset_length
def __getitem__(self, idx):
# sample data type and get example
if self.sampling:
dtype = np.random.choice(self.dtypes, p=self.sample_probas)
if len(self.data[dtype]) == 0:
dtype = self.dtype
else:
dtype = self.dtype
p_text, p_tree = self.data[dtype][idx % len(self.data[dtype])]
text, tree = tokenize_linearize(
p_text, p_tree, self.tokenizer, self.full_tree, self.word_noise
)
text_idx_ls = [self.tokenizer._convert_token_to_id(w) for w in text.split()]
tree_idx_ls = [
[self.tree_idxs[w], bi, ei]
for w, bi, ei in [("<S>", -1, -1)] + tree + [("</S>", -1, -1)]
]
return (text_idx_ls, tree_idx_ls, (text, p_text, p_tree))
def add_hard_example(self, exple):
if self.hard_buffer_counter < self.hard_buffer_size:
self.data["hard"] += [exple]
else:
self.data["hard"][self.hard_buffer_counter % self.hard_buffer_size] = exple
self.hard_buffer_counter += 1
# applies padding and makes batch tensors
def caip_collate(batch, tokenizer):
# keep track of examples
pre_examples = [(p_text, p_tree) for x, y, (_, p_text, p_tree) in batch]
# input: text
batch_x_ls = [x for x, y, _ in batch]
x_len = max([len(x) for x in batch_x_ls])
x_mask_ls = [[1] * len(x) + [0] * (x_len - len(x)) for x in batch_x_ls]
batch_x_pad_ls = [x + [tokenizer.pad_token_id] * (x_len - len(x)) for x in batch_x_ls]
# output: linearized trees
batch_y_ls = [y for x, y, _ in batch]
y_len = max([len(y) for y in batch_y_ls])
y_mask_ls = [[1] * len(y) + [0] * (y_len - len(y)) for y in batch_y_ls]
batch_y_pad_ls = [y + [[0, -1, -1]] * (y_len - len(y)) for y in batch_y_ls] # 0 as padding idx
# tensorize
x = torch.tensor(batch_x_pad_ls)
x_mask = torch.tensor(x_mask_ls)
y = torch.tensor(batch_y_pad_ls)
y_mask = torch.tensor(y_mask_ls)
return (x, x_mask, y, y_mask, pre_examples)
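# Hedged usage sketch (toy ids and a minimal tokenizer stub invented for
# illustration; real batches come from CAIPDataset.__getitem__):
def _example_caip_collate():
    class _TokStub:
        pad_token_id = 0  # the only tokenizer attribute caip_collate uses

    x = [101, 7592, 102]  # input token ids
    y = [[6, -1, -1], [8, 2, 2], [7, -1, -1]]  # (tree idx, span begin, span end)
    batch = [(x, y, ("tokenized text", "raw text", {}))]
    return caip_collate(batch, _TokStub())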
| craftassist-master | acl2020_submission/model_training_code/utils_caip.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam, Adagrad
from transformers.modeling_bert import BertModel, BertOnlyMLMHead
from utils_caip import *
# --------------------------
# Transformer-based decoder module for sequence and span prediction, computes the loss
# --------------------------
def my_xavier_init(m, gain=1):
for p in m.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p, gain)
else:
nn.init.constant_(p, 0)
class HighwayLayer(torch.nn.Module):
def __init__(self, dim):
super(HighwayLayer, self).__init__()
self.gate_proj = nn.Linear(dim, dim, bias=True)
self.nlin_proj = nn.Linear(dim, dim, bias=True)
my_xavier_init(self.nlin_proj)
my_xavier_init(self.gate_proj)
nn.init.constant_(self.gate_proj.bias, -1)
def forward(self, x):
gate = torch.sigmoid(self.gate_proj(x))
nlin = torch.tanh(self.nlin_proj(x))
res = gate * nlin + (1 - gate) * x
return res
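# Shape check (illustrative): the layer is shape-preserving, so for
# layer = HighwayLayer(768), layer(torch.randn(2, 5, 768)) has shape (2, 5, 768).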
# single module to predict the output sequence and compute the
# loss if the target sequence is provided for convenience
class DecoderWithLoss(nn.Module):
def __init__(self, config, args, tokenizer):
super(DecoderWithLoss, self).__init__()
# model components
self.bert = BertModel(config)
self.lm_head = BertOnlyMLMHead(config)
self.span_b_proj = nn.ModuleList([HighwayLayer(768) for _ in range(args.num_highway)])
self.span_e_proj = nn.ModuleList([HighwayLayer(768) for _ in range(args.num_highway)])
# loss functions
if args.node_label_smoothing > 0:
self.lm_ce_loss = LabelSmoothingLoss(
args.node_label_smoothing, config.vocab_size, ignore_index=tokenizer.pad_token_id
)
else:
self.lm_ce_loss = torch.nn.CrossEntropyLoss(
ignore_index=tokenizer.pad_token_id, reduction="none"
)
self.span_ce_loss = torch.nn.CrossEntropyLoss(ignore_index=-1, reduction="none")
self.span_loss_lb = args.lambda_span_loss
# without loss, use at prediction time
# TODO: add previously computed y_rep
# y only has the node indices (not the spans)
def step(self, y, y_mask, x_reps, x_mask):
y_rep = self.bert(
input_ids=y,
attention_mask=y_mask,
encoder_hidden_states=x_reps,
encoder_attention_mask=x_mask,
)[0]
y_mask_target = y_mask
lm_scores = self.lm_head(y_rep)
y_span_pre_b = y_rep
for hw in self.span_b_proj:
y_span_pre_b = hw(y_span_pre_b)
span_b_scores = (x_reps[:, None, :, :] * y_span_pre_b[:, :, None, :]).sum(dim=-1)
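# span_b_scores: B x Ty x Tx, the dot product of every decoder state with every encoder state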
span_b_scores = (
span_b_scores + (1 - y_mask_target.type_as(span_b_scores))[:, :, None] * 1e9
)
y_span_pre_e = y_rep
for hw in self.span_e_proj:
y_span_pre_e = hw(y_span_pre_e)
span_e_scores = (x_reps[:, None, :, :] * y_span_pre_e[:, :, None, :]).sum(dim=-1)
span_e_scores = (
span_e_scores + (1 - y_mask_target.type_as(span_e_scores))[:, :, None] * 1e9
)
res = {
"lm_scores": torch.log_softmax(lm_scores, dim=-1).detach(),
"span_b_scores": torch.log_softmax(span_b_scores, dim=-1).detach(),
"span_e_scores": torch.log_softmax(span_e_scores, dim=-1).detach(),
}
return res
def forward(self, y, y_mask, x_reps, x_mask):
y_rep = self.bert(
input_ids=y[:, :-1, 0],
attention_mask=y_mask[:, :-1],
encoder_hidden_states=x_reps,
encoder_attention_mask=x_mask,
)[0]
y_mask_target = y_mask[:, 1:].contiguous()
# language modeling
lm_scores = self.lm_head(y_rep)
lm_lin_scores = lm_scores.view(-1, lm_scores.shape[-1])
lm_lin_targets = y[:, 1:, 0].contiguous().view(-1)
lm_lin_loss = self.lm_ce_loss(lm_lin_scores, lm_lin_targets)
lm_lin_mask = y_mask_target.view(-1)
lm_loss = lm_lin_loss.sum() / lm_lin_mask.sum()
# span prediction
## beginning of spans
y_span_pre_b = y_rep
for hw in self.span_b_proj:
y_span_pre_b = hw(y_span_pre_b)
span_b_scores = (x_reps[:, None, :, :] * y_span_pre_b[:, :, None, :]).sum(dim=-1)
span_b_scores = (
span_b_scores + (1 - y_mask_target.type_as(span_b_scores))[:, :, None] * 1e9
)
span_b_lin_scores = span_b_scores.view(-1, x_reps.shape[1])
span_b_lin_targets = y[:, 1:, 1].contiguous().view(-1)
span_b_lin_loss = self.span_ce_loss(span_b_lin_scores, span_b_lin_targets)
## end of spans
y_span_pre_e = y_rep
for hw in self.span_e_proj:
y_span_pre_e = hw(y_span_pre_e)
span_e_scores = (x_reps[:, None, :, :] * y_span_pre_e[:, :, None, :]).sum(dim=-1)
span_e_scores = (
span_e_scores + (1 - y_mask_target.type_as(span_e_scores))[:, :, None] * 1e9
)
span_e_lin_scores = span_e_scores.view(-1, span_e_scores.shape[-1])
span_e_lin_targets = y[:, 1:, 2].contiguous().view(-1)
span_e_lin_loss = self.span_ce_loss(span_e_lin_scores, span_e_lin_targets)
## joint span prediction
# TODO: predict full spans, enforce order
# combine
span_lin_loss = span_b_lin_loss + span_e_lin_loss
span_loss = span_lin_loss.sum() / (y[:, :, 1] >= 0).sum()
tot_loss = (1 - self.span_loss_lb) * lm_loss + self.span_loss_lb * span_loss
res = {
"lm_scores": lm_scores,
"span_b_scores": span_b_scores,
"span_e_scores": span_e_scores,
"loss": tot_loss,
}
return res
# combines DecoderWithLoss with pre-trained BERT encoder
class EncoderDecoderWithLoss(nn.Module):
def __init__(self, encoder, decoder, args):
super(EncoderDecoderWithLoss, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.train_encoder = args.train_encoder
def forward(self, x, x_mask, y, y_mask, x_reps=None):
if x_reps is None:
x_reps = self.encoder(input_ids=x, attention_mask=x_mask)[0]
if not self.train_encoder:
x_reps = x_reps.detach()
outputs = self.decoder(y, y_mask, x_reps, x_mask)
return outputs
# raw text input, tree output
# DEPRECATED: use beam search
def predict_tree(txt, model, tokenizer, dataset, ban_noop=False, noop_threshold=0.0):
model_device = model.decoder.lm_head.predictions.decoder.weight.device
# prepare batch
text, idx_maps = tokenize_mapidx(txt, tokenizer)
tree = [("<S>", -1, -1)]
text_idx_ls = [dataset.tokenizer._convert_token_to_id(w) for w in text.split()]
tree_idx_ls = [[dataset.tree_idxs[w], bi, ei] for w, bi, ei in tree]
pre_batch = [(text_idx_ls, tree_idx_ls, (text, txt, {}))]
batch = caip_collate(pre_batch, tokenizer)
batch = [t.to(model_device) for t in batch[:4]]
x, x_mask, y, y_mask = batch
y = y[:, :, 0]
x_reps = model.encoder(input_ids=x, attention_mask=x_mask)[0].detach()
res = [("<S>", -1, -1)]
next_id = -1
noop_predicted = False
for i in range(100):
if i > 0:
y = torch.cat([y, torch.LongTensor([[next_id]]).to(model_device)], dim=1)
y_mask = torch.cat(
[y_mask, torch.LongTensor([1]).unsqueeze(dim=0).to(model_device)], dim=1
)
outputs = model.decoder.step(y, y_mask, x_reps, x_mask)
# next word
lm_scores = outputs["lm_scores"]
s_lm_scores, s_lm_ids = lm_scores[0, -1].sort(dim=-1, descending=True)
next_id = s_lm_ids[0].item()
if "NOOP" in dataset.tree_voc[next_id]:
if ban_noop or s_lm_scores[0].item() < noop_threshold:
next_id = s_lm_ids[1].item()
noop_predicted = True
print("---- replacing NOOP with", dataset.tree_voc[next_id])
next_w = dataset.tree_voc[next_id]
# predicted span
span_b_scores = outputs["span_b_scores"]
span_e_scores = outputs["span_e_scores"]
_, s_sb_ids = span_b_scores[0, -1].sort(dim=-1, descending=True)
_, s_se_ids = span_e_scores[0, -1].sort(dim=-1, descending=True)
b_id = s_sb_ids[0].item()
e_id = s_se_ids[0].item()
res += [(next_w, b_id, e_id)]
if next_w == "</S>":
break
# only keep span predictions for span nodes, then map back to tree
res = [(w, b, e) if w.startswith("BE:") else (w, -1, -1) for w, b, e in res]
idx_rev_map = [(0, 0)] * len(text.split())
for line_id, idx_map in enumerate(idx_maps):
for pre_id, (a, b) in enumerate(idx_map):
idx_rev_map[a] = (line_id, pre_id)
idx_rev_map[b] = (line_id, pre_id)
idx_rev_map[-1] = idx_rev_map[-2]
res_tree, _ = seq_to_tree(dataset.full_tree, res[1:-1], idx_rev_map)
return (res_tree, noop_predicted, (text, res))
# beam prediction. Only uses node prediction scores (not the span scores)
def beam_search(txt, model, tokenizer, dataset, beam_size=5, well_formed_pen=1e2):
model_device = model.decoder.lm_head.predictions.decoder.weight.device
# prepare batch
text, idx_maps = tokenize_mapidx(txt, tokenizer)
idx_rev_map = [(0, 0)] * len(text.split())
for line_id, idx_map in enumerate(idx_maps):
for pre_id, (a, b) in enumerate(idx_map):
idx_rev_map[a] = (line_id, pre_id)
idx_rev_map[b] = (line_id, pre_id)
idx_rev_map[-1] = idx_rev_map[-2]
tree = [("<S>", -1, -1)]
text_idx_ls = [dataset.tokenizer._convert_token_to_id(w) for w in text.split()]
tree_idx_ls = [[dataset.tree_idxs[w], bi, ei] for w, bi, ei in tree]
pre_batch = [(text_idx_ls, tree_idx_ls, (text, txt, {}))]
batch = caip_collate(pre_batch, tokenizer)
batch = [t.to(model_device) for t in batch[:4]]
x, x_mask, y, y_mask = batch
x_reps = model.encoder(input_ids=x, attention_mask=x_mask)[0].detach()
x_mask = x_mask.expand(beam_size, -1)
x_reps = x_reps.expand(beam_size, -1, -1)
# start decoding
y = torch.LongTensor([[dataset.tree_idxs["<S>"]] for _ in range(beam_size)]).to(
model_device
) # B x 1
beam_scores = torch.Tensor([-1e9 for _ in range(beam_size)]).to(model_device) # B
beam_scores[0] = 0
beam_seqs = [[("<S>", -1, -1)] for _ in range(beam_size)]
finished = [False for _ in range(beam_size)]
pad_scores = torch.Tensor([-1e9] * len(dataset.tree_voc)).to(model_device)
pad_scores[dataset.tree_idxs["[PAD]"]] = 0
for i in range(100):
outputs = model.decoder.step(y, y_mask, x_reps, x_mask)
# next word
lm_scores = outputs["lm_scores"][:, -1, :] # B x V
for beam_id, fshed in enumerate(finished):
    if fshed:
        lm_scores[beam_id] = pad_scores
beam_lm_scores = lm_scores + beam_scores[:, None] # B x V
beam_lm_lin = beam_lm_scores.view(-1)
s_scores, s_ids = beam_lm_lin.sort(dim=-1, descending=True)
s_beam_ids = s_ids // beam_lm_scores.shape[-1]
s_word_ids = s_ids % beam_lm_scores.shape[-1]
# re-order and add next token
beam_scores = s_scores[:beam_size]
n_beam_ids = s_beam_ids[:beam_size]
n_word_ids = s_word_ids[:beam_size]
n_words = [dataset.tree_voc[nw_id.item()] for nw_id in n_word_ids]
y = torch.cat([y[n_beam_ids], n_word_ids[:, None]], dim=1)
# find out which of the beams are finished
pre_finished = [finished[b_id.item()] for b_id in n_beam_ids]
new_finished = [w_id.item() == dataset.tree_idxs["</S>"] for w_id in n_word_ids]
finished = [p or n for p, n in zip(pre_finished, new_finished)]
n_mask = 1 - torch.Tensor(finished).type_as(y_mask)
y_mask = torch.cat([y_mask[n_beam_ids], n_mask[:, None]], dim=1)
# predicted span
span_b_scores = outputs["span_b_scores"][:, -1, :][n_beam_ids] # B x T
span_e_scores = outputs["span_e_scores"][:, -1, :][n_beam_ids] # B x T
span_be_scores = span_b_scores[:, :, None] + span_e_scores[:, None, :]
invalid_scores = torch.tril(torch.ones(span_be_scores.shape), diagonal=-1) * -1e9
span_be_scores += invalid_scores.type_as(span_be_scores)
span_be_lin = span_be_scores.view(span_be_scores.shape[0], -1)
_, s_sbe_ids = span_be_lin.sort(dim=-1, descending=True)
s_sb_ids = s_sbe_ids[:, 0] // span_b_scores.shape[-1]
s_se_ids = s_sbe_ids[:, 0] % span_b_scores.shape[-1]
beam_b_ids = [bb_id.item() for bb_id in s_sb_ids]
beam_e_ids = [be_id.item() for be_id in s_se_ids]
# update beam_seq
beam_seqs = [
beam_seqs[n_beam_ids[i].item()] + [(n_words[i], beam_b_ids[i], beam_e_ids[i])]
for i in range(beam_size)
]
# penalize poorly formed trees
for i, seq in enumerate(beam_seqs):
if seq[-1][0] == "</S>":
_, well_formed = select_spans(seq)
if not well_formed:
beam_scores[i] -= well_formed_pen
# check whether all beams have reached EOS
if all(finished):
break
# only keep span predictions for span nodes, then map back to tree
beam_seqs = [
[(w, b, e) if w.startswith("BE:") else (w, -1, -1) for w, b, e in res if w != "[PAD]"]
for res in beam_seqs
]
# delinearize predicted sequences into tree
beam_trees = [seq_to_tree(dataset.full_tree, res[1:-1], idx_rev_map)[0] for res in beam_seqs]
pre_res = [
(tree, score.item(), seq) for tree, score, seq in zip(beam_trees, beam_scores, beam_seqs)
]
# sort one last time to have well-formed trees on top
res = sorted(pre_res, key=lambda x: x[1], reverse=True)
return res
# util function for validation and selecting hard examples
def compute_accuracy(outputs, y):
lm_targets = y[:, 1:, 0]
lm_preds = outputs["lm_scores"].max(dim=-1)[1]
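# ids 0-6 are special tokens ([PAD] ... "<S>" in spec_tokens from utils_caip);
# "> 6" keeps "</S>" and real tree tokens in the node accuracy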
lm_acc = ((lm_preds == lm_targets) * (lm_targets > 6)).sum(dim=1) == (lm_targets > 6).sum(
dim=1
)
sb_targets = y[:, 1:, 1]
sb_preds = outputs["span_b_scores"].max(dim=-1)[1]
sb_acc = ((sb_preds == sb_targets) * (sb_targets >= 0)).sum(dim=1) == (sb_targets >= 0).sum(
dim=1
)
se_targets = y[:, 1:, 2]
se_preds = outputs["span_e_scores"].max(dim=-1)[1]
se_acc = ((se_preds == se_targets) * (se_targets >= 0)).sum(dim=1) == (se_targets >= 0).sum(
dim=1
)
sp_acc = sb_acc * se_acc
full_acc = lm_acc * sp_acc
return (lm_acc, sp_acc, full_acc)
# --------------------------
# Custom wrapper around the Adam/Adagrad optimizers:
# handles lr warmup and a smaller lr for encoder fine-tuning
# --------------------------
class OptimWarmupEncoderDecoder(object):
def __init__(self, model, args):
self.encoder = model.encoder
self.decoder = model.decoder
self.lr = {"encoder": args.encoder_learning_rate, "decoder": args.decoder_learning_rate}
self.warmup_steps = {
"encoder": args.encoder_warmup_steps,
"decoder": args.decoder_warmup_steps,
}
if args.optimizer == "adam":
self.optimizers = {
"encoder": Adam(model.encoder.parameters(), lr=self.lr["encoder"]),
"decoder": Adam(model.decoder.parameters(), lr=self.lr["decoder"]),
}
elif args.optimizer == "adagrad":
self.optimizers = {
"encoder": Adagrad(model.encoder.parameters(), lr=self.lr["encoder"]),
"decoder": Adagrad(model.decoder.parameters(), lr=self.lr["decoder"]),
}
else:
raise NotImplementedError
self._step = 0
def _update_rate(self, stack):
return self.lr[stack] * min(
(self._step / self.warmup_steps[stack]), (self._step / self.warmup_steps[stack]) ** 0.5
)
def zero_grad(self):
    for optimizer in self.optimizers.values():
        optimizer.zero_grad()
def step(self):
self._step += 1
for stack, optimizer in self.optimizers.items():
new_rate = self._update_rate(stack)
for param_group in optimizer.param_groups:
param_group["lr"] = new_rate
optimizer.step()
# --------------------------
# Label smoothing loss
# --------------------------
class LabelSmoothingLoss(nn.Module):
"""
With label smoothing,
KL-divergence between q_{smoothed ground truth prob.}(w)
and p_{prob. computed by model}(w) is minimized.
"""
def __init__(self, label_smoothing, tgt_vocab_size, ignore_index=-1):
assert 0.0 <= label_smoothing <= 1.0
super(LabelSmoothingLoss, self).__init__()
self.ignore_index = ignore_index
self.voc_size = tgt_vocab_size
if ignore_index >= 0:
self.smoothing = label_smoothing / (tgt_vocab_size - 2)
else:
self.smoothing = label_smoothing / (tgt_vocab_size - 1)
self.confidence = 1.0 - label_smoothing
def forward(self, output, target):
"""
output (FloatTensor): batch_size x n_classes
target (LongTensor): batch_size
"""
with torch.no_grad():
s_target = torch.zeros_like(output)
s_target.fill_(self.smoothing)
if self.ignore_index >= 0:
s_target[:, self.ignore_index] = 0
t_cap = target.masked_fill(target == self.ignore_index, 0)
s_target.scatter_(1, t_cap.unsqueeze(1), self.confidence)
kl_div = F.kl_div(output.log_softmax(dim=-1), s_target, reduction="none")
kl_mask = (target != self.ignore_index).type_as(kl_div).unsqueeze(1)
return (kl_div * kl_mask).sum(dim=-1)
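# Hedged usage sketch (toy shapes invented for illustration):
def _example_label_smoothing():
    loss_fn = LabelSmoothingLoss(0.1, tgt_vocab_size=10, ignore_index=0)
    logits = torch.randn(4, 10)  # batch_size x n_classes
    targets = torch.tensor([1, 2, 0, 3])  # class 0 is masked out of the loss
    return loss_fn(logits, targets)  # per-example smoothed KL, shape (4,)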
| craftassist-master | acl2020_submission/model_training_code/utils_parsing.py |
import json
import math
import pickle
import torch
from transformers import AutoModel, AutoTokenizer, BertConfig
from utils_parsing import *
from utils_caip import *
from train_model import *
class TTADBertModel(object):
def __init__(self, model_dir, data_dir, model_name="caip_test_model"):
model_name = model_dir + model_name
args = pickle.load(open(model_name + "_args.pk", "rb"))
args.data_dir = data_dir
self.tokenizer = AutoTokenizer.from_pretrained(args.pretrained_encoder_name)
full_tree, tree_i2w = json.load(open(model_name + "_tree.json"))
self.dataset = CAIPDataset(
self.tokenizer, args, prefix="", full_tree_voc=(full_tree, tree_i2w)
)
enc_model = AutoModel.from_pretrained(args.pretrained_encoder_name)
bert_config = BertConfig.from_pretrained("bert-base-uncased")
bert_config.is_decoder = True
bert_config.vocab_size = len(tree_i2w) + 8
bert_config.num_hidden_layers = args.num_decoder_layers
dec_with_loss = DecoderWithLoss(bert_config, args, self.tokenizer)
self.encoder_decoder = EncoderDecoderWithLoss(enc_model, dec_with_loss, args)
map_location = None if torch.cuda.is_available() else torch.device("cpu")
self.encoder_decoder.load_state_dict(
torch.load(model_name + ".pth", map_location=map_location), strict=False
)
self.encoder_decoder = (
self.encoder_decoder.cuda()
if torch.cuda.is_available()
else self.encoder_decoder.cpu()
)
self.encoder_decoder.eval()
def parse(self, chat, noop_thres=0.95, beam_size=5, well_formed_pen=1e2):
btr = beam_search(
chat, self.encoder_decoder, self.tokenizer, self.dataset, beam_size, well_formed_pen
)
if btr[0][0].get("dialogue_type", "NONE") == "NOOP" and math.exp(btr[0][1]) < noop_thres:
tree = btr[1][0]
else:
tree = btr[0][0]
return tree
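# Hedged usage sketch (paths are hypothetical; the model directory must contain
# the "<model_name>_args.pk", "<model_name>_tree.json" and "<model_name>.pth"
# files loaded above):
# model = TTADBertModel(model_dir="models/ttad_bert/", data_dir="annotated_data/")
# tree = model.parse("build a red cube over there")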
| craftassist-master | acl2020_submission/model_training_code/query_model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import scipy
import scipy.ndimage  # needed for gaussian_filter1d below
from scipy.optimize import Bounds, LinearConstraint, minimize, SR1
import pdb
import math
import numpy.random
import time
from scipy.interpolate import UnivariateSpline, splrep, BSpline, splev
import torch
n = 500
#G = torch.tensor([1-i/(n+1) for i in range(n)])
G = torch.tensor([1.0 for i in range(n)])
# CIFAR10 approx pattern
#G = torch.concatenate((1.0*torch.ones(7*n//8), 0.5*torch.ones(n//8)))
# Imagenet like
#G = torch.tensor([1.0 + 1.0*i/n for i in range(n)])
#G = torch.tensor([1.0 - 0.5*i/n for i in range(n)])
#G = torch.tensor([min(0.1, 1.0/math.sqrt(i+1)) for i in range(n)])
#G = torch.concatenate((10.0*torch.tensor([1-i/(n+1) for i in range(n//4)]), 1.0*torch.tensor([1-i/(n+1) for i in range(n//4)]), 0.1*torch.ones(n//2)))
G = torch.concatenate((
torch.tensor([max(1, 10*(1-i/(n//10+1))) for i in range(n//10)]),
torch.tensor([1.0 for i in range(9*n//10)])))
# This one gives very promising shapes!
# It gives a learning rate warmup at the beginning,
# with a fall-off that's more gradual and cosine-like.
# G = torch.concatenate((
# torch.tensor([max(1, 10*(1-i/(n//10+1))) for i in range(n//10)]),
# torch.tensor([1.0 + (i/(9*n//10)) for i in range(9*n//10)])))
# No warmup version
#G = torch.tensor([1.0 + 1.0*i/n for i in range(n)])
# G = torch.concatenate((
# torch.tensor([((i+1)/(n//100+1)) for i in range(n//100)]),
# torch.tensor([1.0 + (i/((99*n)//100)) for i in range((99*n)//100)])))
# G = torch.concatenate((
# torch.tensor([max(1, 2*(1-i/(n//10+1))) for i in range(n//10)]),
# torch.tensor([1.0 - 0.3*(i/(9*n//10)) for i in range(9*n//10)])))
# spl = splrep(x=[0, n//10, n], y=[10, 1, 2], k=2)
# spl(range(n))
G = torch.tensor(scipy.ndimage.gaussian_filter1d(G, sigma=30))
constrain_decreasing = False
D = 1.0
Dsq = D**2
Gsq = G**2
numpy.random.seed(42)
mask = np.zeros(n)
mask[0] = 1
mask = torch.tensor(mask)
def lamb_from_increments_torch(x):
xmod = x.sub(x*mask) # Set first entry to 0
v = torch.exp(-xmod)
cexp = torch.cumprod(v, dim=0)
cexp_shift = cexp * x[0]
#pdb.set_trace()
return cexp_shift
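# As implemented: lamb[0] = x[0] and lamb[k] = x[0] * exp(-(x[1] + ... + x[k])),
# so non-negative increments x[1:] parametrize a non-increasing positive
# step-size sequence (enforced via the bounds when constrain_decreasing is set).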
def lamb_from_increments(xraw):
if not torch.is_tensor(xraw):
x = torch.tensor(xraw, dtype=torch.float64)
else:
x = xraw
result = lamb_from_increments_torch(x)
if torch.is_tensor(xraw):
return result
else:
return result.numpy()
def lamb_to_increments(yraw):
if not torch.is_tensor(yraw):
y = torch.tensor(yraw, dtype=torch.float64)
else:
y = yraw
def inv_cum_prod(v):
return torch.exp(torch.diff(torch.log(v)))
log_incs = -torch.log(inv_cum_prod(y))
result = torch.concatenate(
(torch.tensor([y[0]]), log_incs))
if torch.is_tensor(yraw):
return result
else:
return result.numpy()
y0 = np.flip(np.cumsum(np.abs(numpy.random.normal(size=n))))/n
x0 = lamb_to_increments(y0)
assert np.all(np.isclose(lamb_from_increments(x0), y0))
def func(x_raw):
if torch.is_tensor(x_raw):
x = x_raw
else:
x = torch.tensor(x_raw,
dtype=torch.float64,
requires_grad=True)
# Convert to cumulative value
lamb = lamb_from_increments_torch(x)
lamb_sq = lamb*lamb
lamb_flip = lamb.flip(dims=(0,))
lamb_sum = torch.sum(lamb)
lamb_sq_flip = lamb_flip*lamb_flip
Gsq_flip = Gsq.flip(dims=(0,))
t1 = 0.5*Dsq/lamb_sum # Distance error term
t2 = 0.5/lamb_sum # Gradient error term
t2 *= torch.sum(Gsq*lamb_sq)
inner_cumsum = torch.cumsum(Gsq_flip*lamb_sq_flip, dim=0)
denom_cumsum = torch.cumsum(lamb_flip, dim=0)
eval = lamb_flip[1:]*inner_cumsum[1:]/(denom_cumsum[1:]*(denom_cumsum[1:]-lamb_flip[1:]))
t3 = 0.5*torch.sum(eval)
fval = (t1+t2+t3) #/max(G/D,D/G)
fval.backward()
if torch.is_tensor(x_raw):
return fval.item()
else:
g = list(np.copy(x.grad.numpy()))
return (fval.item(), g)
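# What func computes, written without the flips: with S = sum_i lamb_i and
# tail sums T_k = sum_{j >= k} lamb_j,
#   f(lamb) = D^2 / (2 S)
#           + (1 / (2 S)) * sum_i G_i^2 * lamb_i^2
#           + (1/2) * sum_{k=0}^{n-2} lamb_k * (sum_{j >= k} G_j^2 lamb_j^2)
#                                     / (T_k * T_{k+1})
# using T_k - lamb_k = T_{k+1}; the flip/cumsum trick just evaluates the
# tail sums efficiently.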
# Test
fx0, fgx0 = func(x0)
start = time.time()
if constrain_decreasing:
bounds = [(1e-12, np.inf)] + [(0, 10) for _ in range(n-1)]
else:
bounds = [(1e-12, np.inf)] + [(-10, 10) for _ in range(n-1)]
print(f"Starting solve...")
xopt_inc, fopt, dopt = scipy.optimize.fmin_l_bfgs_b(
func, x0,
bounds = bounds,
iprint = 0,
factr = 10.0, # High accuracy
maxls = 100000,
maxfun = 100000,
pgtol=1e-10,
m=20,
)
end = time.time()
xopt = lamb_from_increments(xopt_inc)
assert dopt['warnflag'] == 0
print(f"Time taken: {end - start}")
print(f"Steps to convergence: {dopt['funcalls']}")
#print(f"grad: {dopt['grad']}")
#print(xopt)
print(f"xopt[0]: {xopt[0]}")
print(f"xopt[-1]: {xopt[-1]}")
print(f"xopt[0]/xopt[-1]: {xopt[0]/xopt[-1]}")
print(f"fval: {fopt}")
print(f"fval * sqrt(n): {fopt * math.sqrt(n)} ")
cosine_curve = [D/(math.sqrt(n)) * 0.5 * (1 + math.cos((i/n) * math.pi)) for i in range(n)]
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.titlesize'] = 5
mpl.rcParams['axes.labelsize'] = 5
mpl.rcParams['font.size'] = 4.2
mpl.rcParams['legend.fontsize'] = 4.2
linewidth = '0.2'
mpl.rcParams['lines.markersize'] = 1.0
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['xtick.major.width'] = linewidth
mpl.rcParams['ytick.major.width'] = linewidth
fig = plt.figure(figsize=(4, 5))
ax = fig.add_subplot(3, 1, 1)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence (final={xopt[-1]})")
ax.plot(range(1, n+1), xopt, 'k')
ax.plot(range(1, n+1), [(1-i/(n+1))*D/(math.sqrt(n)) for i in range(n)], color='purple')
ax.plot(range(1, n+1), cosine_curve, color='r')
ax.hlines(y=D/(math.sqrt(n)), xmin=1, xmax=n, color='b')
ax.hlines(y=(1-n/(n+1))*D/(math.sqrt(n)), xmin=1, xmax=n, color='y')
ax.plot(range(1, n+1), [((1-i/(n+1))**0.5)*D/(math.sqrt(n)) for i in range(n)], color='pink')
plt.tight_layout()
ax = fig.add_subplot(3, 1, 2)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence")
ax.plot(range(1, n+1), xopt, 'k')
ax.plot(range(1, n+1), [(1-i/(n+1))*D/(math.sqrt(n)) for i in range(n)], color='purple')
ax.plot(range(1, n+1), cosine_curve, color='r')
ax.plot(range(1, n+1), [((1-i/(n+1))**0.5)*D/(math.sqrt(n)) for i in range(n)], color='pink')
ax.hlines(y=D/(math.sqrt(n)), xmin=1, xmax=n, color='b')
ax.hlines(y=(1-n/(n+1))*D/(math.sqrt(n)), xmin=1, xmax=n, color='y')
ax.set_yscale('log')
plt.tight_layout()
ax = fig.add_subplot(3, 1, 3)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('G')
ax.set_title(f"Gradient norm sequence")
ax.plot(range(1, n+1), G, 'k')
plt.tight_layout()
fname = "lamb_lbfgs_seq.png"
plt.savefig(fname, bbox_inches='tight', pad_inches=0, dpi=300)
print(f"Saved {fname}")
plt.close()
plt.close('all')
| adaptive_scheduling-main | solve_gradient_seq.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import scipy
from scipy.optimize import Bounds, LinearConstraint, minimize, SR1
import pdb
import math
import numpy.random
import time
from scipy.interpolate import UnivariateSpline, splrep, BSpline, splev
import torch
n = 500
#G = torch.tensor([1-i/(n+1) for i in range(n)])
G = torch.tensor([1.0 for i in range(n)])
# CIFAR10 approx pattern
#G = torch.concatenate((1.0*torch.ones(7*n//8), 0.5*torch.ones(n//8)))
# Imagenet like
#G = torch.tensor([1.0 + 1.0*i/n for i in range(n)])
#G = torch.tensor([1.0 - 0.5*i/n for i in range(n)])
#G = torch.tensor([min(0.1, 1.0/math.sqrt(i+1)) for i in range(n)])
#G = torch.concatenate((10.0*torch.tensor([1-i/(n+1) for i in range(n//4)]), 1.0*torch.tensor([1-i/(n+1) for i in range(n//4)]), 0.1*torch.ones(n//2)))
G = torch.concatenate((
torch.tensor([max(1, 10*(1-i/(n//10+1))) for i in range(n//10)]),
torch.tensor([1.0 for i in range(9*n//10)])))
# This one gives very promising shapes!
# It gives a learning rate warmup at the beginning,
# with a fall-off that's more gradual and cosine-like.
# G = torch.concatenate((
# torch.tensor([max(1, 10*(1-i/(n//10+1))) for i in range(n//10)]),
# torch.tensor([1.0 + (i/(9*n//10)) for i in range(9*n//10)])))
# No warmup version
#G = torch.tensor([1.0 + 1.0*i/n for i in range(n)])
# G = torch.concatenate((
# torch.tensor([((i+1)/(n//100+1)) for i in range(n//100)]),
# torch.tensor([1.0 + (i/((99*n)//100)) for i in range((99*n)//100)])))
# G = torch.concatenate((
# torch.tensor([max(1, 2*(1-i/(n//10+1))) for i in range(n//10)]),
# torch.tensor([1.0 - 0.3*(i/(9*n//10)) for i in range(9*n//10)])))
# spl = splrep(x=[0, n//10, n], y=[10, 1, 2], k=2)
# spl(range(n))
#G = torch.tensor(scipy.ndimage.gaussian_filter1d(G, sigma=30))
D = 1.0
Dsq = D**2
Gsq = G**2
numpy.random.seed(42)
mask = np.zeros(n)
mask[0] = 1
mask = torch.tensor(mask)
x0 = np.array([D/(math.sqrt(n)) for _ in range(n)])
def func(x_raw):
if torch.is_tensor(x_raw):
x = x_raw
else:
x = torch.tensor(x_raw,
dtype=torch.float64,
requires_grad=True)
# Convert to cumulative value
lamb = x
lamb_sq = lamb*lamb
lamb_flip = lamb.flip(dims=(0,))
lamb_sum = torch.sum(lamb)
lamb_sq_flip = lamb_flip*lamb_flip
Gsq_flip = Gsq.flip(dims=(0,))
t1 = 0.5*Dsq/lamb_sum # Distance error term
t2 = 0.5/lamb_sum # Gradient error term
t2 *= torch.sum(Gsq*lamb_sq)
inner_cumsum = torch.cumsum(Gsq_flip*lamb_sq_flip, dim=0)
denom_cumsum = torch.cumsum(lamb_flip, dim=0)
eval = lamb_flip[1:]*inner_cumsum[1:]/(denom_cumsum[1:]*(denom_cumsum[1:]-lamb_flip[1:]))
t3 = 0.5*torch.sum(eval)
fval = (t1+t2+t3) #/max(G/D,D/G)
fval.backward()
if torch.is_tensor(x_raw):
return fval.item()
else:
g = list(np.copy(x.grad.numpy()))
return (fval.item(), g)
# Test
fx0, fgx0 = func(x0)
start = time.time()
bounds = [(1e-12, np.inf) for _ in range(n)]
print(f"Starting solve...")
xopt_inc, fopt, dopt = scipy.optimize.fmin_l_bfgs_b(
func, x0,
bounds = bounds,
iprint = 0,
factr = 10.0, # High accuracy
maxls = 100000,
maxfun = 100000,
pgtol=1e-10,
m=20,
)
end = time.time()
xopt = xopt_inc
assert dopt['warnflag'] == 0
print(f"Time taken: {end - start}")
print(f"Steps to convergence: {dopt['funcalls']}")
#print(f"grad: {dopt['grad']}")
#print(xopt)
print(f"xopt[0]: {xopt[0]}")
print(f"xopt[-1]: {xopt[-1]}")
print(f"xopt[0]/xopt[-1]: {xopt[0]/xopt[-1]}")
print(f"fval: {fopt}")
print(f"fval * sqrt(n): {fopt * math.sqrt(n)} ")
cosine_curve = [D/(math.sqrt(n)) * 0.5 * (1 + math.cos((i/n) * math.pi)) for i in range(n)]
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.titlesize'] = 5
mpl.rcParams['axes.labelsize'] = 5
mpl.rcParams['font.size'] = 4.2
mpl.rcParams['legend.fontsize'] = 4.2
linewidth = '0.2'
mpl.rcParams['lines.markersize'] = 1.0
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['xtick.major.width'] = linewidth
mpl.rcParams['ytick.major.width'] = linewidth
fig = plt.figure(figsize=(4, 5))
ax = fig.add_subplot(3, 1, 1)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence (final={xopt[-1]})")
ax.plot(range(1, n+1), xopt, 'k')
ax.plot(range(1, n+1), [(1-i/(n+1))*D/(math.sqrt(n)) for i in range(n)], color='purple')
ax.plot(range(1, n+1), cosine_curve, color='r')
ax.hlines(y=D/(math.sqrt(n)), xmin=1, xmax=n, color='b')
ax.hlines(y=(1-n/(n+1))*D/(math.sqrt(n)), xmin=1, xmax=n, color='y')
ax.plot(range(1, n+1), [((1-i/(n+1))**0.5)*D/(math.sqrt(n)) for i in range(n)], color='pink')
plt.tight_layout()
ax = fig.add_subplot(3, 1, 2)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence")
ax.plot(range(1, n+1), xopt, 'k')
ax.plot(range(1, n+1), [(1-i/(n+1))*D/(math.sqrt(n)) for i in range(n)], color='purple')
ax.plot(range(1, n+1), cosine_curve, color='r')
ax.plot(range(1, n+1), [((1-i/(n+1))**0.5)*D/(math.sqrt(n)) for i in range(n)], color='pink')
ax.hlines(y=D/(math.sqrt(n)), xmin=1, xmax=n, color='b')
ax.hlines(y=(1-n/(n+1))*D/(math.sqrt(n)), xmin=1, xmax=n, color='y')
ax.set_yscale('log')
plt.tight_layout()
ax = fig.add_subplot(3, 1, 3)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('G')
ax.set_title(f"Gradient norm sequence")
ax.plot(range(1, n+1), G, 'k')
plt.tight_layout()
fname = "lamb_lbfgs_seq.png"
plt.savefig(fname, bbox_inches='tight', pad_inches=0, dpi=300)
print(f"Saved {fname}")
plt.close()
plt.close('all')
| adaptive_scheduling-main | solve_gradient_simple.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import scipy
from scipy.optimize import Bounds, LinearConstraint, minimize, SR1
import pdb
import math
import numpy.random
import time
import torch
n = 1000
G = 1.0
D = 1.0
Gsq = G**2
Dsq = D**2
numpy.random.seed(42)
mask = np.zeros(n)
mask[0] = 1
mask = torch.tensor(mask)
def lamb_from_increments_torch(x):
xmod = x.sub(x*mask) # Set first entry to 0
v = torch.exp(-xmod)
cexp = torch.cumprod(v, dim=0)
cexp_shift = cexp * x[0]
#pdb.set_trace()
return cexp_shift
def lamb_from_increments(xraw):
if not torch.is_tensor(xraw):
x = torch.tensor(xraw, dtype=torch.float64)
else:
x = xraw
result = lamb_from_increments_torch(x)
if torch.is_tensor(xraw):
return result
else:
return result.numpy()
def lamb_to_increments(yraw):
if not torch.is_tensor(yraw):
y = torch.tensor(yraw, dtype=torch.float64)
else:
y = yraw
def inv_cum_prod(v):
return torch.exp(torch.diff(torch.log(v)))
log_incs = -torch.log(inv_cum_prod(y))
result = torch.concatenate(
(torch.tensor([y[0]]), log_incs))
if torch.is_tensor(yraw):
return result
else:
return result.numpy()
y0 = np.flip(np.cumsum(np.abs(numpy.random.normal(size=n))))/n
x0 = lamb_to_increments(y0)
assert np.all(np.isclose(lamb_from_increments(x0), y0))
def func(x_raw):
if torch.is_tensor(x_raw):
x = x_raw
else:
x = torch.tensor(x_raw,
dtype=torch.float64,
requires_grad=True)
lamb = lamb_from_increments_torch(x)
lamb_flip = lamb.flip(dims=(0,))
lamb_sum = torch.sum(lamb)
lamb_sq_flip = lamb_flip*lamb_flip
t1 = 0.5*Dsq/lamb_sum # Distance error term
t2 = 0.5*Gsq/lamb_sum # Gradient error term
t2 *= torch.sum(lamb_sq_flip)
inner_cumsum = torch.cumsum(lamb_sq_flip, dim=0)
denom_cumsum = torch.cumsum(lamb_flip, dim=0)
eval = lamb_flip[1:]*inner_cumsum[1:]/(denom_cumsum[1:]*(denom_cumsum[1:]-lamb_flip[1:]))
t3 = 0.5*Gsq*torch.sum(eval)
fval = (t1+t2+t3) #/max(G/D,D/G)
fval.backward()
if torch.is_tensor(x_raw):
return fval.item()
else:
g = list(np.copy(x.grad.numpy()))
return (fval.item(), g)
# Test
fx0, fgx0 = func(x0)
start = time.time()
bounds = [(1e-12, np.inf)] + [(0, 10) for _ in range(n-1)]
print(f"Starting solve...")
xopt_inc, fopt, dopt = scipy.optimize.fmin_l_bfgs_b(
func, x0,
bounds = bounds,
iprint = 0,
factr = 10.0, # High accuracy
maxls = 100000,
maxfun = 100000,
pgtol=1e-10,
m=20,
)
end = time.time()
xopt = lamb_from_increments(xopt_inc)
assert dopt['warnflag'] == 0
print(f"Time taken: {end - start}")
print(f"Steps to convergence: {dopt['funcalls']}")
#print(f"grad: {dopt['grad']}")
#print(xopt)
print(f"xopt[0]: {xopt[0]}")
print(f"xopt[-1]: {xopt[-1]}")
print(f"xopt[0]/xopt[-1]: {xopt[0]/xopt[-1]}")
print(f"fval: {fopt}")
print(f"fval * sqrt(n): {fopt * math.sqrt(n)} ")
def func1d(x_raw):
eta = torch.tensor(x_raw,
dtype=torch.float64,
requires_grad=True)
t1 = Dsq/(2*n*eta)
t2 = Gsq*eta/2
t3 = (Gsq*eta/2)*torch.sum(1/torch.arange(1, n))
fval = (t1+t2+t3)#/max(G/D,D/G)
fval.backward()
if torch.is_tensor(x_raw):
return fval.item()
else:
g = list(np.copy(eta.grad.numpy()))
return (fval.item(), g)
xopt_1d, fopt_1d, dopt_1d = scipy.optimize.fmin_l_bfgs_b(
func1d, np.array([y0[0]]), bounds = [(1e-8, 100)],
iprint = 0
)
assert dopt_1d['warnflag'] == 0
xopt_1d = xopt_1d[0]
print(f"1D grad: {dopt_1d['grad']}")
print(f"1D Steps to convergence: {dopt_1d['funcalls']}")
#print(f"grad: {dopt_1d['grad']}")
print(f"eta 1d: {xopt_1d}")
print(f"1D fval: {fopt_1d}")
theory_eta = D/(G*math.sqrt(n*(2+math.log(n-1))))
theory1d = (D*G*math.sqrt(2+math.log(n-1))/math.sqrt(n))#/max(G/D,D/G)
print(f"Theory eta: {theory_eta}")
print(f"theory 1d fval: {theory1d}")
print(f"1d/full ratio: {fopt_1d/fopt}")
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.titlesize'] = 5
mpl.rcParams['axes.labelsize'] = 5
mpl.rcParams['font.size'] = 4.2
mpl.rcParams['legend.fontsize'] = 4.2
linewidth = '0.2'
mpl.rcParams['lines.markersize'] = 1.0
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['xtick.major.width'] = linewidth
mpl.rcParams['ytick.major.width'] = linewidth
fig = plt.figure(figsize=(4, 3))
ax = fig.add_subplot(2, 1, 1)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence v.s. optimal flat Dsq={D} Gsq={G}")
ax.plot(range(1, n+1), xopt, 'k')
ax.hlines(y=xopt_1d, xmin=1, xmax=n, color='r')
ax.hlines(y=D/(G*math.sqrt(n)), xmin=1, xmax=n, color='b')
#ax.set_yscale('log')
plt.tight_layout()
ax = fig.add_subplot(2, 1, 2)
plt.tight_layout()
ax.set_xlabel('k')
ax.set_ylabel('lamb')
ax.set_title(f"Optimal step size sequence v.s. optimal flat D={D} G={G}")
ax.plot(range(1, n+1), xopt, 'k')
ax.hlines(y=xopt_1d, xmin=1, xmax=n, color='r')
ax.hlines(y=D/(G*math.sqrt(n)), xmin=1, xmax=n, color='b')
ax.set_yscale('log')
plt.tight_layout()
fname = "lamb_lbfgs.png"
plt.savefig(fname, bbox_inches='tight', pad_inches=0, dpi=300)
print(f"Saved {fname}")
plt.close()
plt.close('all')
| adaptive_scheduling-main | solve_bound.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import logging
from typing import TYPE_CHECKING, Any, Callable, Optional
import torch
import torch.optim
import pdb
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
from fairseq.optim import FairseqOptimizer, register_optimizer
logger = logging.getLogger(__name__)
def gmean(input_x):
log_x = torch.log(input_x.flatten())
return torch.exp(torch.mean(log_x))
class AdaGradFlex(torch.optim.Optimizer):
"""
Adagrad with coordinate-wise flex statistics.
"""
def __init__(
self, params: _params_t,
lr: float = 1.0,
momentum: float = 0,
log_every: int = 0,
weight_decay: float = 0.0,
eps: float = 1e-20,
decouple: bool = True,
):
if lr <= 0:
raise ValueError(f"Learning rate {lr} must be positive")
if momentum < 0:
raise ValueError(f"Momentum {momentum} must be non-negative")
print(f"Weight decay: {weight_decay}")
defaults = dict(lr=lr,
momentum=momentum,
eps=eps,
weight_decay=weight_decay,
log_every=log_every,
k = 0,
numerator_weighted=0.0,
decouple=decouple)
super().__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return False
@property
def supports_flat_params(self):
return True
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
group = self.param_groups[0]
momentum = group['momentum']
ck = 1 - momentum
log_every = group['log_every']
for group in self.param_groups:
eps = group["eps"]
k = group['k']
decay = group['weight_decay']
decouple = group['decouple']
lr = group['lr']
below_one = 0
total = 0
######
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
if "alphak" not in state:
state["alphak"] = torch.zeros_like(p.data).detach()
#state["gsq"] = torch.zeros_like(p.data).detach()
state["gmax"] = torch.zeros_like(p.data).detach()
state['sk'] = torch.zeros_like(p.data).detach()
if momentum > 0:
state["z"] = torch.clone(p.data).detach()
state['flex'] = torch.zeros_like(p.data).detach()
sk = state['sk']
#gsq = state['gsq']
alphak = state['alphak']
gmax = state['gmax']
flex = state['flex']
if grad.is_sparse:
grad = grad.to_dense()
if decay != 0 and not decouple:
grad.add_(p.data, alpha=decay)
flex.add_(grad*grad).sub_(grad * sk)
alphak.copy_(alphak.fmax(flex))
gmax.copy_(gmax.fmax(grad.abs()))
sk.add_(grad)
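# Per coordinate at this point: sk = sum of all gradients seen so far
# (including the current one, added just above), gmax = running max |g|, and
# alphak = running max over time of flex_t = sum_{i<=t} (g_i^2 - g_i * s_{i-1}),
# which replaces plain Adagrad's sum-of-squares accumulator in the
# denominator sqrt(gmax^2 + alphak) used below.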
if decay != 0 and decouple:
p_old = p.data.clone()
if momentum > 0:
z = state['z']
z.sub_(grad.div(torch.sqrt(gmax*gmax + alphak) + eps), alpha=lr)
p.data.mul_(1-ck).add_(z, alpha=ck)
if decay != 0 and decouple:
z.add_(p_old, alpha=-decay * lr)
else:
p.data.sub_(grad.div(torch.sqrt(gmax*gmax + alphak) + eps), alpha=lr)
if decay != 0 and decouple:
p.data.add_(p_old, alpha=-decay * lr)
### Logging
# below_one += ((alphak+eps)/(gmax*gmax + eps) < 1).sum().item()
# total += grad.numel()
# if k % 50 == 0 and k > 0:
# print(f"fraction below 1: {below_one/total}")
# ratio = (alphak+eps)/(gmax*gmax + eps)
# print(f"mean: {ratio.mean()} gmean: {gmean(ratio)} std: {ratio.std()}")
# qs = [0.0, 0.05, 0.10, 0.25, 0.50, 0.75, 0.90, 0.95, 1.0]
# quantiles = torch.quantile(ratio, q=torch.tensor(qs).cuda())
# print(f"quantiles: {list(zip(qs, quantiles))}")
group['k'] = k + 1
return loss
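# Hedged usage sketch (standard torch.optim training loop; model, data and
# hyperparameters are illustrative):
# optimizer = AdaGradFlex(model.parameters(), lr=1.0, momentum=0.9,
#                         weight_decay=1e-4, decouple=True)
# for inputs, targets in loader:
#     optimizer.zero_grad()
#     loss = criterion(model(inputs), targets)
#     loss.backward()
#     optimizer.step()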
| adaptive_scheduling-main | adagrad_flex.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import platform
import re
import sys
from glob import glob
from pybind11.setup_helpers import build_ext, Pybind11Extension
from setuptools import find_packages, setup
REQUIRED_MAJOR = 3
REQUIRED_MINOR = 7
INSTALL_REQUIRES = [
"arviz>=0.12.1",
"astor>=0.7.1",
"black==22.3.0",
"botorch>=0.5.1",
"gpytorch>=1.3.0, <1.9.0",
"graphviz>=0.17",
"netCDF4<=1.5.8; python_version<'3.8'",
"numpy>=1.18.1",
"pandas>=0.24.2",
"plotly>=2.2.1",
"scipy>=0.16",
"statsmodels>=0.12.0",
"torch>=1.9.0, <2.0",
"tqdm>=4.46.0",
"typing-extensions>=3.10",
"xarray>=0.16.0",
]
TEST_REQUIRES = ["pytest>=7.0.0", "pytest-cov"]
TUTORIALS_REQUIRES = [
"bokeh",
"cma",
"ipywidgets",
"jupyter",
"lxml>=4.9",
"matplotlib",
"mdformat",
"mdformat-myst",
"scikit-learn>=1.0.0",
"seaborn",
"tabulate",
"torchvision",
]
DEV_REQUIRES = (
TEST_REQUIRES
+ TUTORIALS_REQUIRES
+ [
"flake8==4.0.1",
"libcst==0.4.1",
"nbval",
"sphinx==4.2.0",
"sphinx-autodoc-typehints",
"sphinx_rtd_theme",
"toml>=0.10.2",
# `black` is included in `INSTALL_REQUIRES` above.
"ufmt==1.3.2",
"usort==1.0.2",
]
)
if platform.system() == "Windows":
CPP_COMPILE_ARGS = [
"/WX",
"/permissive-",
"/std:c++20",
# Ignore utils.h(365) warning C4244 conversion from '__int64' to 'int'.
"/wd4244",
]
else:
CPP_COMPILE_ARGS = ["-std=c++2a", "-Werror"]
# Check for python version
if sys.version_info < (REQUIRED_MAJOR, REQUIRED_MINOR):
error = (
"Your version of python ({major}.{minor}) is too old. You need "
"python >= {required_major}.{required_minor}."
).format(
major=sys.version_info.major,
minor=sys.version_info.minor,
required_minor=REQUIRED_MINOR,
required_major=REQUIRED_MAJOR,
)
sys.exit(error)
# get version string from module
current_dir = os.path.dirname(os.path.abspath(__file__))
init_file = os.path.join(current_dir, "src", "beanmachine", "__init__.py")
version_regexp = r"__version__ = ['\"]([^'\"]*)['\"]"
with open(init_file, "r") as f:
version = re.search(version_regexp, f.read(), re.M).group(1)
# read in README.md as the long description
with open("README.md", "r") as fh:
long_description = fh.read()
# Use absolute path to the src directory
INCLUDE_DIRS = [os.path.join(current_dir, "src")]
# check if we're installing in a conda environment
if "CONDA_PREFIX" in os.environ:
conda_inc = "Library/include" if platform.system() == "Windows" else "include"
conda_include_dir = os.path.join(os.environ["CONDA_PREFIX"], conda_inc)
INCLUDE_DIRS.extend([conda_include_dir, os.path.join(conda_include_dir, "eigen3")])
INCLUDE_DIRS.extend([conda_include_dir, os.path.join(conda_include_dir, "boost")])
if sys.platform.startswith("linux"):
INCLUDE_DIRS.extend(
[
"/usr/include",
"/usr/include/eigen3",
"/usr/include/boost169/",
"/usr/include/x86_64-linux-gnu",
]
)
elif sys.platform.startswith("darwin"):
# MacOS dependencies installed through HomeBrew
INCLUDE_DIRS.extend(
glob("/usr/local/Cellar/eigen/*/include/eigen3")
+ glob("/usr/local/Cellar/boost/*/include")
)
# Add range-v3 'include' directory to configuration
RANGE_V3_INCLUDE_DIR_CANDIDATES = [
c for c in [os.environ.get("RANGE_V3_INCLUDE_DIR")] if c is not None
]
if sys.platform.startswith("linux"):
RANGE_V3_INCLUDE_DIR_CANDIDATES.extend(
[
os.path.join(current_dir, "vcpkg/packages/range-v3_x64-linux/include"),
"/usr/include/range-v3",
]
)
elif sys.platform.startswith("darwin"):
RANGE_V3_INCLUDE_DIR_CANDIDATES.extend(
[
os.path.join(current_dir, "vcpkg/packages/range-v3_x64-osx/include"),
*glob("/usr/local/Cellar/range-v3/*/include"), # Homebrew
]
)
elif platform.system() == "Windows":
RANGE_V3_INCLUDE_DIR_CANDIDATES.extend(
[
os.path.join(current_dir, "vcpkg/packages/range-v3_x86-windows/include"),
# The following option was observed being used on GitHub Actions runner:
"C:/vcpkg/packages/range-v3_x86-windows/include",
]
)
print(
"Checking directories for range-v3 'include':\n",
"\n".join(RANGE_V3_INCLUDE_DIR_CANDIDATES),
)
selected_range_v3_include_dirs = [
candidate
for candidate in RANGE_V3_INCLUDE_DIR_CANDIDATES
if os.path.isdir(candidate)
]
print(
"Existing candidate directories for range-v3 'include':\n",
"\n".join(selected_range_v3_include_dirs),
)
if len(selected_range_v3_include_dirs) == 0:
if os.environ.get("RANGE_V3_INCLUDE_DIR"):
message = (
"Could not find 'include' directory for range-v3 library dependency "
+ f"either at {os.environ.get('RANGE_V3_INCLUDE_DIR')}\n"
+ "as indicated in environment variable RANGE_V3_INCLUDE_DIR, "
+ "nor in some other common locations.\n"
+ "Please make sure library is installed (see README.md) and "
+ "set RANGE_V3_INCLUDE_DIR environment variable to the right directory."
)
else:
message = (
"Could not find 'include' directory for range-v3 library dependency "
+ "in some common locations.\n"
+ "Please make sure library is installed (see README.md). "
+ "You can also manually indicate the correct 'include' directory by "
+ "setting the environment variable RANGE_V3_INCLUDE_DIR environment "
+ "variable to the right directory."
)
message += "Here are the directories we checked:\n " + "\n".join(
RANGE_V3_INCLUDE_DIR_CANDIDATES
)
sys.exit(message)
else:
print(
"Using the following directory for range-v3 'include':\n",
selected_range_v3_include_dirs[0],
)
INCLUDE_DIRS.append(selected_range_v3_include_dirs[0])
setup(
name="beanmachine",
version=version,
description="Probabilistic Programming Language for Bayesian Inference",
author="Meta Platforms, Inc.",
license="MIT",
url="https://beanmachine.org",
project_urls={
"Documentation": "https://beanmachine.org",
"Source": "https://github.com/facebookresearch/beanmachine",
},
keywords=[
"Probabilistic Programming Language",
"Bayesian Inference",
"Statistical Modeling",
"MCMC",
"Variational Inference",
"PyTorch",
],
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
],
long_description=long_description,
long_description_content_type="text/markdown",
python_requires=">={}.{}".format(REQUIRED_MAJOR, REQUIRED_MINOR),
install_requires=INSTALL_REQUIRES,
packages=find_packages("src"),
package_dir={"": "src"},
package_data={"beanmachine/ppl": ["py.typed"]},
ext_modules=[
Pybind11Extension(
name="beanmachine.graph",
sources=sorted(
set(glob("src/beanmachine/graph/**/*.cpp", recursive=True))
- set(glob("src/beanmachine/graph/**/*_test.cpp", recursive=True))
),
include_dirs=INCLUDE_DIRS,
extra_compile_args=CPP_COMPILE_ARGS,
)
],
cmdclass={"build_ext": build_ext},
extras_require={
"dev": DEV_REQUIRES,
"test": TEST_REQUIRES,
"tutorials": TUTORIALS_REQUIRES,
},
)
| beanmachine-main | setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# -- Project information -----------------------------------------------------
project = "Bean Machine"
copyright = "2022, Meta Platforms, Inc."
author = "Meta Platforms, Inc."
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
master_doc = "index"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
| beanmachine-main | website/sphinx/conf.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import re
import shutil
import uuid
import warnings
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union
import mdformat # @manual=fbsource//third-party/pypi/mdformat:mdformat
import nbformat
import pandas as pd
from lxml import etree # pyre-ignore
from nbformat.notebooknode import NotebookNode
try:
from libfb.py.fbcode_root import get_fbcode_dir # pyre-ignore
except ImportError:
SCRIPTS_DIR = Path(__file__).parent.resolve()
LIB_DIR = SCRIPTS_DIR.parent.parent.resolve()
else:
LIB_DIR = (Path(get_fbcode_dir()) / "beanmachine").resolve()
WEBSITE_DIR = LIB_DIR.joinpath("website")
DOCS_DIR = LIB_DIR.joinpath("docs")
OVERVIEW_DIR = DOCS_DIR.joinpath("overview")
TUTORIALS_DIR = OVERVIEW_DIR.joinpath("tutorials")
# Data display priority. The list below ranks the preference for displaying data from
# cell outputs. Cells can emit several representations of the same output, including
# fallback displays, e.g. text/plain when text/html is unavailable. These priorities
# help ensure the output in the MDX file shows the best representation of the cell
# output.
priorities = [
"text/markdown",
"image/png", # matplotlib output.
"application/vnd.jupyter.widget-view+json", # tqdm progress bars.
"application/vnd.bokehjs_load.v0+json", # Bokeh loading output.
"application/vnd.bokehjs_exec.v0+json", # Bokeh `show` outputs.
"application/vnd.plotly.v1+json", # Plotly
"text/html",
"stream",
"text/plain",
]
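# Illustrative sketch (not used by the pipeline itself) of how the priorities resolve
# a cell output that carries several representations: the entry appearing earliest in
# `priorities` wins.
#
#   dtypes = ["text/plain", "image/png"]
#   best = min(dtypes, key=priorities.index)  # -> "image/png"
#
# `prioritize_dtypes` below does the equivalent with a sort and takes element 0.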
def load_nb_metadata() -> Dict[str, Dict[str, str]]:
"""
Load the metadata and list of notebooks that are to be converted to MDX.
Args:
None
Returns:
Dict[str, Dict[str, str]]: A dictionary of metadata needed to convert notebooks
to MDX. Only those notebooks that are listed in the `tutorials.json` file
will be included in the Docusaurus MDX output.
"""
tutorials_json_path = WEBSITE_DIR.joinpath("tutorials.json")
with tutorials_json_path.open("r") as f:
tutorials_data = json.load(f)
return tutorials_data
def load_notebook(path: Path) -> NotebookNode:
"""
Load the given notebook into memory.
Args:
path (Path): Path to the Jupyter notebook.
Returns:
NotebookNode: `nbformat` object, which contains all the notebook cells in it.
"""
with path.open("r") as f:
nb_str = f.read()
nb = nbformat.reads(nb_str, nbformat.NO_CONVERT)
return nb
def create_folders(path: Path) -> Tuple[str, Path]:
"""
Create asset folders for the tutorial.
Args:
path (Path): Path to the Jupyter notebook.
Returns:
Tuple[str, Path]: Returns a tuple with the filename to use for the MDX file
and the path for the MDX assets folder.
"""
tutorial_folder_name = path.stem
filename = "".join([token.title() for token in tutorial_folder_name.split("_")])
tutorial_folder = TUTORIALS_DIR.joinpath(tutorial_folder_name)
assets_folder = tutorial_folder / "assets"
img_folder = assets_folder / "img"
plot_data_folder = assets_folder / "plot_data"
if not tutorial_folder.exists():
tutorial_folder.mkdir(parents=True, exist_ok=True)
if not img_folder.exists():
img_folder.mkdir(parents=True, exist_ok=True)
if not plot_data_folder.exists():
plot_data_folder.mkdir(parents=True, exist_ok=True)
return filename, assets_folder
def create_frontmatter(path: Path, nb_metadata: Dict[str, Dict[str, str]]) -> str:
"""
Create frontmatter for the resulting MDX file.
The frontmatter is the data between the `---` lines in an MDX file.
Args:
path (Path): Path to the Jupyter notebook.
nb_metadata (Dict[str, Dict[str, str]]): The metadata associated with the given
notebook. Metadata is defined in the `tutorials.json` file.
Returns:
str: MDX formatted frontmatter.
"""
# Add the frontmatter to the MDX string. This is the part between the `---` lines
# that define the tutorial sidebar_label information.
frontmatter_delimiter = ["---"]
frontmatter = [
f"{key}: {value}"
for key, value in nb_metadata.get(
path.stem,
{
"title": "",
"sidebar_label": "",
"path": "",
"nb_path": "",
"github": "",
"colab": "",
},
).items()
]
frontmatter = "\n".join(frontmatter_delimiter + frontmatter + frontmatter_delimiter)
mdx = mdformat.text(frontmatter, options={"wrap": 88}, extensions={"myst"})
return f"{mdx}\n"
def create_imports() -> str:
"""
    Create the imports needed for displaying buttons and interactive plots in MDX.
Returns:
str: MDX formatted imports.
"""
link_btn = "../../../../website/src/components/LinkButtons.jsx"
cell_out = "../../../../website/src/components/CellOutput.jsx"
plot_out = "../../../../website/src/components/Plotting.jsx"
imports = f'import LinkButtons from "{link_btn}";\n'
imports += f'import CellOutput from "{cell_out}";\n'
imports += f'import {{BokehFigure, PlotlyFigure}} from "{plot_out}";\n'
return f"{imports}\n"
def create_buttons(
nb_metadata: Dict[str, Dict[str, str]],
tutorial_folder_name: str,
) -> str:
"""
Create buttons that link to Colab and GitHub for the tutorial.
Args:
nb_metadata (Dict[str, Dict[str, str]]): Metadata for the tutorial.
tutorial_folder_name (str): The name of the tutorial folder where the MDX
converted files exist. This is typically just the name of the Jupyter
notebook file.
Returns:
str: MDX formatted buttons.
"""
github_url = nb_metadata[tutorial_folder_name]["github"]
colab_url = nb_metadata[tutorial_folder_name]["colab"]
return f'<LinkButtons\n githubUrl="{github_url}"\n colabUrl="{colab_url}"\n/>\n\n'
def handle_images_found_in_markdown(
markdown: str,
new_img_dir: Path,
lib_dir: Path,
) -> str:
"""
Update image paths in the Markdown, and copy the image to the docs location.
The pattern we search for in the Markdown is
    ``![alt-text](path/to/image.png "title")`` with two groups:
- group 1 = path/to/image.png
- group 2 = "title"
The first group (the path to the image from the original notebook) will be replaced
with ``assets/img/{name}`` where the name is `image.png` from the example above. The
original image will also be copied to the new location
``{new_img_dir}/assets/img/{name}``, which can be directly read into the MDX file.
Args:
markdown (str): Markdown where we look for Markdown flavored images.
new_img_dir (Path): Path where images are copied to for display in the
MDX file.
lib_dir (Path): The location for the Bean Machine repo.
Returns:
str: The original Markdown with new paths for images.
"""
markdown_image_pattern = re.compile(r"""!\[[^\]]*\]\((.*?)(?=\"|\))(\".*\")?\)""")
searches = list(re.finditer(markdown_image_pattern, markdown))
# Return the given Markdown if no images are found.
if not searches:
return markdown
    # Convert the given Markdown to a list so we can replace the old path with the
    # new standard path.
markdown_list = list(markdown)
for search in searches:
# Find the old image path and replace it with the new one.
old_path, _ = search.groups()
start = 0
end = 0
search = re.search(old_path, markdown)
if search is not None:
start, end = search.span()
old_path = Path(old_path)
name = old_path.name.strip()
new_path = f"assets/img/{name}"
del markdown_list[start:end]
markdown_list.insert(start, new_path)
# Copy the original image to the new location.
if old_path.exists():
old_img_path = old_path
else:
# Here we assume the original image exists in the same directory as the
# notebook, which should be in the tutorials folder of the library.
old_img_path = (lib_dir / "tutorials" / old_path).resolve()
new_img_path = str(new_img_dir / name)
shutil.copy(str(old_img_path), new_img_path)
return "".join(markdown_list)
def transform_style_attributes(markdown: str) -> str:
"""
Convert HTML style attributes to something React can consume.
Args:
markdown (str): Markdown where we look for HTML style attributes.
Returns:
str: The original Markdown with new React style attributes.
"""
# Finds all instances of `style="attr: value; ..."`.
token = "style="
pattern = re.compile(f"""{token}["'`]([^"]*)["'`]""")
found_patterns = re.findall(pattern, markdown)
if not found_patterns:
return markdown
for found_pattern in found_patterns:
# Step 1: splits "attr: value; ..." to
# ["attr: value", ..."].
step1 = [token.strip() for token in found_pattern.split(";") if token]
# Step 2: splits ["attr: value", ...] to
# [["attr", "value"], ...].
step2 = [[token.strip() for token in tokens.split(":")] for tokens in step1]
# Step 3: converts [["attr", "value"], ...] to
# '{"attr": "value", ...}'.
step3 = json.dumps(dict(step2))
        # Step 4 wraps the JSON object in {}, so we end up with a string of the form:
        # '{{"attr": "value", ...}}'.
        step4 = f"{{{step3}}}"
        # Step 5 replaces the old style data with the React style data, and cleans
        # the string for inclusion in the final Markdown.
markdown = markdown.replace(found_pattern, step4)
markdown = markdown.replace('"{{', "{{").replace('}}"', "}}")
return markdown
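# Illustrative before/after for the transformation above:
#   HTML input:   <div style="margin-bottom: 10px; color: red">
#   React output: <div style={{"margin-bottom": "10px", "color": "red"}}>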
def handle_markdown_cell(
cell: NotebookNode,
new_img_dir: Path,
lib_dir: Path,
) -> str:
"""
Handle the given Jupyter Markdown cell and convert it to MDX.
Args:
cell (NotebookNode): Jupyter Markdown cell object.
new_img_dir (Path): Path where images are copied to for display in the
Markdown cell.
lib_dir (Path): The location for the Bean Machine library.
Returns:
str: Transformed Markdown object suitable for inclusion in MDX.
"""
markdown = cell["source"]
# Update image paths in the Markdown and copy them to the Markdown tutorials folder.
markdown = handle_images_found_in_markdown(markdown, new_img_dir, lib_dir)
# We will attempt to handle inline style attributes written in HTML by converting
# them to something React can consume.
markdown = transform_style_attributes(markdown)
# Remove any HTML comments from the Markdown. They are fine to keep in the
# notebooks, but are not really useful in the MDX.
markdown = re.sub("(<!--.*?-->)", "", markdown, flags=re.DOTALL)
mdx = mdformat.text(markdown, options={"wrap": 88}, extensions={"myst"})
return f"{mdx}\n"
def handle_cell_input(cell: NotebookNode, language: str) -> str:
"""
Create a Markdown cell block using the given cell source, and the language.
The given language will determine cell input syntax styles. Docusaurus uses Prism as
the syntax highlighter, https://prismjs.com. See the Docusaurus documentation for
more information on code blocks
https://docusaurus.io/docs/markdown-features/code-blocks.
Args:
cell (NotebookNode): A notebook cell.
language (str): Language specifier for syntax highlighting.
Returns:
str: Code block formatted Markdown string.
"""
cell_source = cell.get("source", "")
return f"```{language}\n{cell_source}\n```\n\n"
def transform_bokeh_json(json_data: Dict[str, Any]) -> Dict[str, Any]:
"""
Transform Bokeh JSON found in a cell output to something BokehJS can consume.
Args:
json_data (Dict[str, Any]): JSON data found in a notebook's cell output that is
for Bokeh.
Returns:
Dict[str, Any]: Reorganized JSON output for BokehJS.
"""
key = list(json_data.keys())[0]
data = json_data[key]
json_tx = {}
json_tx["target_id"] = key
json_tx["root_id"] = data["roots"]["root_ids"][0]
json_tx["doc"] = {
"defs": data["defs"],
"roots": data["roots"],
"title": data["title"],
"version": data["version"],
}
json_tx["version"] = data["version"]
return json_tx
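# Illustrative shape of the transformation above (values abbreviated): an input of
# the form
#   {"<target-id>": {"defs": ..., "roots": {"root_ids": ["<root-id>"], ...},
#                    "title": ..., "version": ...}}
# is flattened into
#   {"target_id": "<target-id>", "root_id": "<root-id>", "doc": {...}, "version": ...}
# which is the layout read back from disk by the BokehFigure React component.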
def handle_bokeh(
values: List[Dict[str, Union[int, str, NotebookNode]]],
plot_data_folder: Path,
) -> List[Tuple[int, str]]:
"""
Convert Bokeh `show` outputs and Applications to MDX.
Args:
values (List[Dict[str, Union[int, str, NotebookNode]]]): Bokeh tagged cell
outputs.
plot_data_folder (Path): Path to the folder where plot data should be
stored.
Returns:
List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is
the index where the output occurred from the cell, and the second entry of
the tuple is the MDX formatted string.
"""
output = []
for value in values:
index = int(value["index"])
data = str(value["data"])
app_flag = data.startswith("<!DOCTYPE html>")
json_data = {}
# Handle Bokeh `show` outputs.
if not app_flag:
# Parse the JavaScript for the Bokeh JSON data. The BokehJS output is
# standardized, so we can make the following assumption for finding the
            # right spot for the JSON data. Also, this is pure JavaScript, so
# parsing it with lxml is not an option.
json_string = list(
filter(
lambda line: line.startswith("const docs_json = "),
[line.strip() for line in data.splitlines() if line],
),
)[0]
# Ignore the `const` definition and the ending `;` from the line.
json_string = json_string[len("const docs_json = ") : -1]
json_data = json.loads(json_string)
# Handle Bokeh Applications.
if app_flag:
# Bokeh Application objects are rendered in the notebook as HTML. This
# HTML is saved in the output cell, which we parse below using lxml and
# xpaths.
doc = etree.HTML(data) # pyre-ignore
scripts = doc.xpath("//body/script[@type='application/json']")
script = scripts[0]
script = "".join(script.itertext())
# Unescape characters. If we skip this step, then the JSON read in by
# the React BokehFigure object will error in the browser.
            script = script.replace("&amp;", "&")
            script = script.replace("&lt;", "<")
            script = script.replace("&gt;", ">")
            script = script.replace("&quot;", '"')
            script = script.replace("&#x27;", "'")
            script = script.replace("&#x60;", "`")
json_data = json.loads(script)
        # Reorganize the data so we can save it in a format BokehJS will be able to
        # consume later.
js = transform_bokeh_json(json_data)
file_name = js["target_id"]
# Save the Bokeh JSON data to disk. It will be read by React when loaded in
# Docusaurus.
file_path = plot_data_folder / f"{file_name}.json"
with file_path.open("w") as f:
json.dump(js, f, indent=2)
# Add the Bokeh figure to the MDX output.
path_to_data = f"./assets/plot_data/{file_name}.json"
output.append(
(index, f"<BokehFigure data={{require('{path_to_data}')}} />\n\n"),
)
return output
def handle_image(
values: List[Dict[str, Union[int, str, NotebookNode]]],
) -> List[Tuple[int, str]]:
"""
Convert embedded images to string MDX can consume.
Args:
        values (List[Dict[str, Union[int, str, NotebookNode]]]): Image tagged cell
            outputs.
Returns:
List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is
the index where the output occurred from the cell, and the second entry of
the tuple is the MDX formatted string.
"""
output = []
for value in values:
index = value["index"]
mime_type = value["mime_type"]
img = value["data"]
output.append((index, f"\n\n"))
return output
def handle_markdown(
values: List[Dict[str, Union[int, str, NotebookNode]]],
) -> List[Tuple[int, str]]:
"""
Convert and format Markdown for MDX.
Args:
        values (List[Dict[str, Union[int, str, NotebookNode]]]): Markdown tagged cell
            outputs.
Returns:
List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is
the index where the output occurred from the cell, and the second entry of
the tuple is the MDX formatted string.
"""
output = []
for value in values:
index = int(value["index"])
markdown = str(value["data"])
markdown = mdformat.text(markdown, options={"wrap": 88}, extensions={"myst"})
output.append((index, f"{markdown}\n\n"))
return output
def handle_pandas(
values: List[Dict[str, Union[int, str, NotebookNode]]],
) -> List[Tuple[int, str]]:
"""
Handle how to display pandas DataFrames.
There is a scoped style tag in the DataFrame output that uses the class name
`dataframe` to style the output. We will use this token to determine if a pandas
DataFrame is being displayed.
Args:
        values (List[Dict[str, Union[int, str, NotebookNode]]]): pandas DataFrame
            tagged cell outputs.
Returns:
List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is
the index where the output occurred from the cell, and the second entry of
the tuple is the MDX formatted string.
"""
output = []
for value in values:
index = int(value["index"])
data = str(value["data"])
df = pd.read_html(data, flavor="lxml")
# NOTE: The return is a list of dataframes and we only care about the first
# one.
md_df = df[0]
for column in md_df.columns:
if column.startswith("Unnamed"):
md_df.rename(columns={column: ""}, inplace=True)
# Remove the index if it is just a range, and output to markdown.
mdx = ""
if isinstance(md_df.index, pd.RangeIndex):
# Ignore FutureWarning: 'showindex' is deprecated.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
mdx = md_df.to_markdown(showindex=False)
        else:
mdx = md_df.to_markdown()
output.append((index, f"\n{mdx}\n\n"))
return output
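# Illustrative before/after for the handler above: the scoped-style HTML table
# produced by a DataFrame such as pd.DataFrame({"a": [1, 2]}) would be emitted as a
# Markdown table roughly like
#   |   a |
#   |----:|
#   |   1 |
#   |   2 |
# with the RangeIndex dropped, as handled above.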
def handle_plain(
values: List[Dict[str, Union[int, str, NotebookNode]]],
) -> List[Tuple[int, str]]:
"""
    Handle how plain cell output should be displayed in MDX.
Args:
        values (List[Dict[str, Union[int, str, NotebookNode]]]): Plain text tagged
            cell outputs.
Returns:
List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is
the index where the output occurred from the cell, and the second entry of
the tuple is the MDX formatted string.
"""
output = []
for value in values:
index = int(value["index"])
data = str(value["data"])
data = [line.strip() for line in data.splitlines() if line]
data = [datum for datum in data if datum]
if data:
data = "\n".join([line for line in str(value["data"]).splitlines() if line])
output.append(
(index, f"<CellOutput>\n{{\n `{data}`\n}}\n</CellOutput>\n\n"),
)
return output
def handle_plotly(
values: List[Dict[str, Union[int, str, NotebookNode]]],
plot_data_folder: Path,
) -> List[Tuple[int, str]]:
"""
Convert Plotly outputs to MDX.
Args:
        values (List[Dict[str, Union[int, str, NotebookNode]]]): Plotly tagged cell
            outputs.
plot_data_folder (Path): Path to the folder where plot data should be
stored.
Returns:
List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is
the index where the output occurred from the cell, and the second entry of
the tuple is the MDX formatted string.
"""
output = []
for value in values:
index = value["index"]
data = value["data"]
file_name = str(uuid.uuid4())
file_path = plot_data_folder / f"{file_name}.json"
path_to_data = f"./assets/plot_data/{file_name}.json"
output.append(
(index, f"<PlotlyFigure data={{require('{path_to_data}')}} />\n\n"),
)
with file_path.open("w") as f:
json.dump(data, f, indent=2)
return output
def handle_tqdm(
values: List[Dict[str, Union[int, str, NotebookNode]]],
) -> List[Tuple[int, str]]:
"""
Handle the output of tqdm.
tqdm will be displayed as separate CellOutput React components if we do not
aggregate them all into a single CellOutput object, which is what this method does.
Args:
        values (List[Dict[str, Union[int, str, NotebookNode]]]): tqdm tagged cell
            outputs.
Returns:
List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is
the index where the output occurred from the cell, and the second entry of
the tuple is the MDX formatted string.
"""
output = sorted(values, key=lambda item: item["index"])
index = int(output[0]["index"])
md = "\n".join([str(item["data"]) for item in output if item["data"]])
return [(index, f"<CellOutput>\n{{\n `{md}`\n}}\n</CellOutput>\n\n")]
CELL_OUTPUTS_TO_PROCESS = Dict[
str,
List[Dict[str, Union[int, str, NotebookNode]]],
]
def aggregate_mdx(
cell_outputs_to_process: CELL_OUTPUTS_TO_PROCESS,
plot_data_folder: Path,
) -> str:
"""
Aggregate the `cell_outputs_to_process` into MDX.
Args:
cell_outputs_to_process (CELL_OUTPUTS_TO_PROCESS): A dictionary of cell outputs
that need further processing.
plot_data_folder (Path): Path to where plot data should be stored for the
tutorial.
Returns:
str: MDX formatted string.
"""
processed_mdx = []
for key, values in cell_outputs_to_process.items():
if not values:
continue
if key == "bokeh":
processed_mdx.extend(handle_bokeh(values, plot_data_folder))
if key == "image":
processed_mdx.extend(handle_image(values))
if key == "markdown":
processed_mdx.extend(handle_markdown(values))
if key == "pandas":
processed_mdx.extend(handle_pandas(values))
if key == "plain":
processed_mdx.extend(handle_plain(values))
if key == "plotly":
processed_mdx.extend(handle_plotly(values, plot_data_folder))
if key == "tqdm":
processed_mdx.extend(handle_tqdm(values))
# Ensure the same ordering of the MDX happens as was found in the original cell
# output.
processed_mdx = sorted(processed_mdx, key=lambda item: item[0])
mdx = "\n".join([item[1] for item in processed_mdx])
return mdx
def prioritize_dtypes(
cell_outputs: List[NotebookNode],
) -> Tuple[List[List[str]], List[bool]]:
"""
Prioritize cell output data types.
Args:
cell_outputs (List[NotebookNode]): A list of cell outputs.
Returns:
        Tuple[List[List[str]], List[bool]]: Returns two items in the tuple; the first
            is a list of prioritized data types and the second is a list of boolean
            values indicating whether each cell output contains Plotly information.
"""
cell_output_dtypes = [
list(cell_output["data"].keys())
if "data" in cell_output
else [cell_output["output_type"]]
for cell_output in cell_outputs
]
prioritized_cell_output_dtypes = [
sorted(
set(dtypes).intersection(set(priorities)),
key=lambda dtype: priorities.index(dtype),
)
for dtypes in cell_output_dtypes
]
prioritized_cell_output_dtypes = [
[str(item) for item in items] for items in prioritized_cell_output_dtypes
]
plotly_flags = [
any(["plotly" in output for output in outputs])
for outputs in cell_output_dtypes
]
return prioritized_cell_output_dtypes, plotly_flags
def aggregate_bokeh(
prioritized_data_dtype: str,
cell_output: NotebookNode,
data: NotebookNode,
cell_outputs_to_process: CELL_OUTPUTS_TO_PROCESS,
i: int,
) -> None:
"""
Aggregate Bokeh cell outputs.
Args:
prioritized_data_dtype (str): The prioritized cell output data type.
cell_output (NotebookNode): The actual cell output from the notebook.
data (NotebookNode): The data of the cell output.
cell_outputs_to_process (CELL_OUTPUTS_TO_PROCESS): Dictionary containing
aggregated cell output objects.
i (int): Index for the cell output in the list of cell output objects.
Returns:
None: Does not return anything, instead adds values to the
cell_outputs_to_process if applicable.
"""
if prioritized_data_dtype == "application/vnd.bokehjs_load.v0+json":
pass
# Bokeh `show` outputs.
if prioritized_data_dtype == "application/vnd.bokehjs_exec.v0+json":
data = cell_output["data"]["application/javascript"]
cell_outputs_to_process["bokeh"].append({"index": i, "data": data})
# Bokeh applications.
if prioritized_data_dtype == "text/html" and "Bokeh Application" in data:
cell_outputs_to_process["bokeh"].append({"index": i, "data": data})
def aggregate_images_and_plotly(
prioritized_data_dtype: str,
cell_output: NotebookNode,
data: NotebookNode,
plotly_flags: List[bool],
cell_outputs_to_process: CELL_OUTPUTS_TO_PROCESS,
i: int,
) -> None:
"""
Aggregates images or Plotly cell outputs into an appropriate bucket.
Args:
prioritized_data_dtype (str): The prioritized cell output data type.
cell_output (NotebookNode): The actual cell output from the notebook.
data (NotebookNode): The data of the cell output.
plotly_flags (List[bool]): True if a Plotly plot was found in the cell outputs
else False.
cell_outputs_to_process (CELL_OUTPUTS_TO_PROCESS): Dictionary containing
aggregated cell output objects.
i (int): Index for the cell output in the list of cell output objects.
Returns:
None: Does not return anything, instead adds values to the
cell_outputs_to_process if applicable.
"""
if prioritized_data_dtype.startswith("image"):
if not plotly_flags[i]:
cell_outputs_to_process["image"].append(
{"index": i, "data": data, "mime_type": prioritized_data_dtype},
)
# Plotly outputs a static image, but we can use the JSON in the cell
# output to create interactive plots using a React component.
if plotly_flags[i]:
data = cell_output["data"]["application/vnd.plotly.v1+json"]
cell_outputs_to_process["plotly"].append({"index": i, "data": data})
def aggregate_plain_output(
prioritized_data_dtype: str,
cell_output: NotebookNode,
data: NotebookNode,
cell_outputs_to_process: CELL_OUTPUTS_TO_PROCESS,
i: int,
) -> None:
"""
Aggregate plain text cell outputs together.
Args:
prioritized_data_dtype (str): The prioritized cell output data type.
cell_output (NotebookNode): The actual cell output from the notebook.
data (NotebookNode): The data of the cell output.
cell_outputs_to_process (CELL_OUTPUTS_TO_PROCESS): Dictionary containing
aggregated cell output objects.
i (int): Index for the cell output in the list of cell output objects.
Returns:
None: Does not return anything, instead adds values to the
cell_outputs_to_process if applicable.
"""
# Ignore error outputs.
if "name" in cell_output and cell_output["name"] == "stderr":
pass
# Ignore matplotlib legend text output.
if prioritized_data_dtype == "text/plain" and "matplotlib" in data:
pass
cell_outputs_to_process["plain"].append({"index": i, "data": data})
def aggregate_output_types(cell_outputs: List[NotebookNode]) -> CELL_OUTPUTS_TO_PROCESS:
"""
Aggregate cell outputs into a dictionary for further processing.
Args:
cell_outputs (List[NotebookNode]): List of cell outputs.
Returns:
CELL_OUTPUTS_TO_PROCESS: Dictionary containing aggregated cell output objects.
"""
# We will use the below cell output data types for prioritizing the output shown in
# the MDX file.
prioritized_cell_output_dtypes, plotly_flags = prioritize_dtypes(cell_outputs)
cell_outputs_to_process = {
"bokeh": [],
"image": [],
"markdown": [],
"pandas": [],
"plain": [],
"plotly": [],
"tqdm": [],
}
for i, cell_output in enumerate(cell_outputs):
prioritized_data_dtype = prioritized_cell_output_dtypes[i][0]
        # If there is no `data` key in the cell_output, then it may be an error that
        # needs to be handled; otherwise (e.g. for stream outputs) the payload is
        # stored under the `text` key instead.
data = (
cell_output["data"][prioritized_data_dtype]
if "data" in cell_output
else cell_output["text"]
)
bokeh_check = "bokeh" in prioritized_data_dtype or (
prioritized_data_dtype == "text/html" and "Bokeh Application" in data
)
if bokeh_check:
aggregate_bokeh(
prioritized_data_dtype,
cell_output,
data,
cell_outputs_to_process,
i,
)
image_check = prioritized_data_dtype.startswith("image")
if image_check:
aggregate_images_and_plotly(
prioritized_data_dtype,
cell_output,
data,
plotly_flags,
cell_outputs_to_process,
i,
)
plain_check = prioritized_data_dtype in ["text/plain", "stream"]
if plain_check:
aggregate_plain_output(
prioritized_data_dtype,
cell_output,
data,
cell_outputs_to_process,
i,
)
if prioritized_data_dtype == "text/markdown":
cell_outputs_to_process["markdown"].append({"index": i, "data": data})
if "dataframe" in data:
cell_outputs_to_process["pandas"].append({"index": i, "data": data})
if prioritized_data_dtype == "application/vnd.jupyter.widget-view+json":
data = cell_output["data"]["text/plain"]
cell_outputs_to_process["tqdm"].append({"index": i, "data": data})
return cell_outputs_to_process
def handle_cell_outputs(cell: NotebookNode, plot_data_folder: Path) -> str:
"""
Handle cell outputs and convert to MDX.
Args:
cell (NotebookNode): The cell where the outputs need converting.
plot_data_folder (Path): Path to the folder where plot data should be
stored.
Returns:
str: MDX formatted cell output.
"""
mdx = ""
# Return an empty string if there are no actual cell outputs.
cell_outputs = cell.get("outputs", [])
if not cell_outputs:
return mdx
# We will loop over all cell outputs and bucket them into the appropriate key in the
# dictionary below for further processing. Doing it in this way helps aggregate like
# outputs together e.g. tqdm outputs.
cell_outputs_to_process = aggregate_output_types(cell_outputs)
    # Now we process all aggregated cell outputs into a single MDX string.
    mdx = aggregate_mdx(cell_outputs_to_process, plot_data_folder)
    return mdx
def handle_code_cell(cell: NotebookNode, plot_data_folder: Path) -> str:
"""
Handle code cells in Jupyter notebooks and convert them to MDX.
Args:
cell (NotebookNode): A Jupyter notebook cell that contains code.
plot_data_folder (Path): Path to the folder where plot data should be
stored.
Returns:
str: MDX formatted code cell.
"""
cell_input_mdx = handle_cell_input(cell, "python")
cell_output_mdx = handle_cell_outputs(cell, plot_data_folder)
return cell_input_mdx + cell_output_mdx
def transform_notebook(path: Path) -> str:
"""
Transform a notebook located at the given path into MDX.
Args:
path (Path): Path to the Jupyter notebook tutorial.
Returns:
str: MDX formatted string.
"""
filename, assets_folder = create_folders(path)
img_folder = assets_folder / "img"
plot_data_folder = assets_folder / "plot_data"
save_folder = assets_folder.joinpath("..").resolve()
nb = load_notebook(path)
nb_metadata = load_nb_metadata()
mdx = ""
mdx += create_frontmatter(path, nb_metadata)
mdx += create_imports()
mdx += create_buttons(nb_metadata, path.stem)
for cell in nb["cells"]:
cell_type = cell["cell_type"]
# Handle a Markdown cell.
if cell_type == "markdown":
mdx += handle_markdown_cell(cell, img_folder, LIB_DIR)
# Handle a code cell.
if cell_type == "code":
mdx += handle_code_cell(cell, plot_data_folder)
# Write the MDX file to disk.
save_path = save_folder / f"{filename}.mdx"
with save_path.open("w") as f:
f.write(mdx)
# Return the string for debugging purposes.
return mdx
if __name__ == "__main__":
tutorials_metadata = load_nb_metadata()
print("--------------------------------------------")
print("Converting tutorial notebooks into mdx files")
print("--------------------------------------------")
for _, value in tutorials_metadata.items():
path = (LIB_DIR / value["nb_path"]).resolve()
print(f"{path.stem}")
mdx = transform_notebook(path)
print("")
| beanmachine-main | website/scripts/convert_ipynb_to_mdx.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch.distributions as dist
from torch import tensor
class ToplevelSmokeTest(unittest.TestCase):
def test_toplevel_package_imports(self):
# these decorators should execute without error
@bm.random_variable
def foo(i):
return dist.Bernoulli(0.5)
@bm.functional
def foo_sum(n):
return sum(foo(i) for i in range(n))
@bm.random_variable
def bar():
return dist.Normal(0, 1)
# exercise invocation from top-level package directly
# Compositional Inference
samples = bm.CompositionalInference().infer(
[foo_sum(3)], {foo(0): tensor(0.0)}, 100, 1
)
bm.Diagnostics(samples)
# NUTS
samples = bm.SingleSiteNoUTurnSampler().infer(
[bar()], {foo(0): tensor(0.0)}, 100, 1, num_adaptive_samples=100
)
bm.Diagnostics(samples)
| beanmachine-main | tests/ppl/smoke_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import pytest
import torch.distributions as dist
@pytest.fixture(autouse=True)
def fix_random_seed():
"""Fix the random state for every test in the test suite."""
bm.seed(0)
@pytest.fixture(autouse=True)
def disable_torch_distribution_validation():
"""Disables validation of Torch distribution arguments."""
dist.Distribution.set_default_validate_args(False)
| beanmachine-main | tests/ppl/conftest.py |
beanmachine-main | tests/ppl/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import pytest
import torch
from beanmachine.ppl.experimental.torch_jit_backend import get_backend, TorchJITBackend
from ..inference.inference_test import SampleModel
def test_get_backend():
with pytest.warns(
UserWarning, match="The support of TorchInductor is experimental"
):
# test if switching to inductor triggers the warning
backend = get_backend(nnc_compile=False, experimental_inductor_compile=True)
assert backend is TorchJITBackend.INDUCTOR
backend = get_backend(nnc_compile=True, experimental_inductor_compile=False)
assert backend is TorchJITBackend.NNC
backend = get_backend(nnc_compile=False, experimental_inductor_compile=False)
assert backend is TorchJITBackend.NONE
@pytest.mark.skip(reason="The CPU backend of TorchInductor isn't working in fbcode yet")
def test_inductor_compile():
model = SampleModel()
queries = [model.foo()]
observations = {model.bar(): torch.tensor(0.5)}
num_samples = 30
num_chains = 2
# verify that Inductor can run through
samples = bm.GlobalNoUTurnSampler(experimental_inductor_compile=True).infer(
queries,
observations,
num_samples,
num_adaptive_samples=num_samples,
num_chains=num_chains,
)
# sanity check: make sure that the samples are valid
assert not torch.isnan(samples[model.foo()]).any()
| beanmachine-main | tests/ppl/experimental/torch_jit_backend_test.py |
beanmachine-main | tests/ppl/experimental/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
CompositeRules,
DimensionalRule,
Operator,
SplitRule,
)
@pytest.fixture
def grow_dim():
return 1
@pytest.fixture
def grow_val():
return 2.1
def test_dimensional_rule_addition(grow_dim, grow_val):
lax_rule = SplitRule(
grow_dim=grow_dim, grow_val=grow_val + 10, operator=Operator.le
)
existing_dimensional_rule = DimensionalRule(
grow_dim=grow_dim, min_val=grow_val - 20, max_val=grow_val
)
assert (
existing_dimensional_rule.max_val
== existing_dimensional_rule.add_rule(lax_rule).max_val
)
assert (
existing_dimensional_rule.min_val
== existing_dimensional_rule.add_rule(lax_rule).min_val
)
restrictive_rule_le = SplitRule(
grow_dim=grow_dim, grow_val=grow_val - 10, operator=Operator.le
)
assert (
existing_dimensional_rule.max_val
> existing_dimensional_rule.add_rule(restrictive_rule_le).max_val
)
assert (
existing_dimensional_rule.min_val
== existing_dimensional_rule.add_rule(restrictive_rule_le).min_val
)
restrictive_rule_gt = SplitRule(
grow_dim=grow_dim, grow_val=grow_val - 10, operator=Operator.gt
)
assert (
existing_dimensional_rule.max_val
== existing_dimensional_rule.add_rule(restrictive_rule_gt).max_val
)
assert (
existing_dimensional_rule.min_val
< existing_dimensional_rule.add_rule(restrictive_rule_gt).min_val
)
@pytest.fixture
def all_dims():
return [0, 2]
@pytest.fixture
def all_split_rules(all_dims):
all_rules = []
for dim in all_dims:
all_rules.append(SplitRule(grow_dim=dim, grow_val=5, operator=Operator.le))
return all_rules
@pytest.fixture
def X():
return torch.Tensor([[1.0, 3.0, 7.0], [-1.1, 100, 5]])
def test_composite_rules(all_dims, all_split_rules, X):
composite_rule = CompositeRules(all_dims=all_dims, all_split_rules=all_split_rules)
X_cond = X[composite_rule.condition_on_rules(X)]
for dim in all_dims:
assert torch.all(X_cond[:, dim] > composite_rule.dimensional_rules[dim].min_val)
assert torch.all(
X_cond[:, dim] <= composite_rule.dimensional_rules[dim].max_val
)
invalid_split_rule = SplitRule(
grow_dim=max(all_dims) + 1, grow_val=12, operator=Operator.le
)
with pytest.raises(ValueError):
_ = composite_rule.add_rule(invalid_split_rule)
valid_split_rule = SplitRule(
grow_dim=max(all_dims), grow_val=1000.0, operator=Operator.gt
)
valid_new_composite_rule = composite_rule.add_rule(valid_split_rule)
assert valid_new_composite_rule.most_recent_split_rule() == valid_split_rule
| beanmachine-main | tests/ppl/experimental/bart/bart_split_rule_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.scalar_samplers import (
NoiseStandardDeviation,
)
@pytest.fixture
def X():
return torch.Tensor([[3.0, 1.0], [4.0, 1.0], [1.5, 1.0], [-1.0, 1.0]])
@pytest.fixture
def residual(X):
return X * 0.1
@pytest.fixture
def sigma():
return NoiseStandardDeviation(prior_concentration=0.1, prior_rate=0.2)
def test_sigma_sampling(sigma, X, residual):
prev_val = sigma.val
sample = sigma.sample(X=X, residual=residual)
    assert prev_val != sigma.val
assert sigma.val == sample
| beanmachine-main | tests/ppl/experimental/bart/bart_scalar_sampler_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import (
GrowError,
PruneError,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.mutation import (
GrowMutation,
PruneMutation,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.node import (
LeafNode,
SplitNode,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
CompositeRules,
Operator,
SplitRule,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.tree import Tree
@pytest.fixture
def X():
return torch.Tensor(
[[3.0], [4.0], [1.5], [-1.0]]
) # only r1 containing all positive entries is growable
@pytest.fixture
def l1_non_growable():
return LeafNode(
depth=1,
composite_rules=CompositeRules(
all_dims=[0],
all_split_rules=[SplitRule(grow_dim=0, grow_val=0, operator=Operator.le)],
),
val=-10,
)
@pytest.fixture
def l2_non_growable():
return LeafNode(
depth=2,
composite_rules=CompositeRules(
all_dims=[0],
all_split_rules=[
SplitRule(grow_dim=0, grow_val=1.5, operator=Operator.le),
SplitRule(grow_dim=0, grow_val=0, operator=Operator.gt),
],
),
val=15,
)
@pytest.fixture
def r2_growable():
return LeafNode(
depth=2,
composite_rules=CompositeRules(
all_dims=[0],
all_split_rules=[
SplitRule(grow_dim=0, grow_val=1.5, operator=Operator.gt),
SplitRule(grow_dim=0, grow_val=0, operator=Operator.gt),
],
),
val=15,
)
@pytest.fixture
def r1_grown(r2_growable, l2_non_growable):
return SplitNode(
depth=1,
left_child=l2_non_growable,
right_child=r2_growable,
composite_rules=CompositeRules(
all_dims=[0],
all_split_rules=[SplitRule(grow_dim=0, grow_val=0, operator=Operator.gt)],
),
)
@pytest.fixture
def root(l1_non_growable, r1_grown):
return SplitNode(
depth=0,
left_child=l1_non_growable,
right_child=r1_grown,
composite_rules=CompositeRules(all_dims=[0]),
)
@pytest.fixture
def tree(root, r1_grown, l1_non_growable, r2_growable, l2_non_growable):
"""
              root_node
              /       \
    (x1 <= 0) l1      r1 (x1 > 0)
                     /  \
        (x1 <= 1.5) l2  r2 (x1 > 1.5)

    The tree is made such that all positive input gets a positive prediction
    and vice versa.
"""
tree_ = Tree(nodes=[root, l1_non_growable, r1_grown, l2_non_growable, r2_growable])
return tree_
def test_num_nodes(tree):
assert tree.num_nodes() == 5
def test_leaf_split_nodes(tree):
    for node in tree.leaf_nodes():
        assert isinstance(node, LeafNode)
    for node in tree.split_nodes():
        assert isinstance(node, SplitNode)
def test_prunable_split_nodes(tree):
for node in tree.prunable_split_nodes():
assert isinstance(node.left_child, LeafNode)
        assert isinstance(node.right_child, LeafNode)
assert len(tree.prunable_split_nodes()) == tree.num_prunable_split_nodes()
def test_growable_leaves(tree, r2_growable, l1_non_growable, l2_non_growable, X):
assert tree.num_growable_leaf_nodes(X) == 1
growable_leaves = tree.growable_leaf_nodes(X)
    assert tree.num_growable_leaf_nodes(X) == len(growable_leaves)
assert r2_growable in growable_leaves
assert l1_non_growable not in growable_leaves
assert l2_non_growable not in growable_leaves
def test_prediction(tree, X):
for x1 in X:
x1 = x1.reshape(1, 1)
assert float(x1) * tree.predict(x1) >= 0
def test_mutate_prune(tree, root, l1_non_growable, r1_grown):
old_tree_len = tree.num_nodes()
pruned_r1 = SplitNode.prune_node(r1_grown)
# pruning an internal node
with pytest.raises(PruneError):
_ = PruneMutation(old_node=root, new_node=l1_non_growable)
mutation = PruneMutation(old_node=r1_grown, new_node=pruned_r1)
tree.mutate(mutation)
assert tree.num_nodes() == old_tree_len - 2
def test_mutate_grow(tree, r2_growable):
old_tree_len = tree.num_nodes()
l3 = LeafNode(
depth=3,
composite_rules=CompositeRules(
all_dims=[0],
all_split_rules=[SplitRule(grow_dim=0, grow_val=3, operator=Operator.le)],
),
val=15,
)
r3 = LeafNode(
depth=3,
composite_rules=CompositeRules(
all_dims=[0],
all_split_rules=[SplitRule(grow_dim=0, grow_val=1.5, operator=Operator.gt)],
),
val=15,
)
r2_grown = SplitNode(
depth=2,
left_child=l3,
right_child=r3,
composite_rules=CompositeRules(
all_dims=[0],
all_split_rules=[SplitRule(grow_dim=0, grow_val=1.5, operator=Operator.gt)],
),
)
# growing an internal node
with pytest.raises(GrowError):
_ = GrowMutation(old_node=r2_grown, new_node=r2_growable)
mutation = GrowMutation(old_node=r2_growable, new_node=r2_grown)
tree.mutate(mutation)
assert tree.num_nodes() == old_tree_len + 2
| beanmachine-main | tests/ppl/experimental/bart/bart_tree_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import deepcopy
import pytest
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import (
PruneError,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.node import (
BaseNode,
LeafNode,
SplitNode,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
CompositeRules,
Operator,
SplitRule,
)
@pytest.fixture
def composite_rule():
all_rules = []
all_dims = [0, 1, 2]
for dim in all_dims:
all_rules.append(SplitRule(grow_dim=dim, grow_val=0, operator=Operator.le))
composite_rule = CompositeRules(all_dims=all_dims, all_split_rules=all_rules)
return composite_rule
@pytest.fixture
def left_rule():
return SplitRule(grow_dim=0, grow_val=-0.5, operator=Operator.le)
@pytest.fixture
def right_rule():
return SplitRule(grow_dim=0, grow_val=-0.5, operator=Operator.gt)
@pytest.fixture
def all_pass_composite_rule():
all_rules = []
all_dims = [0, 1, 2]
for dim in all_dims:
all_rules.append(
SplitRule(grow_dim=dim, grow_val=float("inf"), operator=Operator.le)
)
composite_rule = CompositeRules(all_dims=all_dims, all_split_rules=all_rules)
return composite_rule
@pytest.fixture
def X():
return torch.Tensor([[1.0, 3.0, 7.0], [-1.1, -1, -5]])
def test_conditioning(X, composite_rule):
base_node = BaseNode(depth=0, composite_rules=composite_rule)
assert torch.all(
base_node.data_in_node(X) == X[composite_rule.condition_on_rules(X)]
)
def test_leaf_node_prediction(composite_rule):
val = 10
leaf_node = LeafNode(composite_rules=composite_rule, depth=0, val=val)
assert leaf_node.predict() == val
@pytest.fixture
def leaf_node(composite_rule):
return LeafNode(composite_rules=composite_rule, depth=0)
@pytest.fixture
def loose_leaf(all_pass_composite_rule):
return LeafNode(composite_rules=all_pass_composite_rule, depth=0)
def test_growable_dims(leaf_node, loose_leaf, X):
assert leaf_node.get_num_growable_dims(X) == 0 # only one row of X passes the test
assert loose_leaf.get_num_growable_dims(X) == X.shape[-1] # everything passes
assert len(loose_leaf.get_growable_dims(X)) == loose_leaf.get_num_growable_dims(X)
def test_is_grow(leaf_node, loose_leaf, X):
assert not leaf_node.is_growable(X) # no splittable_dims. Cannot grow.
assert loose_leaf.is_growable(X)
def test_grow_node(leaf_node, left_rule, right_rule, X):
grown_leaf = LeafNode.grow_node(
leaf_node, left_rule=left_rule, right_rule=right_rule
)
assert isinstance(grown_leaf, SplitNode)
assert grown_leaf.left_child is not None
assert grown_leaf.right_child is not None
assert grown_leaf.most_recent_rule() == left_rule
def test_prune_node(leaf_node, composite_rule):
split_node = SplitNode(
left_child=leaf_node,
right_child=deepcopy(leaf_node),
depth=1,
composite_rules=composite_rule,
)
grandfather_node = SplitNode(
left_child=leaf_node,
right_child=split_node,
depth=0,
composite_rules=composite_rule,
)
assert split_node.is_prunable()
assert not grandfather_node.is_prunable()
assert isinstance(SplitNode.prune_node(split_node), LeafNode)
with pytest.raises(PruneError):
SplitNode.prune_node(grandfather_node)
def test_partition_of_split(loose_leaf, X):
grow_val = X[0, 0]
growable_vals = loose_leaf.get_growable_vals(X=X, grow_dim=0)
assert torch.isclose(
torch.tensor(
[loose_leaf.get_partition_of_split(X=X, grow_dim=0, grow_val=grow_val)]
),
torch.mean(
(growable_vals == grow_val.item()).to(torch.float), dtype=torch.float
),
)
| beanmachine-main | tests/ppl/experimental/bart/bart_node_test.py |
beanmachine-main | tests/ppl/experimental/bart/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.exceptions import (
PruneError,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.grow_prune_tree_proposer import (
GrowPruneTreeProposer,
MutationKind,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.node import (
LeafNode,
SplitNode,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
CompositeRules,
Operator,
SplitRule,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.tree import Tree
@pytest.fixture(autouse=True)
def seed():
torch.manual_seed(5)
@pytest.fixture
def X():
return torch.Tensor([[3.0, 1.0], [4.0, 1.0], [1.5, 1.0], [-1.0, 1.0]])
@pytest.fixture
def root_node(X):
return SplitNode(
depth=0,
composite_rules=CompositeRules(all_dims=list(range(X.shape[-1]))),
)
@pytest.fixture
def single_node_tree(X):
leaf_root = LeafNode(
depth=0,
composite_rules=CompositeRules(all_dims=list(range(X.shape[-1]))),
)
tree_ = Tree(nodes=[leaf_root])
return tree_
@pytest.fixture
def r1_growable(X):
return LeafNode(
depth=1,
composite_rules=CompositeRules(
all_dims=list(range(X.shape[-1])),
all_split_rules=[SplitRule(grow_dim=0, grow_val=0, operator=Operator.gt)],
),
val=-10,
)
@pytest.fixture
def l1_non_growable(X):
return LeafNode(
depth=1,
composite_rules=CompositeRules(
all_dims=list(range(X.shape[-1])),
all_split_rules=[SplitRule(grow_dim=0, grow_val=0, operator=Operator.le)],
),
val=-10,
)
@pytest.fixture
def single_layer_tree(root_node, r1_growable, l1_non_growable):
"""
              root_node
              /       \
    (x1 <= 0) l1      r1 (x1 > 0)

    The tree is made such that all positive input gets a positive prediction
    and vice versa.
"""
root_node._left_child = l1_non_growable
root_node._right_child = r1_growable
tree_ = Tree(nodes=[root_node, l1_non_growable, r1_growable])
return tree_
@pytest.fixture
def l2_non_growable(X):
return LeafNode(
depth=2,
composite_rules=CompositeRules(
all_dims=list(range(X.shape[-1])),
all_split_rules=[SplitRule(grow_dim=0, grow_val=3, operator=Operator.le)],
),
val=-10,
)
@pytest.fixture
def r2_growable(X):
return LeafNode(
depth=1,
composite_rules=CompositeRules(
all_dims=list(range(X.shape[-1])),
all_split_rules=[SplitRule(grow_dim=0, grow_val=0, operator=Operator.gt)],
),
val=-10,
)
@pytest.fixture
def r1_grown(X):
return SplitNode(
depth=1,
composite_rules=CompositeRules(
all_dims=list(range(X.shape[-1])),
all_split_rules=[SplitRule(grow_dim=0, grow_val=3, operator=Operator.gt)],
),
)
@pytest.fixture
def double_layer_tree(
root_node, r1_grown, l1_non_growable, r2_growable, l2_non_growable
):
"""
              root_node
              /       \
    (x1 <= 0) l1      r1 (x1 > 0)
                     /  \
           (x1 <= 3) l2  r2 (x1 > 3)
"""
root_node._left_child = l1_non_growable
root_node._right_child = r1_grown
r1_grown._left_child = l2_non_growable
r1_grown._right_child = r2_growable
tree_ = Tree(
nodes=[root_node, l1_non_growable, r1_grown, l2_non_growable, r2_growable]
)
return tree_
@pytest.fixture
def proposer():
return GrowPruneTreeProposer()
def test_new_mutation(proposer, single_node_tree, X):
assert proposer._get_new_mutation(X=X, tree=single_node_tree) == MutationKind.grow
def test_select_root_to_grow(proposer, single_node_tree, X):
assert (
proposer._select_leaf_to_grow(single_node_tree, X) == single_node_tree._nodes[0]
)
def test_select_leaf_to_grow(proposer, single_layer_tree, X, r1_growable):
assert proposer._select_leaf_to_grow(single_layer_tree, X) == r1_growable
def test_select_dim_to_grow(proposer, single_node_tree, X):
assert proposer._select_grow_dim(leaf_to_grow=single_node_tree._nodes[0], X=X) == 0
def test_select_node_to_prune(proposer, single_node_tree, double_layer_tree, r1_grown):
assert proposer._select_split_node_to_prune(tree=double_layer_tree) == r1_grown
with pytest.raises(PruneError):
_ = proposer._select_split_node_to_prune(tree=single_node_tree)
def test_propose(proposer, single_node_tree, X):
proposed_tree = proposer.propose(
tree=single_node_tree,
X=X,
partial_residual=torch.zeros(X.shape[0], 1),
alpha=0.5,
beta=0.5,
sigma_val=0.01,
leaf_mean_prior_scale=1,
)
assert isinstance(proposed_tree, Tree)
assert abs(proposed_tree.num_nodes() - single_node_tree.num_nodes()) in [
0,
2,
] # 2: grow or prune, 0 for no change
| beanmachine-main | tests/ppl/experimental/bart/bart_tree_proposer_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.bart_model import (
LeafMean,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.grow_from_root_tree_proposer import (
GrowFromRootTreeProposer,
SortedInvariants,
)
from beanmachine.ppl.experimental.causal_inference.models.bart.node import LeafNode
from beanmachine.ppl.experimental.causal_inference.models.bart.split_rule import (
CompositeRules,
)
@pytest.fixture(autouse=True)
def seed():
torch.manual_seed(5)
@pytest.fixture
def gfr_proposer():
gfr = GrowFromRootTreeProposer()
gfr.num_cuts = 2
gfr.num_null_cuts = 1
return gfr
@pytest.fixture
def X():
return torch.Tensor([[3.0, 1.0], [4.0, 1.0], [1.5, 1.0], [-1.0, 1.0]])
@pytest.fixture
def w(X):
num_vars = X.shape[-1]
weights = torch.Tensor([1 / num_vars for _ in range(num_vars - 1)])
return weights
def test_sample_variables(gfr_proposer, w):
num_vars_to_sample = max(len(w) - 1, 1)
assert (
len(gfr_proposer._sample_variables(num_vars_to_sample, w)) == num_vars_to_sample
)
impossible_num_vars_to_sample = len(w) + 1
assert len(gfr_proposer._sample_variables(impossible_num_vars_to_sample, w)) == len(
w
)
def test_presort(gfr_proposer, X):
O_ = gfr_proposer._presort(X)
num_observations, num_dims = X.shape
for inp_dim in range(num_dims):
for obs in range(1, num_observations):
assert X[O_[inp_dim, obs - 1], inp_dim] <= X[O_[inp_dim, obs], inp_dim]
def test_get_uniq_elems(gfr_proposer, X):
O_ = gfr_proposer._presort(X)
uniq_vals, val_counts = gfr_proposer._get_uniq_elems(X=X, O_=O_)
num_observations, num_dims = X.shape
for inp_dim in range(num_dims):
dim_val_counts = val_counts[inp_dim]
assert sum(dim_val_counts.values()) == num_observations
for id_, uniq_val in enumerate(uniq_vals[inp_dim]):
assert dim_val_counts[uniq_val] > 0
if id_ > 0:
assert uniq_val >= uniq_vals[inp_dim][id_ - 1]
assert set(uniq_vals[inp_dim]) == {_.item() for _ in X[:, inp_dim]}
@pytest.fixture
def invariants(gfr_proposer, X):
O_ = gfr_proposer._presort(X)
uniq_vals, val_counts = gfr_proposer._get_uniq_elems(X=X, O_=O_)
return SortedInvariants(O_=O_, uniq_vals=uniq_vals, val_counts=val_counts)
def test_select_cutpoints(gfr_proposer, X, invariants):
num_observations, num_dims = X.shape
cutpoints = gfr_proposer._select_cutpoints(
candidate_dims=list(range(num_dims)), uniq_vals=invariants.uniq_vals
)
num_dim_cuts = 0
for point_id, point in enumerate(cutpoints):
assert (
point.cut_val < invariants.uniq_vals[point.dim][-1]
) # no degenerate splits
if point_id > 0 and cutpoints[point_id - 1].dim == point.dim:
assert cutpoints[point_id - 1].cut_val < point.cut_val
num_dim_cuts += 1
elif point_id > 0 and cutpoints[point_id - 1].dim != point.dim:
assert num_dim_cuts <= gfr_proposer.num_cuts
num_dim_cuts = 0
else:
num_dim_cuts += 1
@pytest.fixture
def partial_residual(X):
return torch.ones((len(X), 1)) * 0.2
@pytest.fixture
def sigma_val():
return 0.1
@pytest.fixture
def leaf_sampler():
return LeafMean(prior_loc=0.0, prior_scale=0.1)
@pytest.fixture
def current_node(X):
return LeafNode(
depth=0,
val=0.1,
composite_rules=CompositeRules(all_dims=list(range(X.shape[-1]))),
)
@pytest.fixture
def alpha():
return 0.95
@pytest.fixture
def beta():
return 1.25
@pytest.fixture
def cut_points(gfr_proposer, invariants):
num_dims = invariants.O_.shape[0]
return gfr_proposer._select_cutpoints(
candidate_dims=list(range(num_dims)), uniq_vals=invariants.uniq_vals
)
def test_sample_cut_point(
gfr_proposer,
X,
invariants,
cut_points,
partial_residual,
sigma_val,
leaf_sampler,
current_node,
alpha,
beta,
):
num_observations, num_dims = X.shape
num_trials = 10
all_sampled_cutpoints = []
for _ in range(num_trials):
all_sampled_cutpoints.append(
gfr_proposer._sample_cut_point(
candidate_cut_points=cut_points,
partial_residual=partial_residual,
invariants=invariants,
sigma_val=sigma_val,
leaf_sampler=leaf_sampler,
current_node=current_node,
alpha=alpha,
beta=beta,
)
)
for point in all_sampled_cutpoints:
if point is not None:
assert point in cut_points
def test_sift(
gfr_proposer,
X,
invariants,
cut_points,
partial_residual,
sigma_val,
leaf_sampler,
current_node,
alpha,
beta,
):
cut_point = gfr_proposer._sample_cut_point(
candidate_cut_points=cut_points,
partial_residual=partial_residual,
invariants=invariants,
sigma_val=sigma_val,
leaf_sampler=leaf_sampler,
current_node=current_node,
alpha=alpha,
beta=beta,
)
left_invariants, right_invariants = gfr_proposer._sift(
X=X, cut_point=cut_point, invariants=invariants
)
assert (
invariants.O_.shape[0] == left_invariants.O_.shape[0]
and invariants.O_.shape[0] == right_invariants.O_.shape[0]
) # num dims shouldnt change
assert (
invariants.O_.shape[1]
== left_invariants.O_.shape[1] + right_invariants.O_.shape[1]
)
for dim in range(invariants.O_.shape[0]):
assert set(invariants.uniq_vals[dim]) == set(
left_invariants.uniq_vals[dim]
).union(set(right_invariants.uniq_vals[dim]))
for val in invariants.uniq_vals[dim]:
assert (
invariants.val_counts[dim][val]
== left_invariants.val_counts[dim][val]
+ right_invariants.val_counts[dim][val]
)
def test_propose(
X,
invariants,
cut_points,
partial_residual,
sigma_val,
leaf_sampler,
current_node,
alpha,
beta,
w,
):
proposer = GrowFromRootTreeProposer()
tree_, variable_counts = proposer.propose(
X=X,
partial_residual=partial_residual,
m=X.shape[-1],
w=w,
sigma_val=sigma_val,
leaf_sampler=leaf_sampler,
alpha=alpha,
beta=beta,
root_node=current_node,
num_cuts=2,
num_null_cuts=1,
)
all_leaves = tree_.leaf_nodes()
assert len(all_leaves) > 0
    if len(all_leaves) > 1:  # variable counts only accrue if the tree actually split
assert sum(variable_counts) > 0
assert tree_.predict(X).shape == partial_residual.shape
| beanmachine-main | tests/ppl/experimental/bart/xbart_grow_from_root_proposer_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from beanmachine.ppl.experimental.causal_inference.models.bart.bart_model import (
BART,
XBART,
)
@pytest.fixture
def X():
return torch.Tensor([[3.0, 1.0], [4.0, 1.0], [1.5, 1.0], [-1.0, 1.0]])
@pytest.fixture
def y(X):
return X[:, 0] + X[:, 1]
@pytest.fixture
def bart(X, y):
return BART(num_trees=1).fit(X=X, y=y, num_burn=1, num_samples=39)
@pytest.fixture
def X_test():
return torch.Tensor([[3.1, 2.5]])
@pytest.fixture
def y_test(X_test):
return X_test[:, 0] + X_test[:, 1]
def test_predict(X_test, y_test, bart):
y_pred = bart.predict(X_test)
assert len(X_test) == len(y_pred)
assert len(y_test) == len(y_pred)
def test_predict_with_quantiles_bart(X_test, bart):
quantiles = torch.Tensor([0.5])
y_pred, qvals = bart.predict_with_quantiles(X_test, quantiles=quantiles)
posterior_samples = bart.get_posterior_predictive_samples(X_test)
    # the median of an even number of samples is not unique, so this check only
    # binds when num_samples is odd (see the sketch after this test)
assert (1 - bart.num_samples % 2) or torch.all(
torch.median(posterior_samples, dim=1)[0] == qvals
)
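# A minimal editorial sketch (not part of the original suite) of the parity
# guard above: torch.median on an even number of samples returns the lower of
# the two middle values, while the 0.5 quantile interpolates between them.
def _median_parity_sketch():
    vals = torch.tensor([1.0, 2.0, 3.0, 4.0])
    assert torch.median(vals) == torch.tensor(2.0)  # lower middle element
    assert torch.quantile(vals, 0.5) == torch.tensor(2.5)  # interpolated median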
@pytest.fixture
def xbart(X, y):
return XBART(num_trees=1).fit(X=X, y=y, num_burn=1, num_samples=9)
def test_predict_xbart(X_test, y_test, xbart):
y_pred = xbart.predict(X_test)
assert len(X_test) == len(y_pred)
assert len(y_test) == len(y_pred)
def test_predict_with_quantiles_xbart(X_test, xbart):
quantiles = torch.Tensor([0.5])
y_pred, qvals = xbart.predict_with_quantiles(X_test, quantiles=quantiles)
posterior_samples = xbart.get_posterior_predictive_samples(X_test)
    # the median of an even number of samples is not unique, so this check only
    # binds when num_samples is odd
assert (1 - xbart.num_samples % 2) or torch.all(
torch.median(posterior_samples, dim=1)[0] == qvals
)
| beanmachine-main | tests/ppl/experimental/bart/bart_model_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.experimental.gp import (
bm_sample_from_prior,
make_prior_random_variables,
)
from beanmachine.ppl.experimental.gp.models import BoTorchGP, SimpleGP
from botorch.posteriors.gpytorch import GPyTorchPosterior
from gpytorch import kernels, likelihoods
from gpytorch.means import ConstantMean
from gpytorch.priors import GammaPrior, UniformPrior
class ModelTest(unittest.TestCase):
def setUp(self):
x = torch.randn(3, 1)
y = torch.randn(3)
mean = ConstantMean(constant_prior=UniformPrior(-1, 1))
kernel = kernels.MaternKernel(lengthscale_prior=GammaPrior(0.5, 0.5))
lik = likelihoods.GaussianLikelihood()
self.model = SimpleGP(x, y, mean, kernel, lik)
self.bo_model = BoTorchGP(x, y, mean, kernel, lik)
self.name_to_rv = make_prior_random_variables(self.model)
@bm.random_variable
def y():
sampled_model = bm_sample_from_prior(
self.model.to_pyro_random_module(),
self.name_to_rv,
)
return sampled_model.likelihood(sampled_model(x))
self.y = y
def test_infer(self):
self.model.train()
bm.GlobalNoUTurnSampler().infer(
list(self.name_to_rv.values()), {}, num_samples=2, num_chains=1
)
def test_load_and_predict(self):
self.model.eval()
d = {
"kernel.lengthscale_prior": torch.ones(1),
"mean.mean_prior": torch.tensor(1.0),
}
self.model.bm_load_samples(d)
assert self.model.kernel.lengthscale.item() == 1.0
assert isinstance(self.model(torch.randn(3, 1)), dist.MultivariateNormal)
def test_posterior(self):
self.bo_model.eval()
d = {
"kernel.lengthscale_prior": torch.ones(1),
"mean.mean_prior": torch.tensor(1.0),
}
self.bo_model.bm_load_samples(d)
assert isinstance(self.bo_model.posterior(torch.randn(3, 1)), GPyTorchPosterior)
obs_noise = torch.ones(1, 1)
mvn = self.bo_model.posterior(torch.randn(3, 1), obs_noise)
assert isinstance(mvn, GPyTorchPosterior)
| beanmachine-main | tests/ppl/experimental/gp/models_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import unittest
import beanmachine.ppl as bm
import gpytorch
import torch
from beanmachine.ppl.experimental.gp import (
bm_sample_from_prior,
make_prior_random_variables,
)
from beanmachine.ppl.experimental.gp.models import SimpleGP
from gpytorch import likelihoods
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import PeriodicKernel, ScaleKernel
from gpytorch.priors import UniformPrior
class Regression(SimpleGP):
def __init__(self, x_train, y_train, kernel, likelihood, *args, **kwargs):
mean = gpytorch.means.ConstantMean()
super().__init__(x_train, y_train, mean, kernel, likelihood)
def forward(self, data):
if data.dim() > 2:
data_shape = data.shape[1]
else:
data_shape = data.shape[0]
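        # add a diagonal "jitter" term to the covariance below so the matrix
        # stays positive definite and its factorization remains numerically stable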
jitter = torch.eye(data_shape, data_shape)
for _ in range(data.dim() - 1):
jitter = jitter.unsqueeze(0)
mean = self.mean(data)
cov = self.kernel(data) + jitter
if cov.dim() > mean.dim() + 1:
cov = cov.squeeze(0)
return MultivariateNormal(mean, cov)
class InferenceTests(unittest.TestCase):
def test_simple_regression(self):
torch.manual_seed(1)
n_samples = 100
x_train = torch.linspace(0, 1, 10)
y_train = torch.sin(x_train * (2 * math.pi))
kernel = ScaleKernel(
base_kernel=PeriodicKernel(
period_length_prior=UniformPrior(0.5, 1.5),
lengthscale_prior=UniformPrior(0.01, 1.5),
),
outputscale_prior=UniformPrior(0.01, 2.0),
)
likelihood = likelihoods.GaussianLikelihood()
likelihood.noise = 1e-4
gp = Regression(x_train, y_train, kernel, likelihood)
name_to_rv = make_prior_random_variables(gp)
@bm.random_variable
def y():
sampled_model = bm_sample_from_prior(gp.to_pyro_random_module(), name_to_rv)
return sampled_model.likelihood(sampled_model(x_train))
queries = list(name_to_rv.values())
obs = {y(): y_train}
samples = bm.GlobalNoUTurnSampler(nnc_compile=False).infer(
queries, obs, n_samples, num_chains=1
)
# get predictives
x_test = torch.linspace(0, 1, 21).unsqueeze(-1)
y_test = torch.sin(x_test * (2 * math.pi)).squeeze(0)
gp.eval()
s = samples.get_chain(0)
lengthscale_samples = s[name_to_rv["kernel.base_kernel.lengthscale_prior"]]
outputscale_samples = s[name_to_rv["kernel.outputscale_prior"]]
period_length_samples = s[name_to_rv["kernel.base_kernel.period_length_prior"]]
gp.pyro_load_from_samples(
{
"kernel.outputscale_prior": outputscale_samples,
"kernel.base_kernel.lengthscale_prior": lengthscale_samples,
"kernel.base_kernel.period_length_prior": period_length_samples,
}
)
expanded_x_test = x_test.unsqueeze(0).repeat(n_samples, 1, 1)
output = gp.likelihood(gp(expanded_x_test.detach()))
assert (
(y_test.squeeze() - output.mean.squeeze().mean(0)).abs().mean() < 1.0
).item()
| beanmachine-main | tests/ppl/experimental/gp/inference_test.py |
| beanmachine-main | tests/ppl/experimental/gp/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch.distributions as dist
from beanmachine.ppl.world.utils import get_default_transforms, initialize_value
def test_get_default_transforms():
bernoulli = dist.Bernoulli(0.1)
transforms = get_default_transforms(bernoulli)
assert dist.transforms.identity_transform == transforms
normal = dist.Normal(0, 1)
transforms = get_default_transforms(normal)
assert dist.transforms.identity_transform == transforms
gamma = dist.Gamma(1, 1)
transforms = get_default_transforms(gamma)
assert transforms.bijective
def test_initialize_value():
distribution = dist.Normal(0, 1)
value = initialize_value(distribution)
assert value.item() == pytest.approx(0, abs=1e-5)
first_sample = initialize_value(distribution, True)
second_sample = initialize_value(distribution, True)
assert first_sample.item() != second_sample.item()
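    # Our reading (not a documented contract): initialize_value returns a
    # deterministic starting point -- here ~0 for Normal(0, 1) -- unless a
    # random draw is requested via the second argument, hence the two samples
    # above differ.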
| beanmachine-main | tests/ppl/world/utils_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.world import World
class SampleModel:
@bm.random_variable
def foo(self):
return dist.Uniform(0.0, 1.0)
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), 1.0)
class DiscreteModel:
@bm.random_variable
def foo(self):
return dist.Categorical(torch.ones(3))
@bm.random_variable
def bar(self):
return dist.Normal(self.foo().float(), torch.tensor(1.0))
class DynamicModel:
@bm.random_variable
def foo(self):
return dist.Bernoulli(0.5)
@bm.random_variable
def bar(self, i: int):
return dist.Normal(0.0, 1.0)
@bm.random_variable
def baz(self):
mu = self.bar(int(self.foo()))
return dist.Normal(mu, 1.0)
class ChangeSupportModel:
@bm.random_variable
def foo(self):
return dist.Bernoulli(0.3)
@bm.random_variable
def bar(self):
if self.foo():
return dist.Categorical(logits=torch.rand((3,)))
else:
return dist.Normal(0.0, 1.0)
@bm.random_variable
def baz(self):
return dist.Bernoulli(self.foo())
def test_basic_operations():
model = SampleModel()
observations = {model.bar(): torch.rand(())}
world = World(observations=observations)
assert world.observations == observations
assert len(world.latent_nodes) == 0
assert len(world) == 0
with world:
model.bar() # this will add bar() and its parent foo() to world
assert len(world) == 2
assert model.bar() in world
assert world.latent_nodes == {model.foo()}
# edge connection
assert model.foo() in world.get_variable(model.bar()).parents
assert model.bar() in world.get_variable(model.foo()).children
assert len(world.get_variable(model.bar()).children) == 0
assert len(world.get_variable(model.foo()).parents) == 0
assert world.get_variable(model.foo()).value == world.call(model.foo())
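# Note (editorial): the edge assertions above follow from evaluation order --
# calling model.bar() inside the world context evaluates its parent
# model.foo() first, so the world records foo() as a parent of bar() and
# bar() as a child of foo().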
def test_initialization():
model = SampleModel()
with World():
val1 = model.bar()
with World():
val2 = model.bar()
assert val1 != val2
def test_log_prob():
model = SampleModel()
world1 = World(observations={model.foo(): torch.tensor(0.0)})
world1.call(model.bar())
log_prob1 = world1.log_prob()
# set to a value with extremely small probability
world2 = world1.replace({model.bar(): torch.tensor(100.0)})
log_prob2 = world2.log_prob()
assert log_prob1 > log_prob2
def test_enumerate():
model = DiscreteModel()
world = World(observations={model.bar(): torch.tensor(0.0)})
with world:
model.bar()
assert (torch.tensor([0.0, 1.0, 2.0]) == world.enumerate_node(model.foo())).all()
def test_change_parents():
model = DynamicModel()
world = World(initialize_fn=lambda d: torch.zeros_like(d.sample()))
with world:
model.baz()
assert model.foo() in world.get_variable(model.baz()).parents
assert model.bar(0) in world.get_variable(model.baz()).parents
assert model.bar(1) not in world.get_variable(model.baz()).parents
assert model.baz() in world.get_variable(model.bar(0)).children
world2 = world.replace({model.foo(): torch.tensor(1.0)})
assert model.bar(0) not in world2.get_variable(model.baz()).parents
assert model.bar(1) in world2.get_variable(model.baz()).parents
assert model.baz() in world2.get_variable(model.bar(1)).children
assert model.baz() not in world2.get_variable(model.bar(0)).children
def test_distribution_and_log_prob_update():
model = ChangeSupportModel()
with World(observations={model.baz(): torch.tensor(1.0)}) as world:
model.bar()
model.baz()
world = world.replace({model.foo(): torch.tensor(0.0)})
world2 = world.replace({model.foo(): torch.tensor(1.0)})
bar_var = world.get_variable(model.bar())
assert isinstance(bar_var.distribution, dist.Normal)
bar_var2 = world2.get_variable(model.bar())
assert isinstance(bar_var2.distribution, dist.Categorical)
# verify that the children's log prob is recomputed when foo gets updated
baz_var = world.get_variable(model.baz()) # Bernoulli(0.0)
baz_var2 = world2.get_variable(model.baz()) # Bernoulli(1.0)
# recall that baz() is observed to be 1.0
assert baz_var.log_prob < baz_var2.log_prob
| beanmachine-main | tests/ppl/world/world_test.py |
| beanmachine-main | tests/ppl/world/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.world.initialize_fn import init_from_prior, init_to_uniform
@pytest.mark.parametrize("init_fn", [init_from_prior, init_to_uniform])
@pytest.mark.parametrize(
"distribution",
[
dist.Uniform(0.0, 1.0),
dist.Normal(0.0, 1.0).expand((3,)),
dist.Bernoulli(0.5),
dist.Exponential(1.0),
dist.Dirichlet(torch.tensor([0.5, 0.5])),
dist.Categorical(logits=torch.randn(5, 10)),
dist.Bernoulli(0.5).expand((3, 5, 7)),
dist.Poisson(rate=2.0),
],
)
def test_initialize_validness(init_fn, distribution):
value = init_fn(distribution)
# make sure values are initialize within the constraint
assert torch.all(distribution.support.check(value))
assert not torch.any(torch.isnan(distribution.log_prob(value)))
assert value.size() == distribution.sample().size()
| beanmachine-main | tests/ppl/world/initialize_fn_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.world.variable import Variable
def test_log_prob():
var1 = Variable(value=torch.zeros(3), distribution=dist.Bernoulli(0.8))
# verify that the cached property `log_prob` is recomputed when we replace the
# fields of a Variable
var2 = var1.replace(value=torch.ones(3))
assert var1.log_prob.sum() < var2.log_prob.sum()
var3 = var1.replace(distribution=dist.Normal(0.0, 1.0))
assert var1.log_prob.sum() < var3.log_prob.sum()
# Expects an error here because support doesn't match
var4 = var1.replace(distribution=dist.Categorical(logits=torch.rand(2, 4)))
with pytest.raises(RuntimeError):
var4.log_prob
var5 = Variable(
value=torch.tensor(10).double(),
distribution=dist.Uniform(
torch.tensor(0.0).double(), torch.tensor(1.0).double()
),
)
# Check that the log prob has the right dtype
assert var5.log_prob.dtype == torch.double
assert torch.isinf(var5.log_prob)
var6 = var5.replace(value=torch.tensor(1))
assert torch.isinf(var6.log_prob)
| beanmachine-main | tests/ppl/world/variable_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from beanmachine.ppl.utils import tensorops
class TensorOpsTest(unittest.TestCase):
def test_gradients(self) -> None:
for type_ in [torch.float32, torch.float64]:
x = torch.randn(3, requires_grad=True, dtype=type_)
prec = torch.Tensor([[1, 0.1, 0], [0.1, 2, 0.5], [0, 0.5, 3]]).to(type_)
mu = torch.randn(3, dtype=type_)
            # the gradient is `-(x - mu) @ prec` and the Hessian is `-prec`
f = -(x - mu) @ prec @ (x - mu) / 2
grad, hess = tensorops.gradients(f, x)
self.assertTrue(grad.allclose(-(x - mu) @ prec))
self.assertTrue(hess.allclose(-prec))
self.assertEqual(grad.dtype, type_, "gradient dtype must match input")
self.assertEqual(hess.dtype, type_, "hessian dtype must match input")
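    def _autograd_reference_sketch(self) -> None:
        # Editorial sketch (an assumption about what tensorops.gradients wraps,
        # not its actual implementation): the same gradient/Hessian pair can be
        # recovered directly with torch.autograd. For f = |x|^2 / 2 the
        # gradient is x and the Hessian is the identity matrix.
        x = torch.randn(3, requires_grad=True)
        f = (x * x).sum() / 2
        (grad,) = torch.autograd.grad(f, x, create_graph=True)
        hess = torch.stack(
            [torch.autograd.grad(grad[i], x, retain_graph=True)[0] for i in range(3)]
        )
        self.assertTrue(grad.allclose(x))
        self.assertTrue(hess.allclose(torch.eye(3)))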
def test_simplex_gradients(self) -> None:
for type_ in [torch.float32, torch.float64]:
x = torch.randn(3, requires_grad=True, dtype=type_)
prec = torch.Tensor([[1, 0.1, 0], [0.1, 2, 0.5], [0, 0.5, 3]]).to(type_)
prec_diag = torch.Tensor([1.0, 1.9, 3.0]).to(type_)
mu = torch.randn(3, dtype=type_)
            # the gradient is `-(x - mu) @ prec`; the Hessian is `-prec`, of
            # which simplex_gradients returns only the diagonal
f = -(x - mu) @ prec @ (x - mu) / 2
grad, hess = tensorops.simplex_gradients(f, x)
self.assertTrue(grad.allclose(-(x - mu) @ prec))
self.assertTrue(hess.allclose(-prec_diag))
self.assertEqual(grad.dtype, type_, "gradient dtype must match input")
self.assertEqual(hess.dtype, type_, "hessian dtype must match input")
def test_halfspace_gradients(self) -> None:
for type_ in [torch.float32, torch.float64]:
x = torch.randn(3, requires_grad=True, dtype=type_)
prec = torch.Tensor([[1, 0.1, 0], [0.1, 2, 0.5], [0, 0.5, 3]]).to(type_)
prec_diag = torch.Tensor([1.0, 2.0, 3.0]).to(type_)
mu = torch.randn(3, dtype=type_)
            # the gradient is `-(x - mu) @ prec`; the Hessian is `-prec`, of
            # which halfspace_gradients returns only the diagonal
f = -(x - mu) @ prec @ (x - mu) / 2
grad, hess = tensorops.halfspace_gradients(f, x)
self.assertTrue(grad.allclose(-(x - mu) @ prec))
self.assertTrue(hess.allclose(-prec_diag))
self.assertEqual(grad.dtype, type_, "gradient dtype must match input")
self.assertEqual(hess.dtype, type_, "hessian dtype must match input")
def test_gradients_negative(self) -> None:
# output must have one element
x = torch.randn(3, requires_grad=True)
with self.assertRaises(ValueError) as cm:
tensorops.gradients(2 * x, x)
self.assertTrue(
"output tensor must have exactly one element" in str(cm.exception)
)
| beanmachine-main | tests/ppl/utils/tensorops_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from beanmachine.ppl.utils.set_of_tensors import SetOfTensors
from torch import tensor
class SetOfTensorsTest(unittest.TestCase):
def test_set_of_tensors_1(self) -> None:
self.maxDiff = None
# Show that we deduplicate these tensors.
t = [
tensor(1.0),
tensor([]),
tensor([1.0]),
tensor([1.0, 2.0]),
tensor([1.0, 2.0, 3.0, 4.0]),
tensor([[1.0]]),
tensor([[1.0], [2.0]]),
tensor([[1.0, 2.0]]),
tensor([[1.0, 2.0], [3.0, 4.0]]),
tensor(1.0),
tensor([]),
tensor([1.0]),
tensor([1.0, 2.0]),
tensor([1.0, 2.0, 3.0, 4.0]),
tensor([[1.0]]),
tensor([[1.0], [2.0]]),
tensor([[1.0, 2.0]]),
tensor([[1.0, 2.0], [3.0, 4.0]]),
]
s = SetOfTensors(t)
self.assertEqual(9, len(s))
observed = str(s)
expected = """
tensor(1.)
tensor([1., 2., 3., 4.])
tensor([1., 2.])
tensor([1.])
tensor([[1., 2.],
[3., 4.]])
tensor([[1., 2.]])
tensor([[1.],
[2.]])
tensor([[1.]])
tensor([])"""
self.assertEqual(expected.strip(), observed.strip())
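    def _plain_set_dedup_sketch(self) -> None:
        # Editorial sketch: a plain Python set hashes tensors by object
        # identity, so two equal-valued tensors remain distinct members --
        # exactly the behavior SetOfTensors exists to correct.
        plain = {tensor(1.0), tensor(1.0)}
        self.assertEqual(2, len(plain))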
| beanmachine-main | tests/ppl/utils/set_of_tensors_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from beanmachine.ppl.utils.multidictionary import MultiDictionary
class MultiDictionaryTest(unittest.TestCase):
def test_multidictionary(self) -> None:
d = MultiDictionary()
d.add(1, "alpha")
d.add(1, "bravo")
d.add(2, "charlie")
d.add(2, "delta")
self.assertEqual(2, len(d))
self.assertEqual(2, len(d[1]))
self.assertEqual(2, len(d[2]))
self.assertEqual(0, len(d[3]))
self.assertTrue("alpha" in d[1])
self.assertTrue("alpha" not in d[2])
expected = """
{1:{alpha,
bravo}
2:{charlie,
delta}}"""
self.assertEqual(expected.strip(), str(d).strip())
| beanmachine-main | tests/ppl/utils/multidictionary_test.py |
| beanmachine-main | tests/ppl/utils/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for Graph from graph.py"""
import unittest
from beanmachine.ppl.utils.graph import Graph
class SimpleNode(object):
name: str
label: int
def __init__(self, name: str, label: int):
self.name = name
self.label = label
class GraphTest(unittest.TestCase):
def test_graph(self) -> None:
self.maxDiff = None
"""Tests for Graph from graph.py"""
g: Graph[str] = Graph(str, str)
p1 = g.with_plate()
p1.with_edge("a1", "a2").with_edge("a2", "a3")
p2 = p1.with_plate()
p2.with_edge("a0", "a1").with_edge("a3", "a0")
p3 = g.with_plate()
p3.with_edge("b0", "b1").with_edge("b1", "b2").with_edge("b2", "b3")
p3.with_edge("b2", "a3").with_edge("a1", "b3")
g.with_edge("start", "a0").with_edge("start", "b0")
g.with_edge("a3", "end").with_edge("b3", "end")
observed = g.to_dot()
expected = """
digraph "graph" {
a0;
a1;
a2;
a3;
b0;
b1;
b2;
b3;
end;
start;
a0 -> a1;
a1 -> a2;
a1 -> b3;
a2 -> a3;
a3 -> a0;
a3 -> end;
b0 -> b1;
b1 -> b2;
b2 -> a3;
b2 -> b3;
b3 -> end;
start -> a0;
start -> b0;
subgraph cluster__0 {
a1;
a2;
a3;
subgraph cluster__0_0 {
a0;
}
}
subgraph cluster__1 {
b0;
b1;
b2;
b3;
}
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_isomorphism(self) -> None:
self.maxDiff = None
# a1 b1 c1
# | |
# a2 b2
# / \ / \
# a5 s3 b5
# |
# s4
#
# a1 and b1 are isomorphic, a1 and c1 are not
a1 = SimpleNode("a1", 1)
b1 = SimpleNode("b1", 1)
c1 = SimpleNode("c1", 1)
a2 = SimpleNode("a2", 2)
a5 = SimpleNode("a5", 5)
b2 = SimpleNode("b2", 2)
b5 = SimpleNode("b5", 5)
s3 = SimpleNode("s3", 3)
s4 = SimpleNode("s4", 4)
g: Graph[SimpleNode] = Graph(
lambda x: x.name, lambda x: str(x.label), lambda x: str(x.label)
)
g = g.with_edge(a1, a2).with_edge(a2, a5).with_edge(a2, s3)
g = g.with_edge(b1, b2).with_edge(b2, s3).with_edge(b2, b5)
g = g.with_edge(s3, s4)
g = g.with_node(c1)
self.assertTrue(g.are_dags_isomorphic(a1, b1))
self.assertTrue(g.are_dags_isomorphic(a2, b2))
self.assertFalse(g.are_dags_isomorphic(a1, c1))
self.assertFalse(g.are_dags_isomorphic(a1, b2))
reachable = ",".join(sorted(str(n.label) for n in g.reachable(b2)))
self.assertEqual(reachable, "2,3,4,5")
g.merge_isomorphic(a2, b2)
# After merging b2 into a2:
# a1 b1 c1
# \ /
# a2
# / | \
# a5 s3 b5
# |
# s4
observed = g.to_dot()
expected = """
digraph "graph" {
a1[label=1];
a2[label=2];
a5[label=5];
b1[label=1];
b5[label=5];
c1[label=1];
s3[label=3];
s4[label=4];
a1 -> a2;
a2 -> a5;
a2 -> b5;
a2 -> s3;
b1 -> a2;
s3 -> s4;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_merge(self) -> None:
self.maxDiff = None
# s1
# / | \
# a2 b2 c2
# / \ / \ / \
# a3 a4 b3 b4
# | |
# a5 b5
#
# The three "2" nodes are isomorphic.
s1 = SimpleNode("s1", 1)
a2 = SimpleNode("a2", 2)
b2 = SimpleNode("b2", 2)
c2 = SimpleNode("c2", 2)
a3 = SimpleNode("a3", 3)
a4 = SimpleNode("a4", 4)
b3 = SimpleNode("b3", 3)
b4 = SimpleNode("b4", 4)
a5 = SimpleNode("a5", 5)
b5 = SimpleNode("b5", 5)
g: Graph[SimpleNode] = Graph(
lambda x: x.name, lambda x: str(x.label), lambda x: str(x.label)
)
g = g.with_edge(s1, a2).with_edge(s1, b2).with_edge(s1, c2)
g = g.with_edge(a2, a3).with_edge(a2, a4).with_edge(b2, a4)
g = g.with_edge(b2, b3).with_edge(c2, b3).with_edge(c2, b4)
g = g.with_edge(a4, a5).with_edge(b4, b5)
observed = g.to_dot()
expected = """
digraph "graph" {
a2[label=2];
a3[label=3];
a4[label=4];
a5[label=5];
b2[label=2];
b3[label=3];
b4[label=4];
b5[label=5];
c2[label=2];
s1[label=1];
a2 -> a3;
a2 -> a4;
a4 -> a5;
b2 -> a4;
b2 -> b3;
b4 -> b5;
c2 -> b3;
c2 -> b4;
s1 -> a2;
s1 -> b2;
s1 -> c2;
}
"""
self.assertEqual(observed.strip(), expected.strip())
g.merge_isomorphic_many([a2, b2, c2])
observed = g.to_dot()
# s1
# |
# a2
# / | | \
# a3 a4 b3 b4
# | |
# a5 b5
expected = """
digraph "graph" {
a2[label=2];
a3[label=3];
a4[label=4];
a5[label=5];
b3[label=3];
b4[label=4];
b5[label=5];
s1[label=1];
a2 -> a3;
a2 -> a4;
a2 -> b3;
a2 -> b4;
a4 -> a5;
b4 -> b5;
s1 -> a2;
}
"""
self.assertEqual(observed.strip(), expected.strip())
g.merge_isomorphic_children(a2)
# s1
# |
# a2
# / \
# a3 a4
# / \
# a5 b5
# Note that the isomorphic 5 nodes are not recursively merged.
observed = g.to_dot()
expected = """
digraph "graph" {
a2[label=2];
a3[label=3];
a4[label=4];
a5[label=5];
b5[label=5];
s1[label=1];
a2 -> a3;
a2 -> a4;
a4 -> a5;
a4 -> b5;
s1 -> a2;
}
"""
self.assertEqual(observed.strip(), expected.strip())
| beanmachine-main | tests/ppl/utils/graph_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for item_counter.py"""
import unittest
from beanmachine.ppl.utils.item_counter import ItemCounter
class ItemCounterTest(unittest.TestCase):
def test_item_counter(self) -> None:
i = ItemCounter()
self.assertTrue("a" not in i.items)
self.assertTrue("b" not in i.items)
i.add_item("a")
i.add_item("a")
i.add_item("b")
i.add_item("b")
self.assertEqual(i.items["a"], 2)
self.assertEqual(i.items["b"], 2)
i.remove_item("b")
i.remove_item("a")
i.remove_item("a")
self.assertTrue("a" not in i.items)
self.assertEqual(i.items["b"], 1)
| beanmachine-main | tests/ppl/utils/item_counter_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for print_tree from treeprinter.py"""
import unittest
from beanmachine.ppl.utils.treeprinter import print_tree
class TreePrinterTest(unittest.TestCase):
def test_print_tree(self) -> None:
"""Tests for print_tree from treeprinter.py"""
d = {"foo": 2, "bar": {"blah": [2, 3, {"abc": (6, 7, (5, 5, 6))}]}}
observed = print_tree(d, unicode=False)
expected = """dict
+-foo
| +-2
+-bar
+-blah
+-2
+-3
+-dict
+-abc
+-6
+-7
+-tuple
+-5
+-5
+-6
"""
self.assertEqual(observed, expected)
| beanmachine-main | tests/ppl/utils/treeprinter_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for memoize.py"""
import unittest
from beanmachine.ppl.utils.memoize import memoize
count1 = 0
def fib(n):
global count1
count1 = count1 + 1
return 1 if n <= 1 else fib(n - 1) + fib(n - 2)
count2 = 0
@memoize
def fib_mem(n):
global count2
count2 = count2 + 1
return 1 if n <= 1 else fib_mem(n - 1) + fib_mem(n - 2)
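# Editorial sketch (an assumption, not beanmachine's actual implementation):
# a minimal memoize caches results keyed by the call arguments, which is why
# fib_mem(10) below evaluates its body only once per distinct n (11 calls).
def _memoize_sketch(f):
    cache = {}

    def wrapper(*args):
        if args not in cache:
            cache[args] = f(*args)
        return cache[args]

    return wrapper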
class MemoizeTest(unittest.TestCase):
"""Tests for memoize.py"""
def test_memoize(self) -> None:
"""Tests for memoize.py"""
global count1
global count2
f10 = fib(10)
self.assertEqual(f10, 89)
self.assertEqual(count1, 177)
f10 = fib_mem(10)
self.assertEqual(f10, 89)
self.assertEqual(count2, 11)
| beanmachine-main | tests/ppl/utils/memoize_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for partition functions from equivalence.py"""
import unittest
from typing import Any, Iterable
from beanmachine.ppl.utils.equivalence import partition_by_kernel, partition_by_relation
def _brace(s: str) -> str:
return "{" + s + "}"
def _comma(s: Iterable[str]) -> str:
return ",".join(s)
def _set_str(items: Iterable[Any]) -> str:
return _brace(_comma(sorted({str(item) for item in items})))
def _set_set_str(results: Iterable[Any]) -> str:
return _set_str([_set_str(eqv) for eqv in results])
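# Editorial sketch (an assumption about partition_by_kernel, not the actual
# implementation): partitioning by a kernel amounts to grouping items by the
# kernel's value, so x and y share a class exactly when kernel(x) == kernel(y).
def _partition_by_kernel_sketch(items, kernel):
    groups = {}
    for item in items:
        groups.setdefault(kernel(item), set()).add(item)
    return list(groups.values())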
class PartitionTest(unittest.TestCase):
def test_partition_(self) -> None:
"""Tests for partition_kernel from equivalence.py"""
def three_kernel(x: int) -> int:
return (x % 3 + 3) % 3
def three_relation(x: int, y: int) -> bool:
return (x - y) % 3 == 0
expected = """{{-1,-4,-7,2,5,8},{-2,-5,-8,1,4,7},{-3,-6,-9,0,3,6,9}}"""
s = set(range(-9, 10))
observed1 = _set_set_str(partition_by_relation(s, three_relation))
observed2 = _set_set_str(partition_by_kernel(s, three_kernel))
self.assertEqual(observed1, expected)
self.assertEqual(observed2, expected)
| beanmachine-main | tests/ppl/utils/equivalence_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for print_graph from dotbuilder.py"""
import unittest
from typing import Any, Dict
from beanmachine.ppl.utils.dotbuilder import DotBuilder, print_graph
class GraphPrinterTest(unittest.TestCase):
def test_print_tree(self) -> None:
"""Tests for print_graph from dotbuilder.py"""
bar = {"blah": [2, 3, {"abc": (6, 7, (5, 5, 6))}]}
d: Dict[Any, Any] = {"foo": 2, "bar1": bar, "bar2": bar}
d["self"] = d # type: ignore
observed = print_graph([d])
expected = """
digraph "graph" {
N0[label=dict];
N10[label=5];
N1[label=2];
N2[label=dict];
N3[label=list];
N4[label=3];
N5[label=dict];
N6[label=tuple];
N7[label=6];
N8[label=7];
N9[label=tuple];
N0 -> N0[label=self];
N0 -> N1[label=foo];
N0 -> N2[label=bar1];
N0 -> N2[label=bar2];
N2 -> N3[label=blah];
N3 -> N1[label=0];
N3 -> N4[label=1];
N3 -> N5[label=2];
N5 -> N6[label=abc];
N6 -> N7[label=0];
N6 -> N8[label=1];
N6 -> N9[label=2];
N9 -> N10[label=0];
N9 -> N10[label=1];
N9 -> N7[label=2];
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_builder(self) -> None:
self.maxDiff = None
db = DotBuilder("my_graph")
db.with_comment("comment")
db.start_subgraph("my_subgraph", True)
db.with_label("graph_label")
db.with_node("A1", "A")
db.with_node("A2", "A")
db.with_edge("A1", "A2", "edge_label")
db.end_subgraph()
observed = str(db)
expected = """
digraph my_graph {
// comment
subgraph cluster_my_subgraph {
label=graph_label
A1[label=A];
A2[label=A];
A1 -> A2[label=edge_label];
}
}
"""
self.assertEqual(observed.strip(), expected.strip())
| beanmachine-main | tests/ppl/utils/dotbuilder_test.py |
| beanmachine-main | tests/ppl/testlib/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for hypothesis_testing.py"""
import unittest
from beanmachine.ppl.testlib.hypothesis_testing import (
inverse_chi2_cdf,
inverse_normal_cdf,
mean_equality_hypothesis_confidence_interval,
mean_equality_hypothesis_test,
variance_equality_hypothesis_confidence_interval,
variance_equality_hypothesis_test,
)
from torch import tensor
class HypothesisTestingTest(unittest.TestCase):
def test_hypothesis_test_inverse_normal_cdf(self) -> None:
"""Minimal test for inverse normal CDF used to calculate z values"""
# Check that the median has the probability we expect
median = inverse_normal_cdf(0.5)
self.assertEqual(
median, 0.0, msg="Unexpected value for median of normal distribution"
)
# Record and check the values we get for z_0.01
expected_z_one_percent = -2.3263478740408408
observed_z_one_percent = inverse_normal_cdf(0.01)
self.assertEqual(
observed_z_one_percent,
expected_z_one_percent,
msg="Expected value for z_0.01",
)
# Record and check the values we get for z_0.99
expected_z_99_percent = 2.3263478740408408
observed_z_99_percent = inverse_normal_cdf(1 - 0.01)
self.assertEqual(
observed_z_99_percent,
expected_z_99_percent,
msg="Expected value for z_0.99",
)
# Record and check the values we get for z_0.005
expected_z_half_percent = -2.575829303548901
observed_z_half_percent = inverse_normal_cdf(0.005)
self.assertEqual(
observed_z_half_percent,
expected_z_half_percent,
msg="Expected value for z_0.005",
)
# This example shows why 1-p can be problematic
# Compare this value to -expected_z_half_percent
expected_z_995_thousandths = 2.5758293035489004
observed_z_995_thousandths = inverse_normal_cdf(0.995)
self.assertTrue(
not (expected_z_995_thousandths == -expected_z_half_percent),
msg="Numerical z_p is usually not exactly -z_(1-p)",
)
self.assertEqual(
observed_z_995_thousandths,
expected_z_995_thousandths,
msg="Expected value for z_0.005",
)
def test_hypothesis_test_mean(self) -> None:
"""Minimal test for mean equality hypothesis test"""
sample_mean = tensor(10)
true_mean = tensor(0)
true_std = tensor(1)
sample_size = tensor(1)
p_value = 0.01
observed_result = mean_equality_hypothesis_test(
sample_mean, true_mean, true_std, sample_size, p_value
)
        self.assertFalse(
            observed_result, msg="Mean should not be within the confidence interval"
        )
sample_mean = tensor(0)
true_mean = tensor(0)
true_std = tensor(1)
sample_size = tensor(1)
p_value = 0.01
observed_result = mean_equality_hypothesis_test(
sample_mean, true_mean, true_std, sample_size, p_value
)
        self.assertTrue(
            observed_result, msg="Mean should be within the confidence interval"
        )
# This test case is at the edge of acceptable.
# It should pass because of the = in <= in the
# mean_equality_hypothesis_test method
expected_z_995_thousandths = 2.5758293035489004
sample_mean = tensor(expected_z_995_thousandths)
true_mean = tensor(0)
true_std = tensor(1)
sample_size = tensor(1)
p_value = 0.01
observed_result = mean_equality_hypothesis_test(
sample_mean, true_mean, true_std, sample_size, p_value
)
        self.assertTrue(
            observed_result, msg="Mean should be within the confidence interval"
        )
        # The following two tests probe the edge of what the test should
        # accept. It is strange that the sample mean slightly larger than
        # the critical value does not fail.
        # TODO: Investigate and explain why this passes when it should be
        # just outside the acceptable boundary.
expected_z_995_thousandths = 2.5758293035489004
sample_mean = tensor(expected_z_995_thousandths * 1.00000001)
true_mean = tensor(0)
true_std = tensor(1)
sample_size = tensor(1)
p_value = 0.01
observed_result = mean_equality_hypothesis_test(
sample_mean, true_mean, true_std, sample_size, p_value
)
        self.assertTrue(
            observed_result, msg="Mean should be within the confidence interval"
        )
# This one, with bigger multiplierf, finally returns False
expected_z_995_thousandths = 2.5758293035489004
sample_mean = tensor(expected_z_995_thousandths * 1.0000001)
true_mean = tensor(0)
true_std = tensor(1)
sample_size = tensor(1)
p_value = 0.01
observed_result = mean_equality_hypothesis_test(
sample_mean, true_mean, true_std, sample_size, p_value
)
        self.assertFalse(
            observed_result, msg="Mean should not be within the confidence interval"
        )
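    def _mean_test_formula_sketch(self) -> None:
        # Editorial sketch (our reading of the cases above, not the library's
        # actual code): the two-sided z-test accepts the sample mean when
        # |sample_mean - true_mean| <= z_{1 - p/2} * true_std / sqrt(n), which
        # puts the boundary at z_0.995 ~ 2.5758 for p = 0.01 and n = 1.
        p_value = 0.01
        z = inverse_normal_cdf(1 - p_value / 2)
        self.assertTrue(abs(2.0 - 0.0) <= z * 1.0)  # 2.0 < 2.5758: accept
        self.assertFalse(abs(3.0 - 0.0) <= z * 1.0)  # 3.0 > 2.5758: reject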
def test_confidence_interval_mean(self) -> None:
"""Minimal test for mean confidence interval"""
sample_mean = tensor(2)
true_mean = tensor(0)
true_std = tensor(1)
sample_size = tensor(1)
p_value = 0.05
lower_bound, upper_bound = mean_equality_hypothesis_confidence_interval(
true_mean, true_std, sample_size, p_value
)
observed_result = lower_bound <= sample_mean <= upper_bound
        self.assertFalse(
            observed_result, msg="Mean should not be within the confidence interval"
        )
sample_mean = tensor(1.95)
true_mean = tensor(0)
true_std = tensor(1)
sample_size = tensor(1)
p_value = 0.05
lower_bound, upper_bound = mean_equality_hypothesis_confidence_interval(
true_mean, true_std, sample_size, p_value
)
observed_result = lower_bound <= sample_mean <= upper_bound
        self.assertTrue(
            observed_result, msg="Mean should be within the confidence interval"
        )
def test_hypothesis_test_inverse_chi2_cdf(self) -> None:
"""Minimal test for inverse chi-squared CDF used to calculate chi2 values"""
# Check that the median has the probability we expect
        # A rule of thumb for chi2 is that the median is df - 0.7;
        # in this test we pin down a more specific value taken from a test run
median = inverse_chi2_cdf(100, 0.5)
self.assertEqual(
median,
99.33412923598846,
msg="Unexpected value for median of chi square distribution",
)
# Record and check the values we get for chi2_0.01
        # From C.M. Thompson's tables of 1941, we expect 70.0648; the more
        # specific value below reflects the result of a test run.
        # NB: The test run appears to contradict the least significant
        # digit in the table cited above, but not if we take into account
        # that the p used for the table lookup is 0.990, which suggests
        # only 4 digits are valid.
expected_chi2_one_percent = 70.06489492539978
observed_chi2_one_percent = inverse_chi2_cdf(100, 0.01)
self.assertEqual(
observed_chi2_one_percent,
expected_chi2_one_percent,
msg="Unexpected value for chi2_0.01",
)
# Record and check the values we get for chi2_0.99
# Table above predicts 135.807
expected_chi2_99_percent = 135.80672317102676
observed_chi2_99_percent = inverse_chi2_cdf(100, 1 - 0.01)
self.assertEqual(
observed_chi2_99_percent,
expected_chi2_99_percent,
msg="Unexpected value for chi2_0.99",
)
# Record and check the values we get for chi2_0.005
# Table above predicts 67.3276
expected_chi2_half_percent = 67.32756330547916
observed_chi2_half_percent = inverse_chi2_cdf(100, 0.005)
self.assertEqual(
observed_chi2_half_percent,
expected_chi2_half_percent,
msg="Unexpected value for chi2_0.005",
)
def test_hypothesis_test_variance(self) -> None:
"""Minimal test for variance equality hypothesis test"""
# Based on solved example in Scheaffer & McClave, 1986, Pg 300
sample_std = tensor(0.0003) ** 0.5
true_std = tensor(0.0002) ** 0.5
degrees_of_freedom = tensor(9)
alpha = 0.05
observed_result = variance_equality_hypothesis_test(
sample_std, true_std, degrees_of_freedom, alpha
)
        self.assertTrue(
            observed_result, msg="Variance should be within the confidence interval"
        )
sample_std = tensor(0.002) ** 0.5
true_std = tensor(0.0002) ** 0.5
degrees_of_freedom = tensor(9)
alpha = 0.05
observed_result = variance_equality_hypothesis_test(
sample_std, true_std, degrees_of_freedom, alpha
)
        self.assertFalse(
            observed_result, msg="Variance should not be within the confidence interval"
        )
# Based on lookup of chi-squared table values
# The interval for chi-square at p=0.1 split over both distribution ends is
# approximately [77.9, 124.3]
# First, we check the lower bound
sample_std = tensor(78.0 / 100.0) ** 0.5
true_std = tensor(1.0)
degrees_of_freedom = tensor(100)
alpha = 0.1
observed_result = variance_equality_hypothesis_test(
sample_std, true_std, degrees_of_freedom, alpha
)
        self.assertTrue(
            observed_result, msg="Variance should be within the confidence interval"
        )
sample_std = tensor(77.0 / 100.0) ** 0.5
true_std = tensor(1.0)
degrees_of_freedom = tensor(100)
alpha = 0.1
observed_result = variance_equality_hypothesis_test(
sample_std, true_std, degrees_of_freedom, alpha
)
        self.assertFalse(
            observed_result, msg="Variance should not be within the confidence interval"
        )
# Second, we check the upper bound
sample_std = tensor(124.0 / 100.0) ** 0.5
true_std = tensor(1.0)
degrees_of_freedom = tensor(100)
alpha = 0.1
observed_result = variance_equality_hypothesis_test(
sample_std, true_std, degrees_of_freedom, alpha
)
        self.assertTrue(
            observed_result, msg="Variance should be within the confidence interval"
        )
sample_std = tensor(125.0 / 100.0) ** 0.5
true_std = tensor(1.0)
degrees_of_freedom = tensor(100)
alpha = 0.1
observed_result = variance_equality_hypothesis_test(
sample_std, true_std, degrees_of_freedom, alpha
)
        self.assertFalse(
            observed_result, msg="Variance should not be within the confidence interval"
        )
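    def _variance_test_formula_sketch(self) -> None:
        # Editorial sketch (our reading of the cases above, not the library's
        # actual code): the two-sided chi-square test accepts the sample
        # variance when df * s^2 / sigma^2 falls within
        # [chi2_{a/2}(df), chi2_{1-a/2}(df)] -- roughly [77.9, 124.3] for
        # df = 100 and alpha = 0.1, matching the boundary cases above.
        df, alpha = 100, 0.1
        lower = inverse_chi2_cdf(df, alpha / 2)
        upper = inverse_chi2_cdf(df, 1 - alpha / 2)
        self.assertTrue(lower <= df * 0.78 <= upper)  # statistic 78.0: accept
        self.assertFalse(lower <= df * 0.77 <= upper)  # statistic 77.0: reject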
def test_confidence_interval_variance(self) -> None:
"""Minimal test for variance confidence interval"""
true_std = tensor(1.0)
degrees_of_freedom = tensor(100)
alpha = 0.05
observed_interval = variance_equality_hypothesis_confidence_interval(
true_std, degrees_of_freedom, alpha
)
observed_lower, observed_upper = observed_interval
expected_std_lower1 = tensor(0.86)
expected_std_lower2 = tensor(0.87)
expected_std_upper1 = tensor(1.13)
expected_std_upper2 = tensor(1.14)
self.assertLessEqual(expected_std_lower1, observed_lower, "Lower bound too low")
self.assertLessEqual(
observed_lower, expected_std_lower2, "Lower bound too high"
)
self.assertLessEqual(expected_std_upper1, observed_upper, "Upper bound too low")
self.assertLessEqual(
observed_upper, expected_std_upper2, "Upper bound too high"
)
| beanmachine-main | tests/ppl/testlib/hypothesis_testing_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import unittest
import warnings
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
import torch.utils._pytree as pytree
@bm.random_variable
def foo():
return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))
class RVIdentifierTest(unittest.TestCase):
class SampleModel:
@staticmethod
@bm.random_variable
def foo():
return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))
@bm.random_variable
def bar(self, sigma: float):
return dist.Normal(self.foo(), torch.tensor(sigma))
@bm.random_variable
def baz(self):
return dist.Normal(self.foo(), self.bar(1.0))
class SampleModelWithEq:
@bm.random_variable
def foo(self):
return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))
def __eq__(self, other):
return isinstance(other, RVIdentifierTest.SampleModelWithEq)
class SampleModelWithIndex:
@bm.random_variable
def foo(self, u: int):
return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))
def test_indexed_model_rv_identifier(self):
model = self.SampleModelWithIndex()
# RVs indexed using primitives should not show a user warning
with warnings.catch_warnings():
warnings.simplefilter("error")
model.foo(1)
with self.assertWarns(UserWarning) as context:
model.foo(torch.tensor(1))
self.assertEqual(
"PyTorch tensors are hashed by memory address instead of value. "
"Therefore, it is not recommended to use tensors as indices of random variables.",
str(context.warning),
msg="RVs indexed using tensor should show the correct user warning",
)
def test_pickle_unbound_rv_identifier(self):
original_foo_key = foo()
foo_bytes = pickle.dumps(foo())
reloaded_foo_key = pickle.loads(foo_bytes)
# reloaded RVIdentifier should be equivalent to the original copy
self.assertEqual(original_foo_key, reloaded_foo_key)
self.assertEqual(reloaded_foo_key, foo())
# In fact, when unpickling, it will recover the reference to the decorated
# function
self.assertIs(reloaded_foo_key.wrapper, foo)
# ^ this requires the function to be available when unpickling
def test_pickle_rv_with_same_name(self):
rv_bytes = pickle.dumps((foo(), self.SampleModel.foo()))
foo_key_1, foo_key_2 = pickle.loads(rv_bytes)
self.assertEqual(foo(), foo_key_1)
self.assertEqual(self.SampleModel.foo(), foo_key_2)
# the two 'foo' functions with same name are not equivalent
self.assertNotEqual(foo_key_1, foo_key_2)
def test_pickle_bound_rv_identifier(self):
model = self.SampleModel()
bar_key = model.bar(3.0)
# we should dump the model and RVIdentifier together if we want to recover the
# reference
model_and_rv_bytes = pickle.dumps((model, bar_key))
reloaded_model, reloaded_bar_key = pickle.loads(model_and_rv_bytes)
# We should be able to use the reloaded model to generate new RVIdentifier that
# are equivalent to the unpickled ones
self.assertEqual(reloaded_model.bar(3.0), reloaded_bar_key)
# However, notice that the reloaded model is a copy of the original model with
# the same value, so unless __eq__ is defined on the model, Python will compare
# object by address (so the reloaded model & identifier are not equal to the
# original ones)
self.assertNotEqual(reloaded_model, model)
self.assertNotEqual(bar_key, reloaded_bar_key)
def test_pickle_bound_rv_in_model_with_eq_operator(self):
model = self.SampleModelWithEq()
foo_key = model.foo()
model_and_rv_bytes = pickle.dumps((model, foo_key))
reloaded_model, reloaded_foo_key = pickle.loads(model_and_rv_bytes)
self.assertEqual(reloaded_model, model)
self.assertEqual(foo_key, reloaded_foo_key)
self.assertEqual(model.foo(), reloaded_foo_key)
# Though instead of defining __eq__ and maintain multiple copies of the model,
# it might be better to just use the unpickled model in a new session, i.e.
del model # mock the case where model is not defined in the new session yet
model, bar_key = pickle.loads(model_and_rv_bytes)
self.assertEqual(model.foo(), foo_key)
# For global scope random variables, the definition of functions have to be
# available when unpickling. Similarly, for class cope random variables, the
# definition of class also needs to be available.
def test_pickle_multiple_models(self):
model1 = self.SampleModel()
model2 = self.SampleModel()
self.assertNotEqual(model1.baz(), model2.baz())
rv_set = {model1.baz(), model2.baz(), model2.bar(1.5)}
        # the following is similar to how one would serialize an entire session
serialized_bytes = pickle.dumps(
{"model1": model1, "model2": model2, "values_to_keep": rv_set}
)
# notice that we can also dump the two models separately as long as they don't
# cross reference each other
# delete current variables and "start a new session"
del model1
del model2
del rv_set
restored_state = pickle.loads(serialized_bytes)
model1 = restored_state.get("model1")
model2 = restored_state.get("model2")
rv_set = restored_state.get("values_to_keep")
self.assertNotEqual(model1.baz(), model2.baz())
self.assertIn(model1.baz(), rv_set)
self.assertIn(model2.baz(), rv_set)
self.assertNotIn(model1.bar(1.5), rv_set)
self.assertIn(model2.bar(1.5), rv_set)
def test_sorting_rv_identifier(self):
model = self.SampleModel()
observations = {
model.foo(): torch.tensor(1.0),
model.bar(0.5): torch.tensor(1.0),
model.baz(): torch.tensor(1.0),
}
# make sure the following doesn't raise
sorted(observations.keys())
pytree.tree_flatten(observations)
| beanmachine-main | tests/ppl/model/rv_identifier_test.py |
| beanmachine-main | tests/ppl/model/__init__.py |
| beanmachine-main | tests/ppl/diagnostics/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Dict
import beanmachine.ppl as bm
import beanmachine.ppl.diagnostics.common_statistics as common_statistics
import numpy as np
import pandas as pd
import torch
import torch.distributions as dist
from beanmachine.ppl.diagnostics.diagnostics import Diagnostics
from statsmodels.tsa.stattools import acf
diri_dis = dist.Dirichlet(
torch.tensor([[1.0, 2.0, 3.0], [2.0, 1.0, 3.0], [2.0, 3.0, 1.0]])
)
beta_dis = dist.Beta(torch.tensor([1.0, 2.0, 3.0]), torch.tensor([9.0, 8.0, 7.0]))
normal_dis = dist.Normal(torch.tensor([0.0, 1.0, 2.0]), torch.tensor([0.5, 1.0, 1.5]))
@bm.random_variable
def diri(i, j):
return diri_dis
@bm.random_variable
def beta(i):
return beta_dis
@bm.random_variable
def normal():
return normal_dis
@bm.random_variable
def foo():
return dist.Normal(0, 1)
@bm.random_variable
def bar():
return dist.Normal(torch.randn(3, 1, 2), torch.ones(3, 1, 2))
def dist_summary_stats() -> Dict[str, torch.tensor]:
exact_mean = {
"beta": beta_dis.mean.reshape(-1),
"diri": diri_dis.mean.reshape(-1),
"normal": normal_dis.mean.reshape(-1),
}
exact_std = {
"beta": torch.sqrt(beta_dis.variance.reshape(-1)),
"diri": torch.sqrt(diri_dis.variance.reshape(-1)),
"normal": torch.sqrt(normal_dis.variance.reshape(-1)),
}
exact_CI_2_5 = {"normal": normal_dis.mean - 1.96 * torch.sqrt(normal_dis.variance)}
exact_CI_50 = {"normal": normal_dis.mean}
exact_CI_97_5 = {"normal": normal_dis.mean + 1.96 * torch.sqrt(normal_dis.variance)}
exact_stats = {
"avg": exact_mean,
"std": exact_std,
"2.5%": exact_CI_2_5,
"50%": exact_CI_50,
"97.5%": exact_CI_97_5,
}
return exact_stats
class DiagnosticsTest(unittest.TestCase):
def test_basic_diagnostics(self):
def _inference_evaulation(summary: pd.DataFrame):
exact_stats = dist_summary_stats()
for col in summary.columns:
if not (col in exact_stats):
continue
for dis, res in exact_stats[col].items():
query_res = summary.loc[summary.index.str.contains(f"^{dis}")]
for i, val in enumerate(query_res[col].values):
self.assertAlmostEqual(
val,
res[i].item(),
msg=f"query {query_res.index[i]} for {col}",
delta=0.5,
)
def _test_plot_object(diag, query, query_samples):
plot_object = diag.plot([query])
trace_object = diag.trace([query])
index = 0
num_samples = query_samples[0].numel()
# test the trace plot over the first chain of beta(0)
for i in range(num_samples):
assert all(
a == b
for a, b in zip(
plot_object[0]["data"][index]["y"], query_samples[:, i]
)
), f"plot object for {diag._stringify_query(query)} is not correct"
assert all(
a == b
for a, b in zip(
trace_object[0]["data"][index]["y"], query_samples[:, i]
)
), f"trace object for {diag._stringify_query(query)} {i} is not correct"
index += 2
def _test_autocorr_object(diag, query, query_samples):
autocorr_object = diag.autocorr([query])
index = 0
num_samples = query_samples[0].numel()
# test the autocorr results over the first chain of beta(0)
for i in range(num_samples):
expected_acf = acf(
query_samples[:, i].detach().numpy(),
True,
nlags=num_samples - 1,
fft=False,
)
for ns in range(num_samples):
self.assertAlmostEqual(
autocorr_object[0]["data"][index]["y"][ns],
expected_acf[ns],
msg=f"autocorr data for {diag._stringify_query(query)}\
is not correct",
delta=0.3,
)
index += 1
np.random.seed(123)
torch.manual_seed(123)
mh = bm.SingleSiteAncestralMetropolisHastings()
query_list = [beta(0), diri(1, 5), normal()]
num_chains = 2
samples = mh.infer(query_list, {}, 200, num_chains)
out_df = Diagnostics(samples).summary()
_inference_evaulation(out_df)
out_df = Diagnostics(samples).summary([diri(1, 5), beta(0)])
_inference_evaulation(out_df)
out_df = Diagnostics(samples).summary(query_list=[diri(1, 5)], chain=1)
_inference_evaulation(out_df)
self.assertRaises(ValueError, Diagnostics(samples).summary, [diri(1, 3)])
self.assertRaises(ValueError, Diagnostics(samples).summary, [diri(1, 5), foo()])
query = beta(0)
query_samples = samples[query][0]
_test_plot_object(Diagnostics(samples), query, query_samples)
_test_autocorr_object(Diagnostics(samples), query, query_samples)
def test_r_hat_one_chain(self):
mh = bm.SingleSiteAncestralMetropolisHastings()
samples = mh.infer([normal()], {}, 5, 1)
diagnostics = Diagnostics(samples)
with self.assertWarns(UserWarning):
results = diagnostics.split_r_hat([normal()])
self.assertTrue(results.empty)
def test_r_hat_column(self):
mh = bm.SingleSiteAncestralMetropolisHastings()
samples = mh.infer([normal()], {}, 5, 2)
diagnostics = Diagnostics(samples)
out_df = diagnostics.summary()
self.assertTrue("r_hat" in out_df.columns)
out_df = diagnostics.summary(chain=0)
self.assertTrue("r_hat" not in out_df.columns)
def test_r_hat_no_column(self):
mh = bm.SingleSiteAncestralMetropolisHastings()
samples = mh.infer([normal()], {}, 5, 1)
out_df = Diagnostics(samples).summary()
self.assertTrue("r_hat" not in out_df.columns)
def test_r_hat(self):
samples = torch.tensor([[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0]])
self.assertAlmostEqual(common_statistics.r_hat(samples), 2.3558, delta=0.001)
self.assertAlmostEqual(
common_statistics.split_r_hat(samples), 3.7193, delta=0.001
)
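    def _r_hat_formula_sketch(self) -> None:
        # Editorial sketch of the (non-split) Gelman-Rubin statistic, which we
        # assume common_statistics.r_hat implements: with m chains of n draws,
        # r_hat = sqrt((n - 1) / n + B / (n * W)), where B is the between-chain
        # and W the mean within-chain variance. This reproduces 2.3558 above.
        samples = torch.tensor([[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0]])
        _, n = samples.shape
        b = n * samples.mean(dim=1).var(unbiased=True)
        w = samples.var(dim=1, unbiased=True).mean()
        r_hat = torch.sqrt((n - 1) / n + b / (n * w))
        self.assertAlmostEqual(r_hat.item(), 2.3558, delta=0.001)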
def test_r_hat_additional_dimension(self):
samples = torch.tensor(
[
[[0.0, 2.0], [2.0, 4.0], [4.0, 8.0], [6.0, 0.0]],
[[8.0, 12.0], [10.0, 6.0], [12.0, 1.0], [14.0, 2.0]],
[[16.0, -5.0], [18.0, 4.0], [20.0, 2.0], [22.0, 4.0]],
]
)
dim1, dim2 = common_statistics.r_hat(samples)
self.assertAlmostEqual(dim1, 3.2171, delta=0.001)
self.assertAlmostEqual(dim2, 0.9849, delta=0.001)
dim1, dim2 = common_statistics.split_r_hat(samples)
self.assertAlmostEqual(dim1, 5.3385, delta=0.001)
self.assertAlmostEqual(dim2, 1.0687, delta=0.001)
def test_effective_sample_size(self):
samples = torch.tensor(
[[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]], dtype=torch.double
)
n_eff = common_statistics.effective_sample_size(samples)
self.assertAlmostEqual(n_eff, 2.6114, delta=0.001)
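    # (Editorial note, for intuition: effective sample size deflates the raw
    # draw count by chain autocorrelation, roughly n_eff ~ m * n / (1 + 2 *
    # sum_t rho_t); the monotone chain above is highly autocorrelated, so 8
    # draws collapse to ~2.6 effective draws.)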
def test_effective_sample_size_additional_dimension(self):
samples = torch.tensor(
[
[[0.0, 2.0], [2.0, 4.0], [4.0, 8.0], [6.0, 0.0]],
[[8.0, 12.0], [10.0, 6.0], [12.0, 1.0], [14.0, 2.0]],
[[16.0, -5.0], [18.0, 4.0], [20.0, 2.0], [22.0, 4.0]],
]
)
dim1, dim2 = common_statistics.effective_sample_size(samples)
self.assertAlmostEqual(dim1, 1.9605, delta=0.001)
self.assertAlmostEqual(dim2, 15.1438, delta=0.001)
def test_effective_sample_size_columns(self):
mh = bm.SingleSiteAncestralMetropolisHastings()
samples = mh.infer([normal()], {}, 5, 2)
out_df = Diagnostics(samples).summary()
self.assertTrue("n_eff" in out_df.columns)
def test_singleton_dims(self):
mh = bm.SingleSiteAncestralMetropolisHastings()
obs = {bar(): torch.ones(3, 1, 2)}
samples = mh.infer([bar()], obs, 5, 2)
diagnostics = Diagnostics(samples)
out_df = diagnostics.summary()
self.assertTrue("r_hat" in out_df.columns)
| beanmachine-main | tests/ppl/diagnostics/diagnostics_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class SingleSiteNewtonianMonteCarloConjugateTest(
unittest.TestCase, AbstractConjugateTests
):
# TODO: Decrease the num_samples; num_samples>=2000 to get n_eff>=30 is
    # unreasonable. It currently fails for num_samples=1000 because the
    # hessian (for the transform proposer) is extremely close to 0.
def test_beta_binomial_conjugate_run(self):
nw = bm.SingleSiteNewtonianMonteCarlo()
self.beta_binomial_conjugate_run(nw, num_samples=2000)
def test_gamma_gamma_conjugate_run(self):
nw_transform = bm.SingleSiteNewtonianMonteCarlo()
self.gamma_gamma_conjugate_run(nw_transform, num_samples=200)
def test_gamma_normal_conjugate_run(self):
nw = bm.SingleSiteNewtonianMonteCarlo()
self.gamma_normal_conjugate_run(nw, num_samples=600)
def test_normal_normal_conjugate_run(self):
nw = bm.SingleSiteNewtonianMonteCarlo()
self.normal_normal_conjugate_run(nw, num_samples=500)
def test_dirichlet_categorical_conjugate_run(self):
nw = bm.SingleSiteNewtonianMonteCarlo()
self.dirichlet_categorical_conjugate_run(nw, num_samples=2000)
| beanmachine-main | tests/ppl/inference/single_site_newtonian_monte_carlo_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import unittest
import beanmachine.ppl as bm
import numpy as np
import torch
import torch.distributions as dist
import xarray as xr
from beanmachine.ppl.inference.monte_carlo_samples import merge_dicts, MonteCarloSamples
class MonteCarloSamplesTest(unittest.TestCase):
class SampleModel(object):
@bm.random_variable
def foo(self):
return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), torch.tensor(1.0))
def test_default_four_chains(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
mcs = mh.infer([foo_key], {}, 10)
self.assertEqual(mcs[foo_key].shape, torch.zeros(4, 10).shape)
self.assertEqual(mcs.get_variable(foo_key).shape, torch.zeros(4, 10).shape)
self.assertEqual(mcs.get_chain(3)[foo_key].shape, torch.zeros(10).shape)
self.assertEqual(mcs.num_chains, 4)
self.assertCountEqual(mcs.keys(), [foo_key])
mcs = mh.infer([foo_key], {}, 7, num_adaptive_samples=3)
self.assertEqual(mcs.num_adaptive_samples, 3)
self.assertEqual(mcs[foo_key].shape, torch.zeros(4, 7).shape)
self.assertEqual(mcs.get_variable(foo_key).shape, torch.zeros(4, 7).shape)
self.assertEqual(
mcs.get_variable(foo_key, True).shape, torch.zeros(4, 10).shape
)
self.assertEqual(mcs.get_chain(3)[foo_key].shape, torch.zeros(7).shape)
self.assertEqual(mcs.num_chains, 4)
self.assertCountEqual(mcs.keys(), [foo_key])
def test_one_chain(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
bar_key = model.bar()
mcs = mh.infer([foo_key, bar_key], {}, 10, 1)
self.assertEqual(mcs[foo_key].shape, torch.zeros(1, 10).shape)
self.assertEqual(mcs.get_variable(foo_key).shape, torch.zeros(1, 10).shape)
self.assertEqual(mcs.get_chain()[foo_key].shape, torch.zeros(10).shape)
self.assertEqual(mcs.num_chains, 1)
self.assertCountEqual(mcs.keys(), [foo_key, bar_key])
mcs = mh.infer([foo_key, bar_key], {}, 7, 1, num_adaptive_samples=3)
self.assertEqual(mcs.num_adaptive_samples, 3)
self.assertEqual(mcs[foo_key].shape, torch.zeros(1, 7).shape)
self.assertEqual(mcs.get_variable(foo_key).shape, torch.zeros(1, 7).shape)
self.assertEqual(
mcs.get_variable(foo_key, True).shape, torch.zeros(1, 10).shape
)
self.assertEqual(mcs.get_chain()[foo_key].shape, torch.zeros(7).shape)
self.assertEqual(mcs.num_chains, 1)
self.assertCountEqual(mcs.keys(), [foo_key, bar_key])
def test_chain_exceptions(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
mcs = mh.infer([foo_key], {}, 10)
with self.assertRaisesRegex(IndexError, r"Please specify a valid chain"):
mcs.get_chain(-1)
with self.assertRaisesRegex(IndexError, r"Please specify a valid chain"):
mcs.get_chain(4)
with self.assertRaisesRegex(
ValueError,
r"The current MonteCarloSamples object has already"
r" been restricted to a single chain",
):
one_chain = mcs.get_chain()
one_chain.get_chain()
def test_num_adaptive_samples(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
mcs = mh.infer([foo_key], {}, 10, num_adaptive_samples=3)
self.assertEqual(mcs[foo_key].shape, torch.zeros(4, 10).shape)
self.assertEqual(mcs.get_variable(foo_key).shape, torch.zeros(4, 10).shape)
self.assertEqual(
mcs.get_variable(foo_key, include_adapt_steps=True).shape,
torch.zeros(4, 13).shape,
)
self.assertEqual(mcs.get_num_samples(), 10)
self.assertEqual(mcs.get_num_samples(include_adapt_steps=True), 13)
def test_dump_and_restore_samples(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
samples = mh.infer([foo_key], {}, num_samples=10, num_chains=2)
self.assertEqual(samples[foo_key].shape, (2, 10))
dumped = pickle.dumps((model, samples))
# delete local variables and pretend that we are starting from a new session
del model
del mh
del foo_key
del samples
# reload from dumped bytes
reloaded_model, reloaded_samples = pickle.loads(dumped)
# check the values still exist and have the correct shape
self.assertEqual(reloaded_samples[reloaded_model.foo()].shape, (2, 10))
def test_get_rv_with_default(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
samples = mh.infer([foo_key], {}, num_samples=10, num_chains=2)
self.assertIn(model.foo(), samples)
self.assertIsInstance(samples.get(model.foo()), torch.Tensor)
self.assertIsNone(samples.get(model.bar()))
self.assertEqual(samples.get(model.foo(), chain=0).shape, (10,))
def test_merge_dicts(self):
model = self.SampleModel()
chain_lists = [{model.foo(): torch.rand(3)}, {model.foo(): torch.rand(3)}]
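        # merge_dicts should stack the two per-chain dicts into one (num_chains, num_samples) tensor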
rv_dict = merge_dicts(chain_lists)
self.assertIn(model.foo(), rv_dict)
self.assertEqual(rv_dict.get(model.foo()).shape, (2, 3))
chain_lists.append({model.bar(): torch.rand(3)})
with self.assertRaises(ValueError):
merge_dicts(chain_lists)
def test_type_conversion(self):
model = self.SampleModel()
samples = MonteCarloSamples(
[{model.foo(): torch.rand(5), model.bar(): torch.rand(5)}],
num_adaptive_samples=3,
)
xr_dataset = samples.to_xarray()
self.assertIsInstance(xr_dataset, xr.Dataset)
self.assertIn(model.foo(), xr_dataset)
assert np.allclose(samples[model.bar()].numpy(), xr_dataset[model.bar()])
xr_dataset = samples.to_xarray(include_adapt_steps=True)
self.assertEqual(xr_dataset[model.foo()].shape, (1, 5))
inference_data = samples.to_inference_data()
self.assertIn(model.foo(), inference_data.posterior)
def test_get_variable(self):
model = self.SampleModel()
samples = MonteCarloSamples(
[{model.foo(): torch.arange(10)}], num_adaptive_samples=3
).get_chain(0)
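        # the first 3 draws are adaptive; get_variable drops them unless True is passed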
self.assertTrue(
torch.all(samples.get_variable(model.foo()) == torch.arange(3, 10))
)
self.assertTrue(
torch.all(samples.get_variable(model.foo(), True) == torch.arange(10))
)
    def test_get_log_likelihoods(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
bar_key = model.bar()
mcs = mh.infer(
[foo_key],
{bar_key: torch.tensor(4.0)},
num_samples=5,
num_chains=2,
)
self.assertTrue(hasattr(mcs, "log_likelihoods"))
self.assertIn(bar_key, mcs.log_likelihoods)
self.assertTrue(hasattr(mcs, "adaptive_log_likelihoods"))
self.assertIn(bar_key, mcs.adaptive_log_likelihoods)
self.assertEqual(
mcs.get_log_likelihoods(bar_key).shape, torch.zeros(2, 5).shape
)
mcs = mcs.get_chain(0)
self.assertEqual(mcs.get_log_likelihoods(bar_key).shape, torch.zeros(5).shape)
mcs = mh.infer(
[foo_key],
{bar_key: torch.tensor(4.0)},
num_samples=5,
num_chains=2,
num_adaptive_samples=3,
)
self.assertEqual(
mcs.get_log_likelihoods(bar_key).shape, torch.zeros(2, 5).shape
)
self.assertEqual(
mcs.adaptive_log_likelihoods[bar_key].shape, torch.zeros(2, 3).shape
)
self.assertEqual(
mcs.get_chain(0).get_log_likelihoods(bar_key).shape, torch.zeros(5).shape
)
self.assertEqual(
mcs.get_log_likelihoods(bar_key, True).shape, torch.zeros(2, 8).shape
)
self.assertEqual(
mcs.get_chain(0).adaptive_log_likelihoods[bar_key].shape,
torch.zeros(1, 3).shape,
)
def test_thinning(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
samples = mh.infer([model.foo()], {}, num_samples=20, num_chains=1)
self.assertEqual(samples.get(model.foo(), chain=0).shape, (20,))
self.assertEqual(samples.get(model.foo(), chain=0, thinning=4).shape, (5,))
def test_add_group(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
samples = mh.infer([model.foo()], {}, num_samples=20, num_chains=1)
bar_samples = MonteCarloSamples(samples.samples, default_namespace="bar")
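        # add_groups is expected to carry over the observations and log-likelihood groups from samples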
bar_samples.add_groups(samples)
self.assertEqual(samples.observations, bar_samples.observations)
self.assertEqual(samples.log_likelihoods, bar_samples.log_likelihoods)
self.assertIn("posterior", bar_samples.namespaces)
def test_to_inference_data(self):
model = self.SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
samples = mh.infer([model.foo()], {}, num_samples=10, num_chains=1)
az_xarray = samples.to_inference_data()
self.assertNotIn("warmup_posterior", az_xarray)
samples = mh.infer(
[model.foo()], {}, num_samples=10, num_adaptive_samples=2, num_chains=1
)
az_xarray = samples.to_inference_data(include_adapt_steps=True)
self.assertIn("warmup_posterior", az_xarray)
| beanmachine-main | tests/ppl/inference/monte_carlo_samples_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class SingleSiteUniformMetropolisHastingsConjugateTest(
unittest.TestCase, AbstractConjugateTests
):
def setUp(self):
self.mh = bm.SingleSiteUniformMetropolisHastings()
def test_beta_binomial_conjugate_run(self):
self.beta_binomial_conjugate_run(self.mh)
def test_gamma_gamma_conjugate_run(self):
self.gamma_gamma_conjugate_run(self.mh, random_seed=123)
def test_gamma_normal_conjugate_run(self):
self.gamma_normal_conjugate_run(self.mh, num_samples=7500)
def test_normal_normal_conjugate_run(self):
self.normal_normal_conjugate_run(self.mh, num_samples=5000)
def test_dirichlet_categorical_conjugate_run(self):
self.dirichlet_categorical_conjugate_run(self.mh, num_samples=5000)
| beanmachine-main | tests/ppl/inference/single_site_uniform_mh_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class SingleSiteAdaptiveRandomWalkConjugateTest(
unittest.TestCase, AbstractConjugateTests
):
def setUp(self):
self.mh = bm.SingleSiteRandomWalk(step_size=5.0)
def test_beta_binomial_conjugate_run(self):
self.mh = bm.SingleSiteRandomWalk(step_size=1.0)
self.beta_binomial_conjugate_run(
self.mh, num_samples=3000, num_adaptive_samples=1600
)
@unittest.skip("Known to fail. Investigating in T77865889.")
def test_gamma_gamma_conjugate_run(self):
self.mh = bm.SingleSiteRandomWalk(step_size=3.0)
self.gamma_gamma_conjugate_run(
self.mh, num_samples=5000, num_adaptive_samples=7000
)
def test_gamma_normal_conjugate_run(self):
self.mh = bm.SingleSiteRandomWalk(step_size=5.0)
self.gamma_normal_conjugate_run(
self.mh, num_samples=6000, num_adaptive_samples=5000
)
@unittest.skip("Known to fail. Investigating in T77865889.")
def test_normal_normal_conjugate_run(self):
self.normal_normal_conjugate_run(
self.mh, num_samples=2000, num_adaptive_samples=2000
)
@unittest.skip("Known to fail. Investigating in T77865889.")
def test_dirichlet_categorical_conjugate_run(self):
self.dirichlet_categorical_conjugate_run(
self.mh, num_samples=2000, num_adaptive_samples=2000
)
| beanmachine-main | tests/ppl/inference/single_site_random_walk_adaptive_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
class SampleModel:
@bm.random_variable
def foo(self):
return dist.Normal(torch.tensor(0.0), torch.tensor(1.0))
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), torch.tensor(1.0))
class ReproducibleModel:
@bm.random_variable
def K_minus_one(self):
return dist.Poisson(rate=2.0)
@bm.functional
def K(self):
return self.K_minus_one() + 1
@bm.random_variable
def mu(self):
return dist.Normal(0, 1)
def test_single_site_ancestral_mh():
model = SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
foo_key = model.foo()
bar_key = model.bar()
sampler = mh.sampler(
[model.foo()], {model.bar(): torch.tensor(0.0)}, num_samples=10
)
for world in sampler:
assert foo_key in world
assert bar_key in world
assert foo_key in world.get_variable(bar_key).parents
assert bar_key in world.get_variable(foo_key).children
def test_single_site_ancestral_mh_reproducible_results():
model = ReproducibleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
queries = [model.mu()]
observations = {}
torch.manual_seed(42)
samples = mh.infer(queries, observations, num_samples=5, num_chains=1)
run_1 = samples.get_variable(model.mu()).clone()
torch.manual_seed(42)
samples = mh.infer(queries, observations, num_samples=5, num_chains=1)
run_2 = samples.get_variable(model.mu()).clone()
assert run_1.allclose(run_2)
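    # a different seed should yield a different chain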
torch.manual_seed(43)
samples = mh.infer(queries, observations, num_samples=5, num_chains=1)
run_3 = samples.get_variable(model.mu()).clone()
assert not run_1.allclose(run_3)
| beanmachine-main | tests/ppl/inference/single_site_ancestral_mh_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class SingleSiteNoUTurnConjugateTest(unittest.TestCase, AbstractConjugateTests):
def setUp(self):
self.nuts = bm.SingleSiteNoUTurnSampler()
def test_beta_binomial_conjugate_run(self):
self.beta_binomial_conjugate_run(
self.nuts, num_samples=300, num_adaptive_samples=300
)
def test_gamma_gamma_conjugate_run(self):
self.gamma_gamma_conjugate_run(
self.nuts, num_samples=300, num_adaptive_samples=300
)
def test_gamma_normal_conjugate_run(self):
self.gamma_normal_conjugate_run(
self.nuts, num_samples=300, num_adaptive_samples=300
)
def test_normal_normal_conjugate_run(self):
self.normal_normal_conjugate_run(
self.nuts, num_samples=300, num_adaptive_samples=300
)
def test_dirichlet_categorical_conjugate_run(self):
self.dirichlet_categorical_conjugate_run(
self.nuts, num_samples=300, num_adaptive_samples=300
)
| beanmachine-main | tests/ppl/inference/single_site_no_u_turn_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
@bm.random_variable
def foo():
return dist.Normal(0.0, 1.0)
def test_set_random_seed():
def sample_with_seed(seed):
bm.seed(seed)
return bm.SingleSiteAncestralMetropolisHastings().infer(
[foo()], {}, num_samples=20, num_chains=1
)
samples1 = sample_with_seed(123)
samples2 = sample_with_seed(123)
assert torch.allclose(samples1[foo()], samples2[foo()])
def test_detach_samples():
"""Test to ensure samples are detached from torch computation graphs."""
queries = [foo()]
samples = bm.SingleSiteAncestralMetropolisHastings().infer(
queries=queries,
observations={},
num_samples=20,
num_chains=1,
)
rv_data = samples[foo()]
idata = samples.to_inference_data()
assert hasattr(rv_data, "detach")
assert not hasattr(idata["posterior"][foo()], "detach")
| beanmachine-main | tests/ppl/inference/utils_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
class SampleModel:
@bm.random_variable
def foo(self):
return dist.Normal(0.0, 1.0)
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), 1.0)
@pytest.mark.parametrize(
"algorithm",
[
bm.GlobalNoUTurnSampler(nnc_compile=True),
bm.GlobalHamiltonianMonteCarlo(trajectory_length=1.0, nnc_compile=True),
],
)
def test_nnc_compile(algorithm):
model = SampleModel()
queries = [model.foo()]
observations = {model.bar(): torch.tensor(0.5)}
num_samples = 30
num_chains = 2
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# verify that NNC can run through
samples = algorithm.infer(
queries,
observations,
num_samples,
num_adaptive_samples=num_samples,
num_chains=num_chains,
)
# sanity check: make sure that the samples are valid
assert not torch.isnan(samples[model.foo()]).any()
| beanmachine-main | tests/ppl/inference/nnc_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import sys
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.base_proposer import BaseProposer
from beanmachine.ppl.world import init_from_prior, World
class SampleModel:
@bm.random_variable
def foo(self):
return dist.Normal(0.0, 1.0)
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), 1.0)
@bm.functional
def baz(self):
return self.bar() * 2.0
class SampleDoubleModel:
@bm.random_variable
def foo(self):
return dist.Normal(torch.tensor(0.0).double(), torch.tensor(1.0).double())
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), torch.tensor(1.0).double())
@pytest.mark.parametrize("multiprocess", [False, True])
def test_inference(multiprocess):
if multiprocess and sys.platform.startswith("win"):
pytest.skip(
"Windows does not support fork-based multiprocessing (which is necessary "
"for running parallel inference within pytest."
)
model = SampleModel()
mh = bm.SingleSiteAncestralMetropolisHastings()
queries = [model.foo(), model.baz()]
observations = {model.bar(): torch.tensor(0.5)}
num_samples = 30
num_chains = 2
samples = mh.infer(
queries,
observations,
num_samples,
num_adaptive_samples=num_samples,
num_chains=num_chains,
run_in_parallel=multiprocess,
mp_context="fork",
)
assert model.foo() in samples
assert isinstance(samples[model.foo()], torch.Tensor)
assert samples[model.foo()].shape == (num_chains, num_samples)
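    # warmup length equals num_samples here, so including adaptation doubles the total draws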
assert samples.get_num_samples(include_adapt_steps=True) == num_samples * 2
# make sure that the RNG state for each chain is different
assert not torch.equal(
samples.get_chain(0)[model.foo()], samples.get_chain(1)[model.foo()]
)
def test_get_proposers():
world = World()
model = SampleModel()
world.call(model.bar())
nuts = bm.GlobalNoUTurnSampler()
proposers = nuts.get_proposers(world, world.latent_nodes, 10)
assert all(isinstance(proposer, BaseProposer) for proposer in proposers)
def test_initialize_world():
model = SampleModel()
world = World.initialize_world([model.bar()], {})
assert model.foo() in world
assert model.bar() in world
def test_initialize_from_prior():
model = SampleModel()
queries = [model.foo()]
samples_from_prior = []
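    # initial values are drawn from foo's prior Normal(0, 1), so the mean of many draws should be near 0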
for _ in range(10000):
world = World.initialize_world(queries, {}, initialize_fn=init_from_prior)
val = world.get(model.foo())
samples_from_prior.append(val.item())
assert samples_from_prior[0] != samples_from_prior[1]
assert math.isclose(sum(samples_from_prior) / 10000.0, 0.0, abs_tol=1e-2)
def test_initialization_resampling():
mh = bm.SingleSiteAncestralMetropolisHastings()
@bm.random_variable
def foo():
return dist.Uniform(3.0, 5.0)
    # verify that the method re-samples as expected
retries = 0
def init_after_three_tries(d: dist.Distribution):
nonlocal retries
retries += 1
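        # returning NaN yields an invalid initialization, forcing infer to re-sample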
return torch.tensor(float("nan")) if retries < 3 else d.sample()
sampler = mh.sampler(
[foo()], {}, num_samples=10, initialize_fn=init_after_three_tries
)
for world in sampler:
assert not torch.isinf(world.log_prob()) and not torch.isnan(world.log_prob())
# an extreme case where the init value is always out of the support
def init_to_zero(d: dist.Distribution):
return torch.zeros_like(d.sample())
with pytest.raises(ValueError, match="Cannot find a valid initialization"):
mh.infer([foo()], {}, num_samples=10, initialize_fn=init_to_zero)
@pytest.mark.parametrize(
"algorithm",
[
bm.GlobalNoUTurnSampler(full_mass_matrix=False),
bm.GlobalNoUTurnSampler(full_mass_matrix=True),
bm.GlobalHamiltonianMonteCarlo(trajectory_length=1.0),
bm.SingleSiteAncestralMetropolisHastings(),
bm.SingleSiteNewtonianMonteCarlo(),
bm.SingleSiteUniformMetropolisHastings(),
],
)
def test_inference_with_double_dtype(algorithm):
model = SampleDoubleModel()
queries = [model.foo()]
bar_val = torch.tensor(0.5).double()
# make sure that the inference can run successfully
samples = algorithm.infer(
queries,
{model.bar(): bar_val},
num_samples=20,
num_chains=1,
)
assert samples[model.foo()].dtype == bar_val.dtype
| beanmachine-main | tests/ppl/inference/inference_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
class SingleSiteUniformMetropolisHastingsTest(unittest.TestCase):
class SampleBernoulliModel(object):
@bm.random_variable
def foo(self):
return dist.Beta(torch.tensor(2.0), torch.tensor(2.0))
@bm.random_variable
def bar(self):
return dist.Bernoulli(self.foo())
class SampleCategoricalModel(object):
@bm.random_variable
def foo(self):
return dist.Dirichlet(torch.tensor([0.5, 0.5]))
@bm.random_variable
def bar(self):
return dist.Categorical(self.foo())
def test_single_site_uniform_mh_with_bernoulli(self):
model = self.SampleBernoulliModel()
mh = bm.SingleSiteUniformMetropolisHastings()
foo_key = model.foo()
bar_key = model.bar()
sampler = mh.sampler([foo_key], {bar_key: torch.tensor(0.0)}, num_samples=5)
for world in sampler:
self.assertTrue(foo_key in world)
self.assertTrue(bar_key in world)
self.assertTrue(foo_key in world.get_variable(bar_key).parents)
self.assertTrue(bar_key in world.get_variable(foo_key).children)
def test_single_site_uniform_mh_with_categorical(self):
model = self.SampleCategoricalModel()
mh = bm.SingleSiteUniformMetropolisHastings()
foo_key = model.foo()
bar_key = model.bar()
sampler = mh.sampler([foo_key], {bar_key: torch.tensor(0.0)}, num_samples=5)
for world in sampler:
self.assertTrue(foo_key in world)
self.assertTrue(bar_key in world)
self.assertTrue(foo_key in world.get_variable(bar_key).parents)
self.assertTrue(bar_key in world.get_variable(foo_key).children)
| beanmachine-main | tests/ppl/inference/single_site_uniform_mh_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from typing import Optional
import beanmachine.ppl as bm
import numpy
import pytest
import scipy.stats
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.vi import ADVI, MAP, VariationalInfer
from beanmachine.ppl.inference.vi.gradient_estimator import monte_carlo_approximate_sf
from beanmachine.ppl.inference.vi.variational_world import VariationalWorld
from beanmachine.ppl.world import init_from_prior, RVDict
from torch import optim
from torch.distributions import constraints
from torch.distributions.utils import _standard_normal
cpu_device = torch.device("cpu")
class NealsFunnel(dist.Distribution):
"""
Neal's funnel.
p(x,y) = N(y|0,3) N(x|0,exp(y/2))
"""
support = constraints.real
def __init__(self, validate_args=None):
d = 2
batch_shape, event_shape = torch.Size([]), (d,)
super(NealsFunnel, self).__init__(
batch_shape, event_shape, validate_args=validate_args
)
def rsample(self, sample_shape=None):
if not sample_shape:
sample_shape = torch.Size((1,))
eps = _standard_normal(
(sample_shape[0], 2), dtype=torch.float, device=torch.device("cpu")
)
z = torch.zeros(eps.shape)
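        # reparameterized draw: z[..., 1] = y ~ N(0, 3), then z[..., 0] = x | y ~ N(0, exp(y / 2))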
z[..., 1] = torch.tensor(3.0) * eps[..., 1]
z[..., 0] = torch.exp(z[..., 1] / 2.0) * eps[..., 0]
return z
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
x = value[..., 0]
y = value[..., 1]
log_prob = dist.Normal(0, 3).log_prob(y)
log_prob += dist.Normal(0, torch.exp(y / 2)).log_prob(x)
return log_prob
class BayesianRobustLinearRegression:
def __init__(self, n, d):
self.n = n
self.d = d
self.X_train = torch.randn(n, d)
self.beta_truth = torch.randn(d + 1, 1)
noise = dist.StudentT(df=4.0).sample((n, 1))
self.y_train = (
torch.cat((self.X_train, torch.ones(n, 1)), -1).mm(self.beta_truth) + noise
)
@bm.random_variable
def beta(self):
return dist.Independent(
dist.StudentT(df=4.0 * torch.ones(self.d + 1)),
1,
)
@bm.random_variable
def X(self):
return dist.Normal(0, 1) # dummy
@bm.random_variable
def y(self):
X_with_ones = torch.cat((self.X(), torch.ones(self.X().shape[0], 1)), -1)
b = self.beta().squeeze()
if b.dim() == 1:
b = b.unsqueeze(0)
mu = X_with_ones.mm(b.T)
return dist.Independent(
dist.StudentT(df=4.0, loc=mu, scale=1),
1,
)
class NormalNormal:
def __init__(
self,
mean_0: float = 0.0,
variance_0: float = 1.0,
variance_x: float = 1.0,
device: Optional[torch.device] = cpu_device,
):
self.device = device
self.mean_0 = mean_0
self.variance_0 = variance_0
self.variance_x = variance_x
@bm.random_variable
def mu(self):
return dist.Normal(
torch.zeros(1).to(self.device), 10 * torch.ones(1).to(self.device)
)
@bm.random_variable
def x(self, i):
return dist.Normal(self.mu(), torch.ones(1).to(self.device))
    def conjugate_posterior(self, observations: RVDict) -> dist.Normal:
# Normal-Normal conjugate prior formula (https://en.wikipedia.org/wiki/Conjugate_prior#When_likelihood_function_is_a_continuous_distribution)
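        # with n observations x_i: var_post = 1 / (1/var_0 + n/var_x) and
        # mean_post = var_post * (mean_0/var_0 + sum(x_i)/var_x)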
        expected_variance = 1 / (
            (1 / self.variance_0) + (len(observations) / self.variance_x)
        )
expected_std = numpy.sqrt(expected_variance)
expected_mean = expected_variance * (
(self.mean_0 / self.variance_0)
+ (sum(observations.values()) / self.variance_x)
)
return dist.Normal(expected_mean, expected_std)
class LogScaleNormal:
@bm.param
def phi(self):
return torch.zeros(2) # mean, log std
@bm.random_variable
def q_mu(self):
params = self.phi()
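        # params[1] is the log std; exp() maps the unconstrained parameter to a positive scale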
return dist.Normal(params[0], params[1].exp())
class BinaryGaussianMixture:
@bm.random_variable
def h(self, i):
return dist.Bernoulli(0.5)
@bm.random_variable
def x(self, i):
return dist.Normal(self.h(i).float(), 0.1)
class TestAutoGuide:
@pytest.mark.parametrize("auto_guide_inference", [ADVI, MAP])
def test_can_use_functionals(self, auto_guide_inference):
test_rv = bm.random_variable(lambda: dist.Normal(0, 1))
test_functional = bm.functional(lambda: test_rv() ** 2)
auto_guide = auto_guide_inference(
queries=[test_rv(), test_functional()],
observations={},
)
world = auto_guide.infer(num_steps=10)
assert world.call(test_functional()) is not None
@pytest.mark.parametrize("auto_guide_inference", [ADVI, MAP])
def test_neals_funnel(self, auto_guide_inference):
nf = bm.random_variable(NealsFunnel)
auto_guide = auto_guide_inference(
queries=[nf()],
observations={},
optimizer=lambda params: torch.optim.Adam(params, lr=1e-1),
)
world = auto_guide.infer(
num_steps=100,
)
if auto_guide_inference == ADVI:
# compare 1D marginals of empirical distributions using 2-sample K-S test
nf_samples = NealsFunnel().sample((20,)).squeeze().numpy()
vi_samples = (
world.get_guide_distribution(nf())
.sample((20,))
.detach()
.squeeze()
.numpy()
)
assert (
scipy.stats.ks_2samp(nf_samples[:, 0], vi_samples[:, 0]).pvalue >= 0.05
)
assert (
scipy.stats.ks_2samp(nf_samples[:, 1], vi_samples[:, 1]).pvalue >= 0.05
)
else:
vi_samples = world.get_guide_distribution(nf()).v.detach().squeeze().numpy()
map_truth = [0, -4.5]
assert numpy.isclose(map_truth, vi_samples, atol=0.05).all().item()
@pytest.mark.parametrize("auto_guide_inference", [ADVI, MAP])
def test_normal_normal(self, auto_guide_inference):
model = NormalNormal()
auto_guide = auto_guide_inference(
queries=[model.mu()],
observations={
model.x(1): torch.tensor(9.0),
model.x(2): torch.tensor(10.0),
},
optimizer=lambda params: torch.optim.Adam(params, lr=1e0),
)
world = auto_guide.infer(
num_steps=100,
)
mu_approx = world.get_guide_distribution(model.mu())
sample_mean = mu_approx.sample((100,)).mean()
assert sample_mean > 5.0
if auto_guide_inference == ADVI:
sample_var = mu_approx.sample((100,)).var()
assert sample_var > 0.1
@pytest.mark.parametrize("auto_guide_inference", [ADVI, MAP])
def test_brlr(self, auto_guide_inference):
brlr = BayesianRobustLinearRegression(n=100, d=7)
auto_guide = auto_guide_inference(
queries=[brlr.beta()],
observations={
brlr.X(): brlr.X_train,
brlr.y(): brlr.y_train,
},
optimizer=lambda params: torch.optim.Adam(params, lr=1e-1),
)
world = auto_guide.infer(
num_steps=100,
)
beta_samples = world.get_guide_distribution(brlr.beta()).sample((100,))
for i in range(beta_samples.shape[1]):
assert torch.norm(beta_samples[:, i].mean() - brlr.beta_truth[i]) < 0.2
@pytest.mark.parametrize(
"auto_guide_inference, expected", [(ADVI, 1.0), (MAP, 0.0)]
)
def test_constrained_positive_reals(self, auto_guide_inference, expected):
exp = dist.Exponential(torch.tensor([1.0]))
positive_rv = bm.random_variable(lambda: exp)
auto_guide = auto_guide_inference(queries=[positive_rv()], observations={})
world = auto_guide.infer(num_steps=100)
assert (
abs(
world.get_guide_distribution(positive_rv()).sample((100,)).mean().item()
- expected
)
<= 0.2
)
@pytest.mark.parametrize("auto_guide_inference", [ADVI, MAP])
def test_constrained_interval(self, auto_guide_inference):
beta = dist.Beta(torch.tensor([1.0]), torch.tensor([1.0]))
interval_rv = bm.random_variable(lambda: beta)
auto_guide = auto_guide_inference(
queries=[interval_rv()],
observations={},
)
world = auto_guide.infer(num_steps=100)
assert (
abs(
world.get_guide_distribution(interval_rv()).sample((100,)).mean().item()
- beta.mean
)
<= 0.2
)
@pytest.mark.parametrize("auto_guide_inference", [ADVI, MAP])
def test_dirichlet(self, auto_guide_inference):
dirichlet = dist.Dirichlet(2 * torch.ones(2))
alpha = bm.random_variable(lambda: dirichlet)
auto_guide = auto_guide_inference([alpha()], {})
world = auto_guide.infer(num_steps=100)
map_truth = torch.tensor([0.5, 0.5])
vi_estimate = world.get_guide_distribution(alpha()).sample((100,)).mean(dim=0)
assert vi_estimate.isclose(map_truth, atol=0.1).all().item()
class TestStochasticVariationalInfer:
@pytest.fixture(autouse=True)
def set_seed(self):
bm.seed(41)
def test_normal_normal_guide(self):
normal_normal_model = NormalNormal()
log_scale_normal_model = LogScaleNormal()
world = VariationalInfer(
queries_to_guides={normal_normal_model.mu(): log_scale_normal_model.q_mu()},
observations={
normal_normal_model.x(1): torch.tensor(9.0),
normal_normal_model.x(2): torch.tensor(10.0),
},
optimizer=lambda params: torch.optim.Adam(params, lr=1e0),
).infer(
num_steps=100,
)
mu_approx = world.get_variable(log_scale_normal_model.q_mu()).distribution
sample_mean = mu_approx.sample((100, 1)).mean()
assert sample_mean > 5.0
sample_var = mu_approx.sample((100, 1)).var()
assert sample_var > 0.1
def test_normal_normal_guide_step(self):
normal_normal_model = NormalNormal()
log_scale_normal_model = LogScaleNormal()
# 100 steps, each 1 iteration
world = VariationalInfer(
queries_to_guides={
normal_normal_model.mu(): log_scale_normal_model.q_mu(),
},
observations={
normal_normal_model.x(1): torch.tensor(9.0),
normal_normal_model.x(2): torch.tensor(10.0),
},
optimizer=lambda params: torch.optim.Adam(params, lr=1e0),
).infer(num_steps=100)
mu_approx = world.get_variable(log_scale_normal_model.q_mu()).distribution
sample_mean = mu_approx.sample((100, 1)).mean()
assert sample_mean > 5.0
sample_var = mu_approx.sample((100, 1)).var()
assert sample_var > 0.1
def test_conditional_guide(self):
@bm.random_variable
def mu():
return dist.Normal(torch.zeros(1), torch.ones(1))
@bm.random_variable
def alpha():
return dist.Normal(torch.zeros(1), torch.ones(1))
@bm.random_variable
def x(i):
return dist.Normal(mu() + alpha(), torch.ones(1))
@bm.param
def phi_mu():
return torch.zeros(2) # mean, log std
@bm.random_variable
def q_mu():
params = phi_mu()
return dist.Normal(params[0] - alpha(), params[1].exp())
@bm.param
def phi_alpha():
return torch.zeros(2) # mean, log std
@bm.random_variable
def q_alpha():
params = phi_alpha()
return dist.Normal(params[0], params[1].exp())
world = VariationalInfer(
queries_to_guides={
mu(): q_mu(),
alpha(): q_alpha(),
},
observations={
x(1): torch.tensor(9.0),
x(2): torch.tensor(10.0),
},
optimizer=lambda params: torch.optim.Adam(params, lr=1e0),
).infer(
num_steps=100,
)
vi = VariationalInfer(
queries_to_guides={
mu(): q_mu(),
alpha(): q_alpha(),
},
observations={
x(1): torch.tensor(9.0),
x(2): torch.tensor(10.0),
},
optimizer=lambda params: torch.optim.Adam(params, lr=1e-1),
)
vi.infer(num_steps=100)
world = VariationalWorld(
params=vi.params,
observations={
**{alpha(): torch.tensor(10.0)},
**vi.observations,
},
)
mu_approx, _ = world._run_node(q_mu())
sample_mean_alpha_10 = mu_approx.sample((100, 1)).mean()
world = VariationalWorld(
params=vi.params,
observations={
**{alpha(): torch.tensor(-10.0)},
**vi.observations,
},
)
mu_approx, _ = world._run_node(q_mu())
sample_mean_alpha_neg_10 = mu_approx.sample((100, 1)).mean()
assert sample_mean_alpha_neg_10 > sample_mean_alpha_10
def test_discrete_mixture(self):
model = BinaryGaussianMixture()
N = 10
with bm.world.World.initialize_world(
itertools.chain.from_iterable([model.x(i), model.h(i)] for i in range(N)),
initialize_fn=init_from_prior,
):
data = torch.tensor([[model.x(i), model.h(i)] for i in range(N)])
@bm.param
def phi(i):
return torch.tensor(0.5, requires_grad=True)
@bm.random_variable
def q_h(i):
return dist.Bernoulli(logits=phi(i))
vi = VariationalInfer(
queries_to_guides={model.h(i): q_h(i) for i in range(N)},
observations={model.x(i): data[i, 0] for i in range(N)},
optimizer=lambda p: optim.Adam(p, lr=5e-1),
)
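        # h/q_h are discrete and not reparameterizable, so use the score function estimator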
world = vi.infer(
num_steps=30, num_samples=50, mc_approx=monte_carlo_approximate_sf
)
accuracy = (
(
(
torch.stack(
[
world.get_guide_distribution(model.h(i)).probs
for i in range(N)
]
)
> 0.5
)
== data[:, 1]
)
.float()
.mean()
)
assert accuracy.float().item() > 0.80
def test_logistic_regression(self):
n, d = 500, 2
X = torch.randn(n, d)
W = torch.randn(d)
@bm.random_variable
def y():
return dist.Independent(
dist.Bernoulli(probs=torch.sigmoid(X @ W)),
1,
)
@bm.param
def w():
return torch.randn(d)
@bm.random_variable
def q_y():
weights = w()
data = X
p = torch.sigmoid(data @ weights)
return dist.Independent(
dist.Bernoulli(probs=p),
1,
)
world = VariationalInfer(
queries_to_guides={y(): q_y()},
observations={},
optimizer=lambda params: torch.optim.Adam(params, lr=3e-2),
).infer(
num_steps=4000,
num_samples=1,
# NOTE: since y/q_y are discrete and not reparameterizable, we must
# use the score function estimator
mc_approx=monte_carlo_approximate_sf,
)
l2_error = (world.get_param(w()) - W).norm()
assert l2_error < 0.5
def test_subsample(self):
# mu ~ N(0, 10) and x | mu ~ N(mu, 1)
num_total = 3
normal_normal_model = NormalNormal(mean_0=1, variance_0=100, variance_x=1)
log_scale_normal_model = LogScaleNormal()
total_observations = {
normal_normal_model.x(i): torch.tensor(1.0) for i in range(num_total)
}
expected_mean = normal_normal_model.conjugate_posterior(total_observations).mean
expected_stddev = normal_normal_model.conjugate_posterior(
total_observations
).stddev
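        # with subsample_factor = n/N the likelihood is rescaled, so the guide is
        # expected to match the conjugate posterior under all num_total observations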
for num_samples in range(1, num_total):
world = VariationalInfer(
queries_to_guides={
normal_normal_model.mu(): log_scale_normal_model.q_mu(),
},
observations={
normal_normal_model.x(i): torch.tensor(1.0)
for i in range(num_samples)
},
optimizer=lambda params: torch.optim.Adam(params, lr=5e-2),
).infer(
num_steps=50,
subsample_factor=num_samples / num_total,
num_samples=10,
)
mu_approx = world.get_guide_distribution(normal_normal_model.mu())
assert (mu_approx.mean - expected_mean).norm() < 0.05
assert (mu_approx.stddev - expected_stddev).norm() < 0.05
def test_subsample_fail(self):
# mu ~ N(0, 10) and x | mu ~ N(mu, 1)
num_total = 3
normal_normal_model = NormalNormal(mean_0=1, variance_0=100, variance_x=1)
log_scale_normal_model = LogScaleNormal()
total_observations = {
normal_normal_model.x(i): torch.tensor(1.0) for i in range(num_total)
}
expected_mean = normal_normal_model.conjugate_posterior(total_observations).mean
expected_stddev = normal_normal_model.conjugate_posterior(
total_observations
).stddev
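        # subsample_factor is left at 1.0 despite subsampling, so the fit should miss the full-data posterior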
for num_samples in range(1, num_total):
world = VariationalInfer(
queries_to_guides={
normal_normal_model.mu(): log_scale_normal_model.q_mu(),
},
observations={
normal_normal_model.x(i): torch.tensor(1.0)
for i in range(num_samples)
},
optimizer=lambda params: torch.optim.Adam(params, lr=5e-2),
).infer(
num_steps=50,
subsample_factor=1.0,
num_samples=10,
)
mu_approx = world.get_guide_distribution(normal_normal_model.mu())
assert (mu_approx.mean - expected_mean).norm() > 0.05 or (
mu_approx.stddev - expected_stddev
).norm() > 0.05
| beanmachine-main | tests/ppl/inference/vi_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.vi import VariationalInfer
cpu_device = torch.device("cpu")
class NormalNormal:
def __init__(self, device: Optional[torch.device] = cpu_device):
self.device = device
@bm.random_variable
def mu(self):
return dist.Normal(
torch.zeros(1).to(self.device), 10 * torch.ones(1).to(self.device)
)
@bm.random_variable
def x(self, i):
return dist.Normal(self.mu(), torch.ones(1).to(self.device))
@pytest.mark.skipif(
not torch.cuda.is_available(), reason="requires GPU access to train the model"
)
def test_normal_normal_guide_step_gpu():
device = torch.device("cuda:0")
model = NormalNormal(device=device)
@bm.param
def phi():
return torch.zeros(2).to(device) # mean, log std
@bm.random_variable
def q_mu():
params = phi()
return dist.Normal(params[0], params[1].exp())
world = VariationalInfer(
queries_to_guides={model.mu(): q_mu()},
observations={
model.x(1): torch.tensor(9.0),
model.x(2): torch.tensor(10.0),
},
optimizer=lambda params: torch.optim.Adam(params, lr=1e-1),
device=device,
).infer(
num_steps=1000,
)
mu_approx = world.get_variable(q_mu()).distribution
assert (mu_approx.mean - 9.6).norm() < 1.0
assert (mu_approx.stddev - 0.7).norm() < 0.3
| beanmachine-main | tests/ppl/inference/vi_gpu_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.examples.conjugate_models import (
BetaBinomialModel,
CategoricalDirichletModel,
GammaNormalModel,
NormalNormalModel,
)
# A distribution which apparently takes values on the full number line,
# but in reality it only returns zero when sampled from.
class RealSupportDist(dist.Distribution):
has_enumerate_support = False
support = dist.constraints.real
has_rsample = True
arg_constraints = {}
# Ancestral sampling will only return zero.
def rsample(self, sample_shape):
return torch.zeros(sample_shape)
# Not a properly defined PDF on the full support, but allows MCMC to explore.
def log_prob(self, value):
return torch.zeros(value.shape)
# A distribution which apparently takes values on the non-negative number line,
# but in reality it only returns 1 when sampled from.
class HalfRealSupportDist(dist.Distribution):
has_enumerate_support = False
support = dist.constraints.greater_than(lower_bound=0.0)
has_rsample = True
arg_constraints = {}
# Ancestral sampling will only return one.
def rsample(self, sample_shape):
return torch.ones(sample_shape)
# Not a properly defined PDF on the full support, but allows MCMC to explore.
def log_prob(self, value):
return torch.zeros(value.shape)
# A distribution which apparently takes values on an interval of the number line,
# but in reality it only returns 3 when sampled from.
class IntervalRealSupportDist(dist.Distribution):
has_enumerate_support = False
support = dist.constraints.interval(lower_bound=2.0, upper_bound=20.0)
has_rsample = True
arg_constraints = {}
    # Ancestral sampling will only return three.
def rsample(self, sample_shape):
return 3 * torch.ones(sample_shape)
# Not a properly defined PDF on the full support, but allows MCMC to explore.
def log_prob(self, value):
return torch.zeros(value.shape)
# A distribution which apparently takes values on the non-negative integers,
# but in reality it only returns zero when sampled from.
class IntegerSupportDist(dist.Distribution):
has_enumerate_support = False
support = dist.constraints.integer_interval(0.0, 100.0)
has_rsample = True
arg_constraints = {}
# Ancestral sampling will only return zero.
def rsample(self, sample_shape):
return torch.zeros(sample_shape)
# Not a properly defined PDF on the full support, but allows MCMC to explore.
def log_prob(self, value):
return torch.zeros(value.shape)
class SingleSiteRandomWalkTest(unittest.TestCase):
"""
    These tests exercise the control flow that branches
    on node_distribution.support
"""
class RealSupportModel(object):
@bm.random_variable
def p(self):
return RealSupportDist()
@bm.random_variable
def q(self):
return dist.Normal(self.p(), torch.tensor(1.0))
class HalfRealSupportModel(object):
@bm.random_variable
def p(self):
return HalfRealSupportDist()
@bm.random_variable
def q(self):
return dist.Normal(self.p(), torch.tensor(1.0))
class IntervalRealSupportModel(object):
def __init__(self):
self.lower_bound = IntervalRealSupportDist().support.lower_bound
self.upper_bound = IntervalRealSupportDist().support.upper_bound
@bm.random_variable
def p(self):
return IntervalRealSupportDist()
@bm.random_variable
def q(self):
return dist.Normal(self.p(), torch.tensor(1.0))
class IntegerSupportModel(object):
@bm.random_variable
def p(self):
return IntegerSupportDist()
@bm.random_variable
def q(self):
return dist.Normal(self.p(), torch.tensor(1.0))
def test_single_site_random_walk_full_support(self):
model = self.RealSupportModel()
mh = bm.SingleSiteRandomWalk()
p_key = model.p()
queries = [p_key]
observations = {model.q(): torch.tensor(1.0)}
predictions = mh.infer(queries, observations, 100)
predictions = predictions.get_chain()[p_key]
"""
If the ancestral sampler is used, then every sample
        drawn from the chain will be 0. This is true by
the construction of the rsample function.
Conversely, normal noise != 0 w.p. 1, giving some sample which != 0.
For RealSupportModel, we expect the RW sampler to be used.
"""
self.assertIn(False, [0 == pred for pred in predictions])
def test_single_site_random_walk_half_support(self):
model = self.HalfRealSupportModel()
mh = bm.SingleSiteRandomWalk()
p_key = model.p()
queries = [p_key]
observations = {model.q(): torch.tensor(100.0)}
predictions = mh.infer(queries, observations, 100)
predictions = predictions.get_chain()[p_key]
        # Discard the first sample; it may not be drawn from the node's distribution
predictions = predictions[1:]
"""
If the ancestral sampler is used, then every sample
        drawn from the chain will be 1. This is true by
the construction of the rsample function.
If RW is correctly reached by control flow, then rsample will
draw from a Gamma distribution.
"""
self.assertIn(False, [pred == 1 for pred in predictions])
def test_single_site_random_walk_interval_support(self):
lower_bound = IntervalRealSupportDist().support.lower_bound
upper_bound = IntervalRealSupportDist().support.upper_bound
# Test for a single item of evidence
        def inner_fnc(evidence: torch.Tensor):
model = self.IntervalRealSupportModel()
mh = bm.SingleSiteRandomWalk()
p_key = model.p()
queries = [p_key]
observations = {model.q(): evidence.detach().clone()}
predictions = mh.infer(queries, observations, 20)
predictions = predictions.get_chain()[p_key]
"""
All generated samples should remain in the correct support
if the transform is computed properly
"""
self.assertNotIn(
False, [lower_bound <= pred <= upper_bound for pred in predictions]
)
# We're mostly interested in the boundary cases
evidences = torch.cat(
(
torch.linspace(lower_bound + 0.1, lower_bound + 1, 4),
torch.linspace(upper_bound - 1, upper_bound - 0.1, 4),
)
)
for e in evidences:
inner_fnc(e)
"""
Adaptive
"""
def test_single_site_adaptive_random_walk(self):
model = NormalNormalModel(
mu=torch.tensor(0.0), std=torch.tensor(1.0), sigma=torch.ones(1)
)
mh = bm.SingleSiteRandomWalk(step_size=4)
p_key = model.normal_p()
queries = [p_key]
observations = {model.normal(): torch.tensor(100.0)}
predictions = mh.infer(queries, observations, 100, num_adaptive_samples=30)
predictions = predictions.get_chain()[p_key]
self.assertIn(True, [45 < pred < 55 for pred in predictions])
"""
    These tests check for quick approximate convergence in conjugate models.
"""
def test_single_site_random_walk_rate(self):
model = NormalNormalModel(
mu=torch.zeros(1), std=torch.ones(1), sigma=torch.ones(1)
)
mh = bm.SingleSiteRandomWalk(step_size=10)
p_key = model.normal_p()
queries = [p_key]
observations = {model.normal(): torch.tensor(100.0)}
predictions = mh.infer(queries, observations, 100)
predictions = predictions.get_chain()[p_key]
self.assertIn(True, [45 < pred < 55 for pred in predictions])
def test_single_site_random_walk_rate_vector(self):
model = NormalNormalModel(
mu=torch.zeros(2), std=torch.ones(2), sigma=torch.ones(2)
)
mh = bm.SingleSiteRandomWalk(step_size=10)
p_key = model.normal_p()
queries = [p_key]
observations = {model.normal(): torch.tensor([100.0, -100.0])}
predictions = mh.infer(queries, observations, 100)
predictions = predictions.get_chain()[p_key]
self.assertIn(True, [45 < pred[0] < 55 for pred in predictions])
self.assertIn(True, [-55 < pred[1] < -45 for pred in predictions])
def test_single_site_random_walk_half_support_rate(self):
model = GammaNormalModel(
shape=torch.ones(1), rate=torch.ones(1), mu=torch.ones(1)
)
mh = bm.SingleSiteRandomWalk(step_size=4.0)
p_key = model.gamma()
queries = [p_key]
observations = {model.normal(): torch.tensor([100.0])}
predictions = mh.infer(queries, observations, 100)
predictions = predictions.get_chain()[p_key]
"""
Our single piece of evidence is the observed value 100.
100 is a very large observation w.r.t our model of mu = 1. This
        implies that the normal distribution has very high variance, so samples
from the Gamma distribution will have very small values in expectation.
For RWMH with large step size, we expect to see this in < 100 steps.
"""
self.assertIn(True, [pred < 0.01 for pred in predictions])
def test_single_site_random_walk_interval_support_rate(self):
model = BetaBinomialModel(
alpha=torch.ones(1) * 2.0, beta=torch.ones(1), n=torch.ones(1) * 10.0
)
mh = bm.SingleSiteRandomWalk(step_size=0.3)
p_key = model.theta()
queries = [p_key]
observations = {model.x(): torch.tensor([10.0])}
predictions = mh.infer(queries, observations, 50)
predictions = predictions.get_chain()[p_key]
"""
Our single piece of evidence is the observed value 10.
        This is a large observation w.r.t our model. This
        implies that the Binomial distribution has very large parameter p, so
samples from the Beta distribution will have similarly large values in
expectation. For RWMH with small step size, we expect to accept enough
proposals to reach this value in < 50 steps.
"""
self.assertIn(True, [pred > 0.9 for pred in predictions])
def test_single_site_random_walk_simplex_support_rate(self):
model = CategoricalDirichletModel(alpha=torch.tensor([1.0, 10.0]))
mh = bm.SingleSiteRandomWalk(step_size=1.0)
p_key = model.dirichlet()
queries = [p_key]
observations = {model.categorical(): torch.tensor([1.0, 1.0, 1.0])}
predictions = mh.infer(queries, observations, 50)
predictions = predictions.get_chain()[p_key]
"""
        Our evidence is three draws from the Categorical, each observed
        as category 1 (each simplex entry lies in the interval [0, 1]).
Based on our model, we expect that this evidence is drawn from
category 1 rather than category 0. So pred[0] << pred[1] typically.
"""
self.assertIn(True, [pred[0] < 0.1 for pred in predictions])
| beanmachine-main | tests/ppl/inference/single_site_random_walk_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
class IntegrationTest(unittest.TestCase):
class LogisticRegressionModel(object):
@bm.random_variable
def theta_0(self):
return dist.Normal(0.0, 1.0)
@bm.random_variable
def theta_1(self):
return dist.Normal(0.0, torch.ones(3))
@bm.random_variable
def y(self, X):
logits = (X * self.theta_1() + self.theta_0()).sum(-1)
return dist.Bernoulli(logits=logits)
def test_logistic_regression(self):
torch.manual_seed(1)
true_coefs = torch.tensor([1.0, 2.0, 3.0])
true_intercept = torch.tensor(1.0)
X = torch.randn(3000, 3)
Y = dist.Bernoulli(logits=(X * true_coefs + true_intercept).sum(-1)).sample()
model = self.LogisticRegressionModel()
nw = bm.SingleSiteNewtonianMonteCarlo()
samples_nw = nw.infer(
queries=[model.theta_1(), model.theta_0()],
observations={model.y(X): Y},
num_samples=1000,
num_chains=1,
)
coefs_mean = samples_nw[model.theta_1()].view(-1, 3).mean(0)
intercept_mean = samples_nw[model.theta_0()].view(-1).mean(0)
self.assertTrue(torch.isclose(coefs_mean, true_coefs, atol=0.15).all())
self.assertTrue(torch.isclose(intercept_mean, true_intercept, atol=0.15).all())
| beanmachine-main | tests/ppl/inference/inference_integration_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
from unittest.mock import patch
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.nuts_proposer import NUTSProposer
from beanmachine.ppl.inference.proposer.sequential_proposer import SequentialProposer
from beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (
SingleSiteAncestralProposer,
)
from beanmachine.ppl.inference.proposer.single_site_uniform_proposer import (
SingleSiteUniformProposer,
)
from beanmachine.ppl.world import World
class SampleModel:
@bm.random_variable
def foo(self, i: int):
return dist.Beta(2.0, 2.0)
@bm.random_variable
def bar(self, i: int):
return dist.Bernoulli(self.foo(i))
@bm.random_variable
def baz(self):
return dist.Normal(self.bar(0) + self.bar(1), 1.0)
class ChangingSupportSameShapeModel:
# the support of `component` is changing, but (because we indexed alpha
# by k) all random_variables have the same shape
@bm.random_variable
def K(self):
return dist.Poisson(rate=2.0)
@bm.random_variable
def alpha(self, k):
return dist.Dirichlet(torch.ones(k))
@bm.random_variable
def component(self, i):
alpha = self.alpha(self.K().int().item() + 2)
return dist.Categorical(alpha)
class ChangingShapeModel:
# here since we did not index alpha, its shape in each world is changing
@bm.random_variable
def K(self):
return dist.Poisson(rate=2.0)
@bm.random_variable
def alpha(self):
return dist.Dirichlet(torch.ones(self.K().int().item() + 2))
@bm.random_variable
def component(self, i):
return dist.Categorical(self.alpha())
def test_inference_config():
model = SampleModel()
nuts = bm.GlobalNoUTurnSampler()
compositional = bm.CompositionalInference({model.foo: nuts})
queries = [model.foo(0), model.foo(1)]
observations = {model.baz(): torch.tensor(2.0)}
# verify that inference can run without error
compositional.infer(queries, observations, num_chains=1, num_samples=10)
# verify that proposers are spawned correctly
world = World.initialize_world(queries, observations)
with patch.object(nuts, "get_proposers", wraps=nuts.get_proposers) as mock:
proposers = compositional.get_proposers(
world, target_rvs=world.latent_nodes, num_adaptive_sample=0
)
# NUTS should receive {foo(0), foo(1)} as its target rvs
mock.assert_called_once_with(world, {model.foo(0), model.foo(1)}, 0)
# there should be one NUTS proposer for both foo(0) and foo(1), one ancestral MH
# proposer for bar(0), and another ancestral MH proposer for bar(1)
assert len(proposers) == 3
# TODO: find a way to validate the proposer instead of relying on the order of
# return value
assert isinstance(proposers[0], NUTSProposer)
assert proposers[0]._target_rvs == {model.foo(0), model.foo(1)}
    # the rest of the nodes are updated by default proposers (uniform proposer for Bernoulli)
assert isinstance(proposers[1], SingleSiteUniformProposer)
assert isinstance(proposers[2], SingleSiteUniformProposer)
assert {proposers[1].node, proposers[2].node} == {model.bar(0), model.bar(1)}
# test overriding default kwarg
compositional = bm.CompositionalInference(
{
model.foo: bm.GlobalNoUTurnSampler(),
...: bm.SingleSiteAncestralMetropolisHastings(),
}
)
compositional.infer(queries, observations, num_chains=1, num_samples=2)
world = World.initialize_world(queries, observations)
proposers = compositional.get_proposers(
world, target_rvs=world.latent_nodes, num_adaptive_sample=0
)
assert isinstance(proposers[0], NUTSProposer)
assert isinstance(proposers[1], SingleSiteAncestralProposer)
assert isinstance(proposers[2], SingleSiteAncestralProposer)
assert {proposers[1].node, proposers[2].node} == {model.bar(0), model.bar(1)}
def test_config_inference_with_tuple_of_rv():
model = SampleModel()
nuts = bm.GlobalNoUTurnSampler()
compositional = bm.CompositionalInference({(model.foo, model.baz): nuts})
world = World.initialize_world([model.baz()], {})
with patch.object(nuts, "get_proposers", wraps=nuts.get_proposers) as mock:
compositional.get_proposers(
world, target_rvs=world.latent_nodes, num_adaptive_sample=10
)
# NUTS should receive {foo(0), foo(1), model.baz()} as its target rvs
mock.assert_called_once_with(
world, {model.foo(0), model.foo(1), model.baz()}, 10
)
def test_config_inference_with_tuple_of_inference():
model = SampleModel()
compositional = bm.CompositionalInference(
{
(model.foo, model.bar): (
bm.SingleSiteAncestralMetropolisHastings(),
bm.SingleSiteUniformMetropolisHastings(),
),
model.baz: bm.GlobalNoUTurnSampler(),
}
)
# verify that inference can run without error
compositional.infer([model.baz()], {}, num_chains=1, num_samples=10)
# examine the proposer types
world = World.initialize_world([model.baz()], {})
proposers = compositional.get_proposers(
world, target_rvs=world.latent_nodes, num_adaptive_sample=10
)
assert len(proposers) == 2
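    # the proposer order is not guaranteed; pick whichever entry is not the NUTS proposer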
sequential_proposer = proposers[int(isinstance(proposers[0], NUTSProposer))]
assert isinstance(sequential_proposer, SequentialProposer)
assert len(sequential_proposer.proposers) == 4
proposer_count = Counter(map(type, sequential_proposer.proposers))
assert proposer_count[SingleSiteAncestralProposer] == 2
def test_nested_compositional_inference():
model = SampleModel()
ancestral_mh = bm.SingleSiteAncestralMetropolisHastings()
compositional = bm.CompositionalInference(
{
(model.foo, model.bar): bm.CompositionalInference(
{
model.foo: bm.GlobalNoUTurnSampler(),
# this ancestral mh class is never going to be invoked
model.baz: ancestral_mh,
}
)
}
)
with patch.object(
ancestral_mh, "get_proposers", wraps=ancestral_mh.get_proposers
) as mock:
# verify that inference can run without error
compositional.infer([model.baz()], {}, num_chains=1, num_samples=10)
        # the ancestral_mh instance shouldn't have been invoked at all
mock.assert_not_called()
def test_block_inference_with_default_algorithm():
model = SampleModel()
    # block foo, bar, and baz together, but use the default inference
compositional = bm.CompositionalInference({(model.foo, model.bar, model.baz): ...})
# make sure that things can run without failure
queries = [model.baz()]
observations = {}
compositional.infer(queries, observations, num_chains=1, num_samples=10)
# check to see if proposers are indeed blocked together
world = World.initialize_world(queries, observations)
proposers = compositional.get_proposers(world, world.latent_nodes, 0)
assert len(proposers) == 1
assert isinstance(proposers[0], SequentialProposer)
@pytest.mark.xfail(
raises=RuntimeError,
reason="Need to redesign how change in support is being handled in block inference",
)
def test_block_inference_changing_support():
model = ChangingSupportSameShapeModel()
queries = [model.K()] + [model.component(j) for j in range(3)]
compositional = bm.CompositionalInference(
{
(model.K, model.component): bm.SingleSiteAncestralMetropolisHastings(),
...: bm.SingleSiteNewtonianMonteCarlo(),
},
)
sampler = compositional.sampler(queries, {}, num_samples=10, num_adaptive_samples=5)
old_world = next(sampler)
for world in sampler: # this should run without failing
# since it's actually possible to sample two identical values, we need
# to check for tensor identity
if world[model.K()] is not old_world[model.K()]:
# if one of the node in a block is updated, the rest of the nodes should
# also been updated
for i in range(3):
assert world[model.component(i)] is not old_world[model.component(i)]
else:
# just as a sanity check to show that the tensor identity check is doing
# what we expected
assert world[model.component(0)] is old_world[model.component(0)]
old_world = world
# disable NNC because changing support => non-static model
compositional = bm.CompositionalInference(
{(model.K, model.component): bm.SingleSiteAncestralMetropolisHastings()},
nnc_compile=False,
)
sampler = compositional.sampler(queries, {})
with pytest.raises(KeyError):
world = next(sampler)
    # since the support of the Poisson is all natural numbers, it's possible that
    # we sample a new value of K that's 1 greater than the current one...
K_val = world.call(model.K())
new_world = world.replace({model.K(): K_val + 1})
# Since NUTS only supports static model, this is going to raise an error
# TODO: this error is thrown in hmc_utils when fetching
# transforms but should be checked earlier in the model
sampler.send(new_world)
def test_block_inference_changing_shape():
model = ChangingShapeModel()
queries = [model.K()] + [model.component(j) for j in range(3)]
# disable NNC because changing shape => non-static model
compositional = bm.CompositionalInference(nnc_compile=False)
# cannot perform inference since the shape of alpha can change if the value
# of K changes
with pytest.raises(RuntimeError):
compositional.infer(queries, {}, num_samples=10, num_chains=1)
def test_default_num_adaptive_samples():
model = SampleModel()
num_samples = 100
compositional = bm.CompositionalInference(
{
model.bar: bm.SingleSiteAncestralMetropolisHastings(),
...: bm.SingleSiteRandomWalk(),
}
)
    # none of the methods in compositional requires adaptation, so default to 0
assert compositional._get_default_num_adaptive_samples(num_samples) == 0
compositional = bm.CompositionalInference(
{
model.foo: bm.GlobalNoUTurnSampler(),
model.bar: bm.SingleSiteAncestralMetropolisHastings(),
}
)
# default to num_samples // 2 due to NUTS' default
assert (
compositional._get_default_num_adaptive_samples(num_samples) == num_samples // 2
)
| beanmachine-main | tests/ppl/inference/compositional_infer_test.py |
| beanmachine-main | tests/ppl/inference/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
class SampleModel:
@bm.random_variable
def foo(self):
return dist.Normal(0.0, 1.0)
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), 1.0)
def test_sampler():
model = SampleModel()
nuts = bm.GlobalNoUTurnSampler()
queries = [model.foo()]
observations = {model.bar(): torch.tensor(0.5)}
num_samples = 10
sampler = nuts.sampler(queries, observations, num_samples, num_adaptive_samples=0)
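    # draining the sampler lazily yields one World per posterior sample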
worlds = list(sampler)
assert len(worlds) == num_samples
for world in worlds:
assert model.foo() in world
with world:
assert isinstance(model.foo(), torch.Tensor)
def test_two_samplers():
model = SampleModel()
queries = [model.foo()]
observations = {model.bar(): torch.tensor(0.5)}
nuts_sampler = bm.GlobalNoUTurnSampler().sampler(queries, observations)
hmc_sampler = bm.GlobalHamiltonianMonteCarlo(1.0).sampler(queries, observations)
world = next(nuts_sampler)
    # it's possible to use multiple samplers interchangeably to update the worlds (or
    # in general, pass a new world to a sampler and continue inference with existing
# hyperparameters)
for _ in range(3):
world = hmc_sampler.send(world)
world = nuts_sampler.send(world)
assert model.foo() in world
assert model.bar() in world
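# Note on the mechanism exercised above: samplers act as generators over
# worlds, so next(...) yields an initial world and .send(world) resumes
# inference from an externally supplied world. That handoff is what lets two
# different kernels advance the same chain.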
| beanmachine-main | tests/ppl/inference/sampler_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class SingleSiteRandomWalkConjugateTest(unittest.TestCase, AbstractConjugateTests):
def setUp(self):
self.mh = bm.SingleSiteRandomWalk(step_size=1.0)
def test_beta_binomial_conjugate_run(self):
mh = bm.SingleSiteRandomWalk(step_size=0.3)
self.beta_binomial_conjugate_run(mh, num_samples=5000)
def test_gamma_gamma_conjugate_run(self):
self.gamma_gamma_conjugate_run(self.mh, num_samples=10000)
def test_gamma_normal_conjugate_run(self):
self.gamma_normal_conjugate_run(self.mh, num_samples=10000)
def test_normal_normal_conjugate_run(self):
mh = bm.SingleSiteRandomWalk(step_size=1.5)
self.normal_normal_conjugate_run(mh, num_samples=1000)
def test_dirichlet_categorical_conjugate_run(self):
self.dirichlet_categorical_conjugate_run(self.mh, num_samples=10000)
| beanmachine-main | tests/ppl/inference/single_site_random_walk_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
@bm.random_variable
def f():
return 123 # BAD, needs to be a distribution
@bm.random_variable
def g(n):
pass
@bm.functional
def h():
return 123 # BAD; needs to be a tensor
@bm.random_variable
def flip():
return dist.Bernoulli(0.5)
class ErrorDist(torch.distributions.Distribution):
arg_constraints = {}
support = torch.distributions.constraints.real
def __init__(self):
self.counter = 0
super().__init__()
def sample(self):
self.counter += 1
if self.counter == 20:
# throw error
torch.linalg.cholesky(torch.zeros(3, 3))
return torch.randn(1)
def log_prob(self, value):
self.counter += 1
if self.counter == 5:
torch.linalg.cholesky(torch.zeros(3, 3))
return -(value**2)
@bm.random_variable
def bad():
return ErrorDist()
def test_inference_error_reporting():
mh = bm.SingleSiteAncestralMetropolisHastings()
with pytest.raises(TypeError) as ex:
mh.infer(None, {}, 10)
assert (
str(ex.value)
== "Parameter 'queries' is required to be a list but is of type NoneType."
)
with pytest.raises(TypeError) as ex:
mh.infer([], 123, 10)
assert (
str(ex.value)
== "Parameter 'observations' is required to be a dictionary but is of type int."
)
# Should be f():
with pytest.raises(TypeError) as ex:
mh.infer([f], {}, 10)
assert (
str(ex.value)
== "A query is required to be a random variable but is of type function."
)
# Should be f():
with pytest.raises(TypeError) as ex:
mh.infer([f()], {f: torch.tensor(True)}, 10)
assert (
str(ex.value)
== "An observation is required to be a random variable but is of type function."
)
# Should be a tensor
with pytest.raises(TypeError) as ex:
mh.infer([f()], {f(): 123.0}, 10)
assert (
str(ex.value)
== "An observed value is required to be a tensor but is of type float."
)
# You can't make inferences on rv-of-rv
with pytest.raises(TypeError) as ex:
mh.infer([g(f())], {}, 10)
assert str(ex.value) == "The arguments to a query must not be random variables."
# You can't make inferences on rv-of-rv
with pytest.raises(TypeError) as ex:
mh.infer([f()], {g(f()): torch.tensor(123)}, 10)
assert (
str(ex.value) == "The arguments to an observation must not be random variables."
)
    # SSAMH requires that observations be of random variables, not
    # functionals
with pytest.raises(TypeError) as ex:
mh.infer([f()], {h(): torch.tensor(123)}, 10)
assert (
str(ex.value)
== "An observation must observe a random_variable, not a functional."
)
# A functional is required to return a tensor.
with pytest.raises(TypeError) as ex:
mh.infer([h()], {}, 10)
assert str(ex.value) == "The value returned by a queried function must be a tensor."
# A random_variable is required to return a distribution
with pytest.raises(TypeError) as ex:
mh.infer([f()], {}, 10)
assert str(ex.value) == "A random_variable is required to return a distribution."
# The lookup key to the samples object is required to be an RVID.
with pytest.raises(TypeError) as ex:
mh.infer([flip()], {}, 10)[flip]
assert (
str(ex.value)
== "The key is required to be a random variable but is of type function."
)
def test_handle_cholesky_error():
mh = bm.SingleSiteAncestralMetropolisHastings()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning
samples = mh.infer([bad()], {}, 20, num_chains=1)
# Verify that the warning is triggered
assert len(w) == 1
assert "Proposal rejected" in str(w[-1])
# Verify that the inference finishes with the right number of samples
assert samples[bad()].shape == (1, 20, 1)
def test_cholesky_error_nuts_adaptation():
nuts = bm.SingleSiteNoUTurnSampler()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning
samples = nuts.infer([bad()], {}, 20, num_chains=1, num_adaptive_samples=30)
# Verify that the warning is triggered
assert len(w) == 1
assert "Numerical error" in str(w[-1])
# Verify that the inference finishes with the right number of samples
assert samples[bad()].shape == (1, 20, 1)
| beanmachine-main | tests/ppl/inference/inference_error_reporting_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
class PredictiveTest(unittest.TestCase):
@bm.random_variable
def prior(self):
return dist.Uniform(torch.tensor(0.0), torch.tensor(1.0))
@bm.random_variable
def likelihood(self):
return dist.Bernoulli(self.prior())
@bm.random_variable
def likelihood_i(self, i):
return dist.Bernoulli(self.prior())
@bm.random_variable
def prior_1(self):
return dist.Uniform(torch.tensor([0.0]), torch.tensor([1.0]))
@bm.random_variable
def likelihood_1(self):
return dist.Bernoulli(self.prior_1())
@bm.random_variable
def likelihood_dynamic(self, i):
if self.likelihood_i(i).item() > 0:
return dist.Normal(torch.zeros(1), torch.ones(1))
else:
return dist.Normal(5.0 * torch.ones(1), torch.ones(1))
@bm.random_variable
def prior_2(self):
return dist.Uniform(torch.zeros(1, 2), torch.ones(1, 2))
@bm.random_variable
def likelihood_2(self, i):
return dist.Bernoulli(self.prior_2())
@bm.random_variable
def likelihood_2_vec(self, i):
return dist.Bernoulli(self.prior_2())
@bm.random_variable
def likelihood_reg(self, x):
return dist.Normal(self.prior() * x, torch.tensor(1.0))
def test_prior_predictive(self):
queries = [self.prior(), self.likelihood()]
predictives = bm.simulate(queries, num_samples=10)
assert predictives[self.prior()].shape == (1, 10)
assert predictives[self.likelihood()].shape == (1, 10)
def test_posterior_predictive(self):
obs = {
self.likelihood_i(0): torch.tensor(1.0),
self.likelihood_i(1): torch.tensor(0.0),
}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior()], obs, num_samples=10, num_chains=2
)
assert post_samples[self.prior()].shape == (2, 10)
predictives = bm.simulate(list(obs.keys()), post_samples, vectorized=True)
assert predictives[self.likelihood_i(0)].shape == (2, 10)
assert predictives[self.likelihood_i(1)].shape == (2, 10)
def test_posterior_predictive_seq(self):
obs = {
self.likelihood_i(0): torch.tensor(1.0),
self.likelihood_i(1): torch.tensor(0.0),
}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior()], obs, num_samples=10, num_chains=2
)
assert post_samples[self.prior()].shape == (2, 10)
predictives = bm.simulate(list(obs.keys()), post_samples, vectorized=False)
assert predictives[self.likelihood_i(0)].shape == (2, 10)
assert predictives[self.likelihood_i(1)].shape == (2, 10)
def test_predictive_dynamic(self):
obs = {
self.likelihood_dynamic(0): torch.tensor([0.9]),
self.likelihood_dynamic(1): torch.tensor([4.9]),
}
# only query one of the variables
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior()], obs, num_samples=10, num_chains=2
)
assert post_samples[self.prior()].shape == (2, 10)
predictives = bm.simulate(list(obs.keys()), post_samples, vectorized=False)
assert predictives[self.likelihood_dynamic(0)].shape == (2, 10)
assert predictives[self.likelihood_dynamic(1)].shape == (2, 10)
def test_predictive_data(self):
x = torch.randn(4)
y = torch.randn(4) + 2.0
obs = {self.likelihood_reg(x): y}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior()], obs, num_samples=10, num_chains=2
)
assert post_samples[self.prior()].shape == (2, 10)
test_x = torch.randn(4, 1, 1)
test_query = self.likelihood_reg(test_x)
predictives = bm.simulate([test_query], post_samples, vectorized=True)
assert predictives[test_query].shape == (4, 2, 10)
def test_posterior_predictive_1d(self):
obs = {self.likelihood_1(): torch.tensor([1.0])}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior_1()], obs, num_samples=10, num_chains=1
)
assert post_samples[self.prior_1()].shape == (1, 10, 1)
predictives = bm.simulate(list(obs.keys()), post_samples, vectorized=True)
y = predictives[self.likelihood_1()].shape
assert y == (1, 10, 1)
def test_multi_chain_infer_predictive_2d(self):
torch.manual_seed(10)
obs = {
self.likelihood_2(0): torch.tensor([[1.0, 1.0]]),
self.likelihood_2(1): torch.tensor([[0.0, 1.0]]),
}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior_2()], obs, num_samples=10, num_chains=2
)
assert post_samples[self.prior_2()].shape == (2, 10, 1, 2)
predictives = bm.simulate(list(obs.keys()), post_samples, vectorized=True)
predictive_0 = predictives[self.likelihood_2(0)]
predictive_1 = predictives[self.likelihood_2(1)]
assert predictive_0.shape == (2, 10, 1, 2)
assert predictive_1.shape == (2, 10, 1, 2)
assert (predictive_1 - predictive_0).sum().item() != 0
def test_empirical(self):
obs = {
self.likelihood_i(0): torch.tensor(1.0),
self.likelihood_i(1): torch.tensor(0.0),
self.likelihood_i(2): torch.tensor(0.0),
}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior()], obs, num_samples=10, num_chains=4
)
empirical = bm.empirical([self.prior()], post_samples, num_samples=26)
assert empirical[self.prior()].shape == (1, 26)
predictives = bm.simulate(list(obs.keys()), post_samples, vectorized=True)
empirical = bm.empirical(list(obs.keys()), predictives, num_samples=27)
assert len(empirical) == 3
assert empirical[self.likelihood_i(0)].shape == (1, 27)
assert empirical[self.likelihood_i(1)].shape == (1, 27)
def test_return_inference_data(self):
torch.manual_seed(10)
obs = {
self.likelihood_2(0): torch.tensor([[1.0, 1.0]]),
self.likelihood_2(1): torch.tensor([[0.0, 1.0]]),
}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior_2()], obs, num_samples=10, num_chains=2
)
assert post_samples[self.prior_2()].shape == (2, 10, 1, 2)
predictives = bm.simulate(
list(obs.keys()),
post_samples,
vectorized=True,
).to_inference_data()
assert "posterior" in predictives
assert "observed_data" in predictives
assert "log_likelihood" in predictives
assert "posterior_predictive" in predictives
assert predictives.posterior_predictive[self.likelihood_2(0)].shape == (
2,
10,
1,
2,
)
assert predictives.posterior_predictive[self.likelihood_2(1)].shape == (
2,
10,
1,
2,
)
def test_posterior_dict(self):
obs = {
self.likelihood_i(0): torch.tensor(1.0),
self.likelihood_i(1): torch.tensor(0.0),
}
posterior = {self.prior(): torch.tensor([0.5, 0.5])}
predictives_dict = bm.simulate(list(obs.keys()), posterior)
assert predictives_dict[self.likelihood_i(0)].shape == (1, 2)
assert predictives_dict[self.likelihood_i(1)].shape == (1, 2)
def test_posterior_dict_predictive(self):
obs = {
self.likelihood_i(0): torch.tensor(1.0),
self.likelihood_i(1): torch.tensor(0.0),
}
post_samples = bm.SingleSiteAncestralMetropolisHastings().infer(
[self.prior()], obs, num_samples=10, num_chains=1
)
assert post_samples[self.prior()].shape == (1, 10)
post_samples_dict = dict(post_samples)
predictives_dict = bm.simulate(list(obs.keys()), post_samples_dict)
assert predictives_dict[self.likelihood_i(0)].shape == (1, 10)
assert predictives_dict[self.likelihood_i(1)].shape == (1, 10)
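# Shape convention inferred from the assertions in this file (stated here for
# orientation, not taken from library docs): results are indexed as
# (num_chains, num_samples, *event_shape), and vectorized simulate prepends
# any extra data dimensions, e.g. (4, 2, 10) for the 4 test points in
# test_predictive_data.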
| beanmachine-main | tests/ppl/inference/predictive_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from sys import float_info
import torch.distributions as dist
from beanmachine.ppl.testlib.hypothesis_testing import (
inverse_normal_cdf,
mean_equality_hypothesis_confidence_interval,
mean_equality_hypothesis_test,
)
from numpy import sqrt
from torch import manual_seed, mean, tensor
class HypothesisTestingTest(unittest.TestCase):
"""This class tests the hypothesis test codes."""
# Uniformly distributed random numbers
def random(self, min_bound, max_bound):
# TODO: Consider replacing with: (max_bound - min_bound) * torch.rand(size) + min_bound
# where size = (max_bound+min_bound).size()
return dist.uniform.Uniform(min_bound, max_bound).sample()
# Determining the range of floating point values we will explore
def float_exponent_range(self, safety_factor=10):
"""Provided exponents for range of floating point
numbers we are willing to test. The parameter
safety_factor should always be greater than 1, and
is used to avoid pressing extreme values."""
exp_min = float_info.min_10_exp / safety_factor
exp_max = float_info.max_10_exp / safety_factor
return exp_min, exp_max
    # Determining the range of means and stds of the distributions we will explore
def random_mean_and_std(self, exp_min, exp_max):
"""Generates a mean and std from a `reasonable` range
of possible test values. Please note that this generator
is by no means `exhaustive`. The purpose of the method
is to simply provide a based set of values for checking
our basic hypothesis tests."""
exp_mean = self.random(exp_min, exp_max)
exp_std = exp_mean + self.random(-3, 3)
true_mean = self.random(-1, 1) * 10**exp_mean
true_std = self.random(0, 1) * 10**exp_std
return true_mean, true_std
# Main procedure for testing the hypothesis test
# It works by checking the significance level (alpha) semantics
# of the mean equality hypothesis test.
def run_mean_equality_hypothesis_test_on_synthetic_samples(
self, samples, sample_size, alpha, random_seed=42
):
"""Generates as many samples as provided by the parameter of that
name, and performs the mean_equality_hypothesis_test
on each of these samples. Since we use the mean and standard
devaiation of the distribution, which are known, the hypothesis
test *should* faile at a rate fo alpha. In order for this to be
checked, we return the observed_alpha rate. In addition, we check
that the hypothesis_test to confidence_interval methods are consistent,
and return a count of any potential discrepancies between them."""
manual_seed(random_seed)
accepted_test = 0
exp_min, exp_max = self.float_exponent_range()
        sample_size = tensor([sample_size])
        for _ in range(0, samples):
            true_mean, true_std = self.random_mean_and_std(exp_min, exp_max)
            d = dist.normal.Normal(loc=true_mean, scale=true_std)
            r = d.sample(sample_size)
sample_mean = mean(r)
# Record hypothesis_test_behavior for this single sample
accept_test = mean_equality_hypothesis_test(
sample_mean, true_mean, true_std, sample_size, alpha
)
if accept_test:
accepted_test += 1
# Compare hypothesis_test to confidence_interval
lower_bound, upper_bound = mean_equality_hypothesis_confidence_interval(
true_mean, true_std, sample_size, alpha
)
below_upper = (lower_bound <= sample_mean).all()
above_lower = (sample_mean <= upper_bound).all()
accept_interval = below_upper and above_lower
# accept_interval = min(lower_bound <= sample_mean <= upper_bound).item()
self.assertFalse(
accept_test and not accept_interval, "Interval can be too small"
)
self.assertFalse(
accept_interval and not accept_test, "Interval can be too big"
)
observed_alpha = 1 - accepted_test / samples
return observed_alpha
# Test function for the hypothesis test. Normal operation is to
# take no arguments. Auditing can be done by changing the random_seed.
# An audit would pass if the test returns False for only an alpha
# fraction of the random_seeds on average. Since this is a stochastic
    # correctness criterion, we use alpha_meta for this (meta-)test.
def test_mean_equality_hypothesis_test(
self, runs=1000, samples=100, alpha=0.01, alpha_meta=0.01, random_seed=42
):
"""Check that the hypothesis tests are working as expected,
that is, their promised alpha is about the same as the rate at which
they fail. The idea here is that we run a series of checks, and treat
this as a binomial distribution.
Note, the alpha_meta for this test should not be confused with the
alpha of the individual tests.
Yes, this method is using hypothesis testing to test our hypothesis
testing method. We call this a meta-test.
Note:
1) We do the meta-test multiple times (runs)
2) Each meta-test is a Bernoulli trial. The probability of failure
should be exactly alpha.
3) We check that the total runs of the meta-test have an observed
failure rate that is equal to alpha. We do this by checking
that it falls within the alpha_meta CI.
"""
observed_alphas = [
self.run_mean_equality_hypothesis_test_on_synthetic_samples(
samples=samples,
sample_size=100,
alpha=alpha,
random_seed=(random_seed + i) * i,
)
for i in range(0, runs)
]
# Meta-test
true_mean = alpha # For binomial meta-test distribution
true_std = sqrt(alpha * (1 - alpha))
bound = inverse_normal_cdf(1 - alpha_meta / 2)
binomial_results = [
-bound <= (observed_alpha - true_mean) * sqrt(samples) / true_std <= bound
for observed_alpha in observed_alphas
]
        # Notice that the meta-tests give us a series of booleans. How do we interpret
        # those? That's why we need the meta-meta-test.
# Meta-meta-test.
        true_mean = (
            1 - alpha_meta
        )  # So, we'll use alpha_meta for both the meta- and meta-meta-tests
true_std = sqrt(alpha_meta * (1 - alpha_meta))
observed_mean = sum(binomial_results) / runs
        bound = inverse_normal_cdf(
            1 - alpha_meta / 2
        )  # Yes, this is the same as for the meta-test
meta_meta_test = (
-bound <= (observed_mean - true_mean) * sqrt(runs) / true_std <= bound
)
message = str(
(
"true_mean = "
+ str(true_mean)
+ ", "
+ " observed_mean = "
+ str(observed_mean)
+ ", "
+ "adjusted_bound = "
+ str(bound * true_std / sqrt(runs)),
)
)
self.assertTrue(
meta_meta_test,
"Unable to confirm significance level (alpha) semantics: " + message,
)
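# A standalone sketch (illustrative only, not the library's implementation) of
# the z-test logic exercised above: under the null hypothesis, the mean of
# sample_size draws from Normal(true_mean, true_std) deviates from true_mean by
# more than z_{1 - alpha/2} * true_std / sqrt(sample_size) with probability
# alpha. The function name below is hypothetical.
def _sketch_mean_equality_test(sample_mean, true_mean, true_std, sample_size, alpha):
    bound = inverse_normal_cdf(1 - alpha / 2)  # two-sided critical value
    z = (sample_mean - true_mean) * sqrt(sample_size) / true_std
    return -bound <= z <= bound  # accept H0 iff the z-score is within bounds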
| beanmachine-main | tests/ppl/inference/hypothesis_testing_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.inference import SingleSiteNewtonianMonteCarlo
from beanmachine.ppl.inference.proposer.nmc import (
SingleSiteHalfSpaceNMCProposer,
SingleSiteRealSpaceNMCProposer,
SingleSiteSimplexSpaceNMCProposer,
)
from beanmachine.ppl.world.utils import BetaDimensionTransform
from beanmachine.ppl.world.world import World
from torch import tensor
class SingleSiteNewtonianMonteCarloTest(unittest.TestCase):
class SampleNormalModel:
@bm.random_variable
def foo(self):
return dist.Normal(tensor(2.0), tensor(2.0))
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), torch.tensor(1.0))
class SampleTransformModel:
@bm.random_variable
def realspace(self):
return dist.Normal(tensor(0.0), tensor(1.0))
@bm.random_variable
def halfspace(self):
return dist.Gamma(tensor(2.0), tensor(2.0))
@bm.random_variable
def simplex(self):
return dist.Dirichlet(tensor([0.1, 0.9]))
@bm.random_variable
def interval(self):
return dist.Uniform(tensor(1.0), tensor(3.0))
@bm.random_variable
def beta(self):
return dist.Beta(tensor(1.0), tensor(1.0))
class SampleShapeModel:
@bm.random_variable
def realspace(self):
return dist.Normal(torch.zeros(2, 4), tensor(1.0))
@bm.random_variable
def halfspace(self):
return dist.Gamma(torch.zeros(1, 2, 4) + tensor(2.0), tensor(2.0))
@bm.random_variable
def simplex(self):
return dist.Dirichlet(tensor([0.1, 0.9]))
@bm.random_variable
def interval(self):
return dist.Uniform(tensor(1.0), tensor(3.0))
@bm.random_variable
def beta(self):
return dist.Beta(tensor([1.0, 2.0, 3.0]), tensor([1.0, 2.0, 3.0]))
class SampleIndependentShapeModel:
@bm.random_variable
def realspace(self):
return dist.Independent(dist.Normal(torch.zeros(2, 4), tensor(1.0)), 1)
@bm.random_variable
def halfspace(self):
return dist.Independent(
dist.Gamma(torch.zeros(1, 2, 4) + tensor(2.0), tensor(2.0)), 1
)
@bm.random_variable
def simplex(self):
return dist.Independent(dist.Dirichlet(tensor([[0.1, 0.9], [0.1, 0.9]])), 1)
@bm.random_variable
def interval(self):
return dist.Independent(
dist.Uniform(tensor([1.0, 1.0]), tensor([3.0, 3.0])), 1
)
@bm.random_variable
def beta(self):
return dist.Independent(
dist.Beta(tensor([1.0, 2.0, 3.0]), tensor([1.0, 2.0, 3.0])), 1
)
class SampleStudentTModel:
@bm.random_variable
def x(self):
return dist.StudentT(df=2.0)
def test_single_site_newtonian_monte_carlo_student_t(self):
model = self.SampleStudentTModel()
samples = (
bm.SingleSiteNewtonianMonteCarlo()
.infer(
queries=[model.x()],
observations={},
num_samples=1_000,
num_chains=1,
)
.get_chain(0)[model.x()]
)
self.assertTrue((samples.abs() > 2.0).any())
def test_single_site_newtonian_monte_carlo_no_transform(self):
model = self.SampleTransformModel()
nw = bm.SingleSiteNewtonianMonteCarlo()
real_key = model.realspace()
half_key = model.halfspace()
simplex_key = model.simplex()
interval_key = model.interval()
beta_key = model.beta()
queries = [
model.realspace(),
model.halfspace(),
model.simplex(),
model.interval(),
model.beta(),
]
observations = {}
world = World.initialize_world(queries, observations)
self.assertTrue(real_key in world)
self.assertTrue(half_key in world)
self.assertTrue(simplex_key in world)
self.assertTrue(interval_key in world)
self.assertTrue(beta_key in world)
# trigger proposer initialization
nw.get_proposers(world, world.latent_nodes, 0)
# test that resulting shapes of proposed values are correct
proposer = nw._proposers[real_key]
proposed_value = proposer.propose(world)[0][real_key]
self.assertIsInstance(
proposer,
SingleSiteRealSpaceNMCProposer,
)
self.assertEqual(proposed_value.shape, torch.Size([]))
proposer = nw._proposers[half_key]
proposed_value = proposer.propose(world)[0][half_key]
self.assertIsInstance(
proposer,
SingleSiteHalfSpaceNMCProposer,
)
self.assertEqual(proposed_value.shape, torch.Size([]))
proposer = nw._proposers[simplex_key]
proposed_value = proposer.propose(world)[0][simplex_key]
self.assertIsInstance(
proposer,
SingleSiteSimplexSpaceNMCProposer,
)
self.assertEqual(proposed_value.shape, torch.zeros(2).shape)
proposer = nw._proposers[interval_key]
proposed_value = proposer.propose(world)[0][interval_key]
self.assertEqual(proposed_value.shape, torch.Size([]))
proposer = nw._proposers[beta_key]
proposed_value = proposer.propose(world)[0][beta_key]
self.assertIsInstance(proposer, SingleSiteSimplexSpaceNMCProposer)
self.assertEqual(proposed_value.shape, torch.Size([]))
self.assertEqual(
proposer._transform,
BetaDimensionTransform(),
)
def test_single_site_newtonian_monte_carlo_transform_shape(self):
model = self.SampleShapeModel()
nw = SingleSiteNewtonianMonteCarlo()
real_key = model.realspace()
half_key = model.halfspace()
simplex_key = model.simplex()
interval_key = model.interval()
beta_key = model.beta()
queries = [
model.realspace(),
model.halfspace(),
model.simplex(),
model.interval(),
model.beta(),
]
observations = {}
world = World.initialize_world(queries, observations)
self.assertTrue(real_key in world)
self.assertTrue(half_key in world)
self.assertTrue(simplex_key in world)
self.assertTrue(interval_key in world)
self.assertTrue(beta_key in world)
# trigger proposer initialization
nw.get_proposers(world, world.latent_nodes, 0)
# test that resulting shapes of proposed values are correct
proposer = nw._proposers[real_key]
proposed_value = proposer.propose(world)[0][real_key]
self.assertEqual(proposed_value.shape, torch.Size([2, 4]))
proposer = nw._proposers[half_key]
proposed_value = proposer.propose(world)[0][half_key]
self.assertEqual(proposed_value.shape, torch.Size([1, 2, 4]))
proposer = nw._proposers[simplex_key]
proposed_value = proposer.propose(world)[0][simplex_key]
self.assertEqual(proposed_value.shape, torch.Size([2]))
proposer = nw._proposers[interval_key]
proposed_value = proposer.propose(world)[0][interval_key]
self.assertEqual(proposed_value.shape, torch.Size([]))
proposer = nw._proposers[beta_key]
proposed_value = proposer.propose(world)[0][beta_key]
self.assertEqual(proposed_value.shape, torch.Size([3]))
def test_single_site_newtonian_monte_carlo_no_transform_independent_shape(self):
model = self.SampleIndependentShapeModel()
nw = bm.SingleSiteNewtonianMonteCarlo()
real_key = model.realspace()
half_key = model.halfspace()
simplex_key = model.simplex()
interval_key = model.interval()
beta_key = model.beta()
queries = [
real_key,
half_key,
simplex_key,
interval_key,
beta_key,
]
observations = {}
world = World.initialize_world(queries, observations)
# trigger proposer initialization
nw.get_proposers(world, world.latent_nodes, 0)
# test that resulting shapes of proposed values are correct
proposer = nw._proposers[real_key]
proposed_value = proposer.propose(world)[0][real_key]
self.assertIsInstance(
proposer,
SingleSiteRealSpaceNMCProposer,
)
self.assertEqual(proposed_value.shape, torch.Size([2, 4]))
proposer = nw._proposers[half_key]
proposed_value = proposer.propose(world)[0][half_key]
self.assertIsInstance(
proposer,
SingleSiteHalfSpaceNMCProposer,
)
self.assertEqual(proposed_value.shape, torch.Size([1, 2, 4]))
proposer = nw._proposers[simplex_key]
proposed_value = proposer.propose(world)[0][simplex_key]
self.assertIsInstance(
proposer,
SingleSiteSimplexSpaceNMCProposer,
)
self.assertEqual(proposed_value.shape, torch.Size([2, 2]))
proposer = nw._proposers[interval_key]
proposed_value = proposer.propose(world)[0][interval_key]
self.assertEqual(proposed_value.shape, torch.Size([2]))
proposer = nw._proposers[beta_key]
proposed_value = proposer.propose(world)[0][beta_key]
self.assertIsInstance(
proposer,
SingleSiteSimplexSpaceNMCProposer,
)
self.assertEqual(proposed_value.shape, torch.Size([3]))
| beanmachine-main | tests/ppl/inference/single_site_newtonian_monte_carlo_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class SingleSiteHamiltonianMonteCarloConjugateTest(
unittest.TestCase, AbstractConjugateTests
):
def test_beta_binomial_conjugate_run(self):
hmc = bm.SingleSiteHamiltonianMonteCarlo(0.5, 0.05)
self.beta_binomial_conjugate_run(hmc, num_samples=500, num_adaptive_samples=500)
def test_gamma_gamma_conjugate_run(self):
hmc = bm.SingleSiteHamiltonianMonteCarlo(0.5, 0.05)
self.gamma_gamma_conjugate_run(hmc, num_samples=500, num_adaptive_samples=500)
def test_gamma_normal_conjugate_run(self):
hmc = bm.SingleSiteHamiltonianMonteCarlo(0.5, 0.05)
self.gamma_normal_conjugate_run(hmc, num_samples=500, num_adaptive_samples=500)
def test_normal_normal_conjugate_run(self):
hmc = bm.SingleSiteHamiltonianMonteCarlo(1.0, 0.05)
self.normal_normal_conjugate_run(hmc, num_samples=500, num_adaptive_samples=500)
@unittest.skip("Known to fail. Investigating in T77865889.")
def test_dirichlet_categorical_conjugate_run(self):
hmc = bm.SingleSiteHamiltonianMonteCarlo(0.1, 0.01)
self.dirichlet_categorical_conjugate_run(
hmc, num_samples=500, num_adaptive_samples=500
)
| beanmachine-main | tests/ppl/inference/single_site_hamiltonian_monte_carlo_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class SingleSiteAncestralMetropolisHastingsConjugateTest(
unittest.TestCase, AbstractConjugateTests
):
def setUp(self):
self.mh = bm.SingleSiteAncestralMetropolisHastings()
def test_beta_binomial_conjugate_run(self):
self.beta_binomial_conjugate_run(self.mh)
def test_gamma_gamma_conjugate_run(self):
self.gamma_gamma_conjugate_run(self.mh, random_seed=123)
def test_gamma_normal_conjugate_run(self):
self.gamma_normal_conjugate_run(self.mh, num_samples=20000)
def test_normal_normal_conjugate_run(self):
self.normal_normal_conjugate_run(self.mh, num_samples=5000)
def test_dirichlet_categorical_conjugate_run(self):
self.dirichlet_categorical_conjugate_run(self.mh, num_samples=10000)
| beanmachine-main | tests/ppl/inference/single_site_ancestral_mh_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from beanmachine.ppl.inference.compositional_infer import CompositionalInference
from beanmachine.ppl.testlib.abstract_conjugate import AbstractConjugateTests
class CompositionalInferenceConjugateTest(unittest.TestCase, AbstractConjugateTests):
def setUp(self):
self.mh = CompositionalInference()
def test_beta_binomial_conjugate_run(self):
self.beta_binomial_conjugate_run(self.mh)
def test_gamma_gamma_conjugate_run(self):
self.gamma_gamma_conjugate_run(self.mh)
def test_gamma_normal_conjugate_run(self):
self.gamma_normal_conjugate_run(self.mh)
def test_normal_normal_conjugate_run(self):
self.normal_normal_conjugate_run(self.mh)
def test_dirichlet_categorical_conjugate_run(self):
self.dirichlet_categorical_conjugate_run(self.mh)
| beanmachine-main | tests/ppl/inference/compositional_infer_conjugate_test_nightly.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.hmc_proposer import HMCProposer
from beanmachine.ppl.world import World
@bm.random_variable
def foo():
return dist.Uniform(0.0, 1.0)
@bm.random_variable
def bar():
return dist.Normal(foo(), 1.0)
@pytest.fixture
def world():
w = World()
w.call(bar())
return w
@pytest.fixture
def hmc(world):
hmc_proposer = HMCProposer(world, world.latent_nodes, 10, trajectory_length=1.0)
return hmc_proposer
def test_potential_grads(hmc):
pe, pe_grad = hmc._potential_grads(hmc._positions)
assert isinstance(pe, torch.Tensor)
assert pe.numel() == 1
assert isinstance(pe_grad, torch.Tensor)
assert pe_grad.shape == hmc._positions.shape
def test_kinetic_grads(hmc):
momentums = hmc._initialize_momentums(hmc._positions)
ke = hmc._kinetic_energy(momentums, hmc._mass_inv)
assert isinstance(ke, torch.Tensor)
assert ke.numel() == 1
ke_grad = hmc._kinetic_grads(momentums, hmc._mass_inv)
assert isinstance(ke_grad, torch.Tensor)
assert ke_grad.shape == hmc._positions.shape
def test_leapfrog_step(hmc):
step_size = torch.tensor(0.0)
momentums = hmc._initialize_momentums(hmc._positions)
new_positions, new_momentums, pe, pe_grad = hmc._leapfrog_step(
hmc._positions, momentums, step_size, hmc._mass_inv
)
assert torch.allclose(momentums, new_momentums)
assert torch.allclose(hmc._positions, new_positions)
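# A reference sketch of one leapfrog (velocity Verlet) step, the standard HMC
# integrator; `potential_grad` is a hypothetical callable returning the
# gradient of the potential energy at a position, and mass_inv is assumed
# diagonal. With step_size == 0 the inputs pass through unchanged, which is
# the property test_leapfrog_step checks above.
def _sketch_leapfrog_step(positions, momentums, step_size, mass_inv, potential_grad):
    momentums = momentums - 0.5 * step_size * potential_grad(positions)  # half-step
    positions = positions + step_size * mass_inv * momentums  # full position step
    momentums = momentums - 0.5 * step_size * potential_grad(positions)  # half-step
    return positions, momentums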
@pytest.mark.parametrize(
    # forcing the step_size to be 0 for HMC/NUTS
"algorithm",
[
bm.GlobalNoUTurnSampler(initial_step_size=0.0),
bm.GlobalHamiltonianMonteCarlo(trajectory_length=1.0, initial_step_size=0.0),
],
)
def test_step_size_exception(algorithm):
queries = [foo()]
observations = {bar(): torch.tensor(0.5)}
with pytest.raises(ValueError):
algorithm.infer(
queries,
observations,
num_samples=20,
num_chains=1,
)
| beanmachine-main | tests/ppl/inference/proposer/hmc_proposer_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit test for NormalEig class"""
import unittest
import torch
from beanmachine.ppl.inference.proposer.normal_eig import NormalEig
from torch.distributions.multivariate_normal import MultivariateNormal
class NormalEigTest(unittest.TestCase):
def test_normal_eig(self) -> None:
covar = torch.Tensor([[1, 0.1, 0], [0.1, 2, 0.5], [0, 0.5, 3]])
evals, evecs = torch.linalg.eigh(covar)
mean = torch.Tensor([1.0, 3.5, -1.2])
# we want to test that both distributions are identical
ref_dist = MultivariateNormal(mean, covar)
test_dist = NormalEig(mean, evals, evecs)
# density at the mean should be equal
self.assertAlmostEqual(
ref_dist.log_prob(mean).item(), test_dist.log_prob(mean).item(), 2
)
# density at a random sample should also be equal
val = test_dist.sample()
self.assertEqual(val.shape, torch.Size([3]))
self.assertAlmostEqual(
ref_dist.log_prob(val).item(), test_dist.log_prob(val).item(), 2
)
# test that the empirical mean is correct
emp_mean = sum(test_dist.sample() for _ in range(10000)) / 10000
self.assertTrue(((mean - emp_mean).abs() < 0.1).all())
# test that the empirical covariance is correct
def outerprod(x):
return torch.ger(x, x)
emp_covar = (
sum(outerprod(test_dist.sample() - mean) for _ in range(2000)) / 2000
)
self.assertTrue(((covar - emp_covar).abs() < 0.2).all())
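# For reference: given the eigendecomposition covar = evecs @ diag(evals) @ evecs.T,
# a draw from MultivariateNormal(mean, covar) can be produced without ever
# forming covar. A minimal sketch of that construction (assumed to be the idea
# behind NormalEig; the class's actual code may differ):
def _sketch_eig_sample(mean, evals, evecs):
    z = torch.randn_like(mean)  # standard normal noise
    # Cov(evecs @ (evals.sqrt() * z)) = evecs @ diag(evals) @ evecs.T = covar
    return mean + evecs @ (evals.sqrt() * z)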
| beanmachine-main | tests/ppl/inference/proposer/normal_eig_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from beanmachine.ppl.inference.proposer.utils import DictToVecConverter
def test_dict_to_vec_conversion():
d = {"a": torch.ones((2, 5)), "b": torch.rand(5), "c": torch.tensor(3.0)}
converter = DictToVecConverter(example_dict=d)
v = converter.to_vec(d)
assert len(v) == 16 # 2x5 + 5 + 1
    # applying exp on the flattened tensor is equivalent to applying it to each
    # of the tensors in the dictionary
d_exp = converter.to_dict(torch.exp(v))
for key in d:
assert torch.allclose(torch.exp(d[key]), d_exp[key])
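# A minimal sketch of the flatten/unflatten idea such a converter has to
# implement (assumed for illustration; DictToVecConverter's real code may
# differ): iterate the keys in a fixed order, remember shapes, and slice the
# vector back out. Function names below are hypothetical.
def _sketch_to_vec(tensor_dict):
    return torch.cat([t.flatten() for t in tensor_dict.values()])

def _sketch_to_dict(vec, example_dict):
    out, offset = {}, 0
    for key, t in example_dict.items():
        out[key] = vec[offset : offset + t.numel()].reshape(t.shape)
        offset += t.numel()
    return out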
| beanmachine-main | tests/ppl/inference/proposer/utils_test.py |
beanmachine-main | tests/ppl/inference/proposer/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import beanmachine.ppl as bm
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.nuts_proposer import (
_Tree,
_TreeArgs,
_TreeNode,
NUTSProposer,
)
from beanmachine.ppl.world import World
@bm.random_variable
def foo():
return dist.Beta(2.0, 2.0)
@bm.random_variable
def bar():
return dist.Bernoulli(foo())
@pytest.fixture
def nuts():
world = World(observations={bar(): torch.tensor(0.8)})
world.call(bar())
nuts_proposer = NUTSProposer(world, world.latent_nodes, 10)
return nuts_proposer
@pytest.fixture
def tree_node(nuts):
momentums = nuts._initialize_momentums(nuts._positions)
return _TreeNode(
positions=nuts._positions, momentums=momentums, pe_grad=nuts._pe_grad
)
@pytest.fixture
def tree_args(tree_node, nuts):
initial_energy = nuts._hamiltonian(
nuts._positions,
tree_node.momentums,
nuts._mass_inv,
nuts._pe,
)
return _TreeArgs(
log_slice=-initial_energy,
direction=torch.tensor(1),
step_size=nuts.step_size,
initial_energy=initial_energy,
mass_inv=nuts._mass_inv,
)
def test_base_tree(tree_node, tree_args, nuts):
nuts._multinomial_sampling = False
tree_args = tree_args._replace(
log_slice=torch.log1p(-torch.rand(())) - tree_args.initial_energy
)
tree = nuts._build_tree_base_case(root=tree_node, args=tree_args)
assert isinstance(tree, _Tree)
assert torch.isclose(tree.log_weight, torch.tensor(float("-inf"))) or torch.isclose(
tree.log_weight, torch.tensor(0.0)
)
assert tree.left == tree.right
def test_base_tree_multinomial(tree_node, tree_args, nuts):
tree = nuts._build_tree_base_case(root=tree_node, args=tree_args)
assert isinstance(tree, _Tree)
# in multinomial sampling, trees are weighted by their accept prob
assert torch.isclose(
torch.clamp(tree.log_weight.exp(), max=1.0), tree.sum_accept_prob
)
def test_build_tree(tree_node, tree_args, nuts):
tree_depth = 3
tree = nuts._build_tree(root=tree_node, tree_depth=tree_depth, args=tree_args)
assert isinstance(tree, _Tree)
assert tree.turned_or_diverged or (tree.left is not tree.right)
assert tree.turned_or_diverged or tree.num_proposals == 2**tree_depth
| beanmachine-main | tests/ppl/inference/proposer/nuts_proposer_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import beanmachine.ppl as bm
import numpy as np
import pytest
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.hmc_utils import (
DualAverageAdapter,
MassMatrixAdapter,
RealSpaceTransform,
WelfordCovariance,
WindowScheme,
)
from beanmachine.ppl.inference.proposer.utils import DictToVecConverter
from beanmachine.ppl.world import World
class SampleModel:
@bm.random_variable
def foo(self):
return dist.Uniform(0.0, 1.0)
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), 1.0)
class DiscreteModel:
@bm.random_variable
def baz(self):
return dist.Poisson(5.0)
def test_dual_average_adapter():
adapter = DualAverageAdapter(torch.tensor(0.1))
epsilon1 = adapter.step(torch.tensor(1.0))
epsilon2 = adapter.step(torch.tensor(0.0))
assert epsilon2 < adapter.finalize() < epsilon1
def test_dual_average_with_different_delta():
adapter1 = DualAverageAdapter(torch.tensor(1.0), delta=0.8)
adapter2 = DualAverageAdapter(torch.tensor(1.0), delta=0.2)
prob = torch.tensor(0.5)
    # prob > delta means we can increase the step size, whereas prob < delta means
    # we need to decrease the step size
epsilon1 = adapter1.step(prob)
epsilon2 = adapter2.step(prob)
assert epsilon1 < epsilon2
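# For context, a stripped-down sketch of the dual-averaging scheme from
# Hoffman & Gelman (2014) that adapters like the one above typically follow
# (constants gamma, t0, kappa use the paper's defaults; this is an
# illustration, not DualAverageAdapter's actual code):
def _sketch_dual_average(accept_probs, mu, delta=0.8, gamma=0.05, t0=10.0, kappa=0.75):
    h_bar, log_eps_bar = 0.0, 0.0
    for t, accept_prob in enumerate(accept_probs, start=1):
        # running average of how far we are from the target accept rate
        h_bar += ((delta - accept_prob) - h_bar) / (t + t0)
        log_eps = mu - np.sqrt(t) / gamma * h_bar  # shrink toward mu
        eta = t ** (-kappa)
        log_eps_bar = eta * log_eps + (1 - eta) * log_eps_bar  # smoothed iterate
    return np.exp(log_eps_bar)  # finalized step size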
def test_small_window_scheme():
num_adaptive_samples = 10
scheme = WindowScheme(num_adaptive_samples)
for _ in range(num_adaptive_samples):
# no window should be created if num_adaptive_samples is too small
assert not scheme.is_in_window
scheme.step()
def test_middle_window_scheme():
num_adaptive_samples = 125
scheme = WindowScheme(num_adaptive_samples)
num_windows = 0
for i in range(num_adaptive_samples):
if scheme.is_in_window:
# there should be a margin at the beginning and the end of a window
assert i > 0
if scheme.is_end_window:
num_windows += 1
assert i < num_adaptive_samples
scheme.step()
# there should only be a single window
assert num_windows == 1
@pytest.mark.parametrize("num_adaptive_samples", [175, 300, 399, 543])
def test_large_window_scheme(num_adaptive_samples):
scheme = WindowScheme(num_adaptive_samples)
window_sizes = []
for _ in range(num_adaptive_samples):
if scheme.is_end_window:
window_sizes.append(scheme._window_size)
scheme.step()
# size of windows should be monotonically increasing
sorted_window_sizes = sorted(window_sizes)
assert window_sizes == sorted_window_sizes
for win1, win2 in zip(window_sizes[:-1], window_sizes[1:-1]):
        # except for the last window, the window size should keep doubling
assert win2 == win1 * 2
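# The pattern checked above mirrors Stan-style windowed adaptation (sketched
# schematically; the exact margins are the scheme's own):
#     start margin | w | 2w | 4w | ... | end margin
# so later windows estimate the mass matrix from longer, better-mixed runs.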
@pytest.mark.parametrize("full_mass_matrix", [True, False])
def test_mass_matrix_adapter(full_mass_matrix):
model = SampleModel()
world = World()
world.call(model.bar())
positions_dict = RealSpaceTransform(world, world.latent_nodes)(dict(world))
dict2vec = DictToVecConverter(positions_dict)
positions = dict2vec.to_vec(positions_dict)
mass_matrix_adapter = MassMatrixAdapter(positions, full_mass_matrix)
momentums = mass_matrix_adapter.initialize_momentums(positions)
assert isinstance(momentums, torch.Tensor)
assert momentums.shape == positions.shape
mass_inv_old = mass_matrix_adapter.mass_inv.clone()
mass_matrix_adapter.step(positions)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mass_matrix_adapter.finalize()
# mass matrix adapter has seen less than 2 samples, so mass_inv is not updated
assert torch.allclose(mass_inv_old, mass_matrix_adapter.mass_inv)
# check the size of the matrix
matrix_width = len(positions)
if full_mass_matrix:
assert mass_inv_old.shape == (matrix_width, matrix_width)
else:
assert mass_inv_old.shape == (matrix_width,)
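# For reference, where mass_inv enters HMC (standard formulation, sketched for
# illustration; not necessarily the proposer's exact code): the kinetic energy
# is 0.5 * p^T M^{-1} p, which for a diagonal mass matrix stored as a vector
# reduces to an elementwise product, hence the two shapes asserted above.
def _sketch_kinetic_energy(momentums, mass_inv):
    if mass_inv.dim() == 1:  # diagonal M^{-1} stored as a vector
        return 0.5 * torch.sum(momentums * mass_inv * momentums)
    return 0.5 * momentums @ mass_inv @ momentums  # dense M^{-1}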
def test_diagonal_welford_covariance():
samples = dist.MultivariateNormal(
loc=torch.rand(5), scale_tril=torch.randn(5, 5).tril()
).sample((1000,))
welford = WelfordCovariance(diagonal=True)
for sample in samples:
welford.step(sample)
sample_var = torch.var(samples, dim=0)
estimated_var = welford.finalize(regularize=False)
assert torch.allclose(estimated_var, sample_var)
regularized_var = welford.finalize(regularize=True)
assert (torch.argsort(regularized_var) == torch.argsort(estimated_var)).all()
def test_dense_welford_covariance():
samples = dist.MultivariateNormal(
loc=torch.rand(5), scale_tril=torch.randn(5, 5).tril()
).sample((1000,))
welford = WelfordCovariance(diagonal=False)
for sample in samples:
welford.step(sample)
sample_cov = torch.from_numpy(np.cov(samples.T.numpy())).to(samples.dtype)
estimated_cov = welford.finalize(regularize=False)
assert torch.allclose(estimated_cov, sample_cov)
regularized_cov = welford.finalize(regularize=True)
assert (torch.argsort(regularized_cov) == torch.argsort(estimated_cov)).all()
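# Welford's online scheme, sketched for reference (textbook form; the class's
# exact code may differ): the mean and the sum of outer products of deviations
# are updated one sample at a time, so no samples need to be stored.
def _sketch_welford_covariance(samples):
    n = 0
    mean = torch.zeros(samples.shape[-1])
    m2 = torch.zeros(samples.shape[-1], samples.shape[-1])
    for x in samples:
        n += 1
        delta = x - mean
        mean = mean + delta / n
        m2 = m2 + torch.outer(delta, x - mean)  # old delta times new delta
    return m2 / (n - 1)  # unbiased sample covariance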
def test_welford_exception():
welford = WelfordCovariance()
welford.step(torch.rand(5))
with pytest.raises(RuntimeError): # number of samples is too small
welford.finalize()
def test_discrete_rv_exception():
model = DiscreteModel()
world = World()
world.call(model.baz())
with pytest.raises(TypeError):
RealSpaceTransform(world, world.latent_nodes)(dict(world))
| beanmachine-main | tests/ppl/inference/proposer/hmc_utils_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
import torch.distributions as dist
from beanmachine import ppl as bm
from beanmachine.ppl.inference.proposer.nmc import SingleSiteSimplexSpaceNMCProposer
from beanmachine.ppl.inference.single_site_nmc import SingleSiteNewtonianMonteCarlo
from beanmachine.ppl.world import World
from torch import tensor
class SingleSiteSimplexNewtonianMonteCarloProposerTest(unittest.TestCase):
def test_alpha_for_dirichlet(self):
alpha = tensor([[0.5, 0.5], [0.5, 0.5]])
@bm.random_variable
def a():
return dist.Dirichlet(alpha)
world_ = World()
with world_:
a()
nw_proposer = SingleSiteSimplexSpaceNMCProposer(a())
is_valid, predicted_alpha = nw_proposer.compute_alpha(world_)
self.assertEqual(is_valid, True)
self.assertAlmostEqual(
alpha.sum().item(), (predicted_alpha).sum().item(), delta=0.0001
)
def test_coin_flip(self):
prior_heads, prior_tails = 2.0, 2.0
p = bm.random_variable(lambda: dist.Beta(2.0, 2.0))
x = bm.random_variable(lambda: dist.Bernoulli(p()))
heads_observed = 5
samples = (
SingleSiteNewtonianMonteCarlo()
.infer(
queries=[p()],
observations={x(): torch.ones(heads_observed)},
num_samples=100,
num_chains=1,
)
.get_chain(0)
)
        # assert we are close to the conjugate posterior mean
self.assertAlmostEqual(
samples[p()].mean(),
(prior_heads + heads_observed)
/ (prior_heads + prior_tails + heads_observed),
delta=0.05,
)
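# The closed form used in the assertion above: with a Beta(a, b) prior on p and
# k heads in n Bernoulli observations, the posterior is Beta(a + k, b + n - k),
# with mean (a + k) / (a + b + n). Here every one of the heads_observed flips
# is a head, so n == k == 5 and the posterior mean is (2 + 5) / (2 + 2 + 5).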
| beanmachine-main | tests/ppl/inference/proposer/nmc/single_site_simplex_newtonian_monte_carlo_proposer_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.nmc import SingleSiteHalfSpaceNMCProposer
from beanmachine.ppl.world import World
from torch import tensor
class SingleSiteHalfSpaceNewtonianMonteCarloProposerTest(unittest.TestCase):
class SampleNormalModel:
@bm.random_variable
def foo(self):
return dist.Normal(tensor(2.0), tensor(2.0))
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), torch.tensor(1.0))
class SampleLogisticRegressionModel:
@bm.random_variable
def theta_0(self):
return dist.Normal(tensor(0.0), tensor(1.0))
@bm.random_variable
def theta_1(self):
return dist.Normal(tensor(0.0), tensor(1.0))
@bm.random_variable
def x(self, i):
return dist.Normal(tensor(0.0), tensor(1.0))
@bm.random_variable
def y(self, i):
y = self.theta_1() * self.x(i) + self.theta_0()
probs = 1 / (1 + (y * -1).exp())
return dist.Bernoulli(probs)
class SampleFallbackModel:
@bm.random_variable
def foo(self):
return dist.Gamma(tensor(2.0), tensor(2.0))
@bm.random_variable
def bar(self):
return dist.Normal(self.foo(), torch.tensor(1.0))
def test_alpha_and_beta_for_gamma(self):
alpha = tensor([2.0, 2.0, 2.0])
beta = tensor([2.0, 2.0, 2.0])
@bm.random_variable
def gamma():
return dist.Gamma(alpha, beta)
world = World()
with world:
gamma()
nw_proposer = SingleSiteHalfSpaceNMCProposer(gamma())
is_valid, predicted_alpha, predicted_beta = nw_proposer.compute_alpha_beta(
world
)
self.assertEqual(is_valid, True)
self.assertAlmostEqual(
alpha.sum().item(), (predicted_alpha).sum().item(), delta=0.0001
)
self.assertAlmostEqual(
beta.sum().item(), (predicted_beta).sum().item(), delta=0.0001
)
| beanmachine-main | tests/ppl/inference/proposer/nmc/single_site_half_space_newtonian_monte_carlo_proposer_test.py |
beanmachine-main | tests/ppl/inference/proposer/nmc/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
import torch.autograd
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.nmc.single_site_real_space_nmc_proposer import (
SingleSiteRealSpaceNMCProposer as SingleSiteRealSpaceNewtonianMonteCarloProposer,
)
from beanmachine.ppl.world import World
from beanmachine.ppl.world.variable import Variable
from torch import tensor
class SingleSiteRealSpaceNewtonianMonteCarloProposerTest(unittest.TestCase):
class SampleNormalModel:
@bm.random_variable
def foo(self):
return dist.MultivariateNormal(torch.zeros(2), torch.eye(2))
@bm.random_variable
def bar(self):
return dist.MultivariateNormal(self.foo(), torch.eye(2))
class SampleLogisticRegressionModel:
@bm.random_variable
def theta_0(self):
return dist.Normal(tensor(0.0), tensor(1.0))
@bm.random_variable
def theta_1(self):
return dist.Normal(tensor(0.0), tensor(1.0))
@bm.random_variable
def x(self, i):
return dist.Normal(tensor(0.0), tensor(1.0))
@bm.random_variable
def y(self, i):
y = self.theta_1() * self.x(i) + self.theta_0()
probs = 1 / (1 + (y * -1).exp())
return dist.Bernoulli(probs)
def test_mean_scale_tril_for_node_with_child(self):
foo_key = bm.random_variable(
lambda: dist.MultivariateNormal(
tensor([1.0, 1.0]), tensor([[1.0, 0.8], [0.8, 1]])
)
)
bar_key = bm.random_variable(
lambda: dist.MultivariateNormal(
foo_key(),
tensor([[1.0, 0.8], [0.8, 1.0]]),
)
)
nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(foo_key())
val = tensor([2.0, 2.0])
queries = [foo_key(), bar_key()]
observed_val = tensor([2.0, 2.0])
observations = {bar_key(): observed_val}
world = World.initialize_world(queries, observations)
world_vars = world._variables
world_vars[foo_key] = val
nw_proposer.learning_rate_ = 1.0
prop_dist = nw_proposer.get_proposal_distribution(world).base_dist
mean, scale_tril = prop_dist.mean, prop_dist.scale_tril
expected_mean = tensor([1.5, 1.5])
expected_scale_tril = torch.linalg.cholesky(
tensor([[0.5000, 0.4000], [0.4000, 0.5000]])
)
self.assertTrue(torch.isclose(mean, expected_mean).all())
self.assertTrue(torch.isclose(scale_tril, expected_scale_tril).all())
def test_mean_scale_tril(self):
model = self.SampleNormalModel()
foo_key = model.foo()
nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(foo_key)
val = tensor([2.0, 2.0])
val.requires_grad_(True)
distribution = dist.MultivariateNormal(
tensor([1.0, 1.0]), tensor([[1.0, 0.8], [0.8, 1]])
)
queries = [foo_key]
observations = {}
world = World.initialize_world(queries, observations)
world_vars = world._variables
world_vars[foo_key] = Variable(
value=val,
distribution=distribution,
)
nw_proposer.learning_rate_ = 1.0
prop_dist = nw_proposer.get_proposal_distribution(world).base_dist
mean, scale_tril = prop_dist.mean, prop_dist.scale_tril
expected_mean = tensor([1.0, 1.0])
expected_scale_tril = torch.linalg.cholesky(tensor([[1.0, 0.8], [0.8, 1]]))
self.assertTrue(torch.isclose(mean, expected_mean).all())
self.assertTrue(torch.isclose(scale_tril, expected_scale_tril).all())
def test_mean_scale_tril_for_iids(self):
model = self.SampleNormalModel()
foo_key = model.foo()
nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(foo_key)
val = tensor([[2.0, 2.0], [2.0, 2.0]])
val.requires_grad_(True)
distribution = dist.Normal(
tensor([[1.0, 1.0], [1.0, 1.0]]), tensor([[1.0, 1.0], [1.0, 1.0]])
)
queries = [foo_key]
observations = {}
world = World.initialize_world(queries, observations)
world_vars = world._variables
world_vars[foo_key] = Variable(
value=val,
distribution=distribution,
)
nw_proposer.learning_rate_ = 1.0
prop_dist = nw_proposer.get_proposal_distribution(world).base_dist
mean, scale_tril = prop_dist.mean, prop_dist.scale_tril
expected_mean = tensor([1.0, 1.0, 1.0, 1.0])
expected_scale_tril = torch.eye(4)
self.assertTrue(torch.isclose(mean, expected_mean).all())
self.assertTrue(torch.isclose(scale_tril, expected_scale_tril).all())
def test_multi_mean_scale_tril_computation_in_inference(self):
model = self.SampleLogisticRegressionModel()
theta_0_key = model.theta_0()
theta_1_key = model.theta_1()
nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(theta_0_key)
x_0_key = model.x(0)
x_1_key = model.x(1)
y_0_key = model.y(0)
y_1_key = model.y(1)
theta_0_value = tensor(1.5708)
theta_0_value.requires_grad_(True)
x_0_value = tensor(0.7654)
x_1_value = tensor(-6.6737)
theta_1_value = tensor(-0.4459)
theta_0_distribution = dist.Normal(torch.tensor(0.0), torch.tensor(1.0))
queries = [theta_0_key, theta_1_key]
observations = {}
world = World.initialize_world(queries, observations)
world_vars = world._variables
world_vars[theta_0_key] = Variable(
value=theta_0_value,
distribution=theta_0_distribution,
children=set({y_0_key, y_1_key}),
)
world_vars[theta_1_key] = Variable(
value=theta_1_value,
distribution=theta_0_distribution,
children=set({y_0_key, y_1_key}),
)
x_distribution = dist.Normal(torch.tensor(0.0), torch.tensor(5.0))
world_vars[x_0_key] = Variable(
value=x_0_value,
distribution=x_distribution,
children=set({y_0_key, y_1_key}),
)
world_vars[x_1_key] = Variable(
value=x_1_value,
distribution=x_distribution,
children=set({y_0_key, y_1_key}),
)
y = theta_0_value + theta_1_value * x_0_value
probs_0 = 1 / (1 + (y * -1).exp())
y_0_distribution = dist.Bernoulli(probs_0)
world_vars[y_0_key] = Variable(
value=tensor(1.0),
distribution=y_0_distribution,
parents=set({theta_0_key, theta_1_key, x_0_key}),
)
y = theta_0_value + theta_1_value * x_1_value
probs_1 = 1 / (1 + (y * -1).exp())
y_1_distribution = dist.Bernoulli(probs_1)
world_vars[y_1_key] = Variable(
value=tensor(1.0),
distribution=y_1_distribution,
parents=set({theta_0_key, theta_1_key, x_1_key}),
)
nw_proposer.learning_rate_ = 1.0
prop_dist = nw_proposer.get_proposal_distribution(world).base_dist
mean, scale_tril = prop_dist.mean, prop_dist.scale_tril
score = theta_0_distribution.log_prob(theta_0_value)
score += (
1 / (1 + (-1 * (theta_0_value + theta_1_value * x_0_value)).exp())
).log()
score += (
1 / (1 + (-1 * (theta_0_value + theta_1_value * x_1_value)).exp())
).log()
expected_first_gradient = torch.autograd.grad(
score, theta_0_value, create_graph=True
)[0]
expected_second_gradient = torch.autograd.grad(
expected_first_gradient, theta_0_value
)[0]
expected_covar = expected_second_gradient.reshape(1, 1).inverse() * -1
expected_scale_tril = torch.linalg.cholesky(expected_covar)
self.assertAlmostEqual(
expected_scale_tril.item(), scale_tril.item(), delta=0.001
)
expected_first_gradient = expected_first_gradient.unsqueeze(0)
expected_mean = (
theta_0_value.unsqueeze(0)
+ expected_first_gradient.unsqueeze(0).mm(expected_covar)
).squeeze(0)
self.assertAlmostEqual(mean.item(), expected_mean.item(), delta=0.001)
proposal_value = (
dist.MultivariateNormal(mean, scale_tril=scale_tril)
.sample()
.reshape(theta_0_value.shape)
)
proposal_value.requires_grad_(True)
world_vars[theta_0_key].value = proposal_value
y = proposal_value + theta_1_value * x_0_value
probs_0 = 1 / (1 + (y * -1).exp())
y_0_distribution = dist.Bernoulli(probs_0)
world_vars[y_0_key].distribution = y_0_distribution
world_vars[y_0_key].log_prob = y_0_distribution.log_prob(tensor(1.0))
y = proposal_value + theta_1_value * x_1_value
probs_1 = 1 / (1 + (y * -1).exp())
y_1_distribution = dist.Bernoulli(probs_1)
world_vars[y_1_key].distribution = y_1_distribution
nw_proposer.learning_rate_ = 1.0
prop_dist = nw_proposer.get_proposal_distribution(world).base_dist
mean, scale_tril = prop_dist.mean, prop_dist.scale_tril
        score = theta_0_distribution.log_prob(proposal_value)
score += (
1 / (1 + (-1 * (proposal_value + theta_1_value * x_0_value)).exp())
).log()
score += (
1 / (1 + (-1 * (proposal_value + theta_1_value * x_1_value)).exp())
).log()
expected_first_gradient = torch.autograd.grad(
score, proposal_value, create_graph=True
)[0]
expected_second_gradient = torch.autograd.grad(
expected_first_gradient, proposal_value
)[0]
expected_covar = expected_second_gradient.reshape(1, 1).inverse() * -1
expected_scale_tril = torch.linalg.cholesky(expected_covar)
self.assertAlmostEqual(
expected_scale_tril.item(), scale_tril.item(), delta=0.001
)
expected_first_gradient = expected_first_gradient.unsqueeze(0)
expected_mean = (
proposal_value.unsqueeze(0)
+ expected_first_gradient.unsqueeze(0).mm(expected_covar)
).squeeze(0)
self.assertAlmostEqual(mean.item(), expected_mean.item(), delta=0.001)
self.assertAlmostEqual(
scale_tril.item(), expected_scale_tril.item(), delta=0.001
)
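    # What the by-hand computation above reproduces is a Newton step: with
    # gradient g and Hessian H of the log joint at the current value x, the
    # proposal is MultivariateNormal(x + g @ (-H)^{-1}, (-H)^{-1}), i.e. a
    # Gaussian centered at the Newton update whose covariance is the negative
    # inverse Hessian.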
def test_adaptive_alpha_beta_computation(self):
model = self.SampleLogisticRegressionModel()
theta_0_key = model.theta_0()
nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(theta_0_key)
nw_proposer.learning_rate_ = tensor(0.0416, dtype=torch.float64)
nw_proposer.running_mean_, nw_proposer.running_var_ = (
tensor(0.079658),
tensor(0.0039118),
)
nw_proposer.accepted_samples_ = 37
alpha, beta = nw_proposer.compute_beta_priors_from_accepted_lr()
self.assertAlmostEqual(nw_proposer.running_mean_.item(), 0.0786, delta=0.0001)
self.assertAlmostEqual(nw_proposer.running_var_.item(), 0.00384, delta=0.00001)
self.assertAlmostEqual(alpha.item(), 1.4032, delta=0.001)
self.assertAlmostEqual(beta.item(), 16.4427, delta=0.001)
def test_adaptive_vectorized_alpha_beta_computation(self):
model = self.SampleLogisticRegressionModel()
theta_0_key = model.theta_0()
nw_proposer = SingleSiteRealSpaceNewtonianMonteCarloProposer(theta_0_key)
nw_proposer.learning_rate_ = tensor([0.0416, 0.0583], dtype=torch.float64)
nw_proposer.running_mean_, nw_proposer.running_var_ = (
tensor([0.079658, 0.089861]),
tensor([0.0039118, 0.0041231]),
)
nw_proposer.accepted_samples_ = 37
alpha, beta = nw_proposer.compute_beta_priors_from_accepted_lr()
self.assertListEqual(
[round(x.item(), 4) for x in list(nw_proposer.running_mean_)],
[0.0786, 0.089],
)
self.assertListEqual(
[round(x.item(), 4) for x in list(nw_proposer.running_var_)],
[0.0038, 0.004],
)
self.assertListEqual(
[round(x.item(), 4) for x in list(alpha)], [1.4032, 1.6984]
)
self.assertListEqual(
[round(x.item(), 4) for x in list(beta)], [16.4427, 17.3829]
)
| beanmachine-main | tests/ppl/inference/proposer/nmc/single_site_real_space_newtonian_monte_carlo_proposer_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for fix_problems.py"""
import unittest
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.fix_problems import fix_problems
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from torch import tensor
class FixProblemsTest(unittest.TestCase):
def test_fix_problems_01(self) -> None:
# Problems that need to be fixed:
#
# * Single-valued tensors are used in contexts where scalars are needed.
# * A multiplication of 0.5 by a probability (from a beta) is used both
# as a probability (by a Bernoulli) and a real (by a normal).
#
# The solutions:
#
# * The constants are replaced by constants of the appropriate kinds.
# * A to-real node is inserted between the multiplication and the normal.
#
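# The graph built by hand below corresponds roughly to this model:
# @rv def beta():
# return Beta(2.0, 2.0)
# @rv def norm():
# return Normal(0.5 * beta(), 1.0)
# @rv def bern():
# return Bernoulli(0.5 * beta())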
self.maxDiff = None
bmg = BMGraphBuilder()
one = bmg.add_constant(tensor(1.0))
two = bmg.add_constant(tensor(2.0))
half = bmg.add_constant(tensor(0.5))
beta = bmg.add_beta(two, two)
betas = bmg.add_sample(beta)
mult = bmg.add_multiplication(half, betas)
norm = bmg.add_normal(mult, one)
bern = bmg.add_bernoulli(mult)
bmg.add_sample(norm)
bmg.add_sample(bern)
bmg.add_query(mult, RVIdentifier(wrapper=lambda a, b: a, arguments=(1, 1)))
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N00[label="0.5:P"];
N01[label="2.0:N"];
N02[label="Beta:P"];
N03[label="Sample:P"];
N04[label="*:P"];
N05[label="1.0:OH"];
N06[label="Normal:R"];
N07[label="Sample:R"];
N08[label="Bernoulli:B"];
N09[label="Sample:B"];
N10[label="Query:P"];
N00 -> N04[label="left:P"];
N01 -> N02[label="alpha:R+"];
N01 -> N02[label="beta:R+"];
N02 -> N03[label="operand:P"];
N03 -> N04[label="right:P"];
N04 -> N06[label="mu:R"];
N04 -> N08[label="probability:P"];
N04 -> N10[label="operator:any"];
N05 -> N06[label="sigma:R+"];
N06 -> N07[label="operand:R"];
N08 -> N09[label="operand:B"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
bmg, error_report = fix_problems(bmg)
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N00[label="0.5:P"];
N01[label="2.0:N"];
N02[label="1.0:OH"];
N03[label="0.5:P"];
N04[label="2.0:R+"];
N05[label="Beta:P"];
N06[label="Sample:P"];
N07[label="*:P"];
N08[label="ToReal:R"];
N09[label="1.0:R+"];
N10[label="Normal:R"];
N11[label="Sample:R"];
N12[label="Bernoulli:B"];
N13[label="Sample:B"];
N14[label="Query:P"];
N03 -> N07[label="left:P"];
N04 -> N05[label="alpha:R+"];
N04 -> N05[label="beta:R+"];
N05 -> N06[label="operand:P"];
N06 -> N07[label="right:P"];
N07 -> N08[label="operand:<=R"];
N07 -> N12[label="probability:P"];
N07 -> N14[label="operator:any"];
N08 -> N10[label="mu:R"];
N09 -> N10[label="sigma:R+"];
N10 -> N11[label="operand:R"];
N12 -> N13[label="operand:B"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_problems_2(self) -> None:
"""test_fix_problems_2"""
# Problems that need to be fixed:
#
# * Single-valued tensors are used in contexts where scalars are needed.
# * A Boolean (from a Bernoulli) is used in an addition to make a positive real.
# * The Boolean is also used as a real and a natural.
#
# The solutions:
#
# * The constants are replaced by constants of the appropriate kinds.
# * A to-positive-real node is inserted between the addition and the Bernoulli.
# * A to-real node is inserted between the normal and the Bernoulli
# * An if-then-else is inserted to make the Bernoulli into a natural.
#
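# (The if-then-else encodes the bool-to-natural conversion as
# "1 if b else 0"; it appears as nodes N11-N13 in the fixed graph below.)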
self.maxDiff = None
bmg = BMGraphBuilder()
# @rv def bern():
# return Bernoulli(tensor(0.5))
# @rv def norm():
# return Normal(bern(), bern() + tensor(1.0))
# @rv def bino():
# return Binomial(bern(), 0.5)
one = bmg.add_constant(tensor(1.0))
half = bmg.add_constant(tensor(0.5))
bern = bmg.add_bernoulli(half)
berns = bmg.add_sample(bern)
plus = bmg.add_addition(berns, one)
norm = bmg.add_normal(berns, plus)
bino = bmg.add_binomial(berns, half)
bmg.add_sample(norm)
bmg.add_sample(bino)
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N0[label="0.5:P"];
N1[label="Bernoulli:B"];
N2[label="Sample:B"];
N3[label="1.0:OH"];
N4[label="+:R+"];
N5[label="Normal:R"];
N6[label="Sample:R"];
N7[label="Binomial:N"];
N8[label="Sample:N"];
N0 -> N1[label="probability:P"];
N0 -> N7[label="probability:P"];
N1 -> N2[label="operand:B"];
N2 -> N4[label="left:R+"];
N2 -> N5[label="mu:R"];
N2 -> N7[label="count:N"];
N3 -> N4[label="right:R+"];
N4 -> N5[label="sigma:R+"];
N5 -> N6[label="operand:R"];
N7 -> N8[label="operand:N"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
bmg, error_report = fix_problems(bmg)
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N00[label="0.5:P"];
N01[label="1.0:OH"];
N02[label="0.5:P"];
N03[label="Bernoulli:B"];
N04[label="Sample:B"];
N05[label="ToReal:R"];
N06[label="ToPosReal:R+"];
N07[label="1.0:R+"];
N08[label="+:R+"];
N09[label="Normal:R"];
N10[label="Sample:R"];
N11[label="1:N"];
N12[label="0:N"];
N13[label="if:N"];
N14[label="Binomial:N"];
N15[label="Sample:N"];
N02 -> N03[label="probability:P"];
N02 -> N14[label="probability:P"];
N03 -> N04[label="operand:B"];
N04 -> N05[label="operand:<=R"];
N04 -> N06[label="operand:<=R+"];
N04 -> N13[label="condition:B"];
N05 -> N09[label="mu:R"];
N06 -> N08[label="left:R+"];
N07 -> N08[label="right:R+"];
N08 -> N09[label="sigma:R+"];
N09 -> N10[label="operand:R"];
N11 -> N13[label="consequence:N"];
N12 -> N13[label="alternative:N"];
N13 -> N14[label="count:N"];
N14 -> N15[label="operand:N"];
}"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_problems_3(self) -> None:
"""test_fix_problems_3"""
# This test has some problems that cannot be fixed.
#
# * Two-valued tensor constant used as probability
# * Negative number for standard deviation
# * Fraction used for count
# * Number greater than 1.0 used as probability
# @rv def bern():
# return Bernoulli(tensor([0.5, 0.5]))
# @rv def norm():
# return Normal(-1.0, -1.0)
# @rv def bino():
# return Binomial(3.14, 3.14)
self.maxDiff = None
bmg = BMGraphBuilder()
pi = bmg.add_constant(3.14)
mone = bmg.add_constant(-1.0)
half = bmg.add_constant(tensor([0.5, 0.5]))
bern = bmg.add_bernoulli(half)
norm = bmg.add_normal(mone, mone)
bino = bmg.add_binomial(pi, pi)
bmg.add_sample(bern)
bmg.add_sample(norm)
bmg.add_sample(bino)
bmg, error_report = fix_problems(bmg)
observed = str(error_report)
expected = """
The count of a binomial is required to be a natural but is a positive real.
The probability of a binomial is required to be a probability but is a positive real.
The sigma of a normal is required to be a positive real but is a negative real.
"""
self.assertEqual(observed.strip(), expected.strip())
def test_fix_problems_4(self) -> None:
"""test_fix_problems_4"""
# The problem we have here is:
#
# * Multiplication is only defined on probability or larger
# * We have a multiplication of a bool by a natural
# * We require a natural.
#
# In this scenario, the problem fixer turns the multiplication
# into an if-then-else.
#
# @rv def berns():
# return Bernoulli(0.5)
# @rv def nats():
# return Binomial(2, 0.5)
# @rv def bino():
# return Binomial(berns() * nats(), 0.5)
self.maxDiff = None
bmg = BMGraphBuilder()
two = bmg.add_natural(2)
half = bmg.add_probability(0.5)
bern = bmg.add_bernoulli(half)
berns = bmg.add_sample(bern)
nat = bmg.add_binomial(two, half)
nats = bmg.add_sample(nat)
mult = bmg.add_multiplication(berns, nats)
bino = bmg.add_binomial(mult, half)
bmg.add_sample(bino)
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N0[label="0.5:P"];
N1[label="Bernoulli:B"];
N2[label="Sample:B"];
N3[label="2:N"];
N4[label="Binomial:N"];
N5[label="Sample:N"];
N6[label="*:R+"];
N7[label="Binomial:N"];
N8[label="Sample:N"];
N0 -> N1[label="probability:P"];
N0 -> N4[label="probability:P"];
N0 -> N7[label="probability:P"];
N1 -> N2[label="operand:B"];
N2 -> N6[label="left:R+"];
N3 -> N4[label="count:N"];
N4 -> N5[label="operand:N"];
N5 -> N6[label="right:R+"];
N6 -> N7[label="count:N"];
N7 -> N8[label="operand:N"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
bmg, error_report = fix_problems(bmg)
self.assertEqual("", str(error_report).strip())
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N00[label="0.5:P"];
N01[label="Bernoulli:B"];
N02[label="Sample:B"];
N03[label="2:N"];
N04[label="Binomial:N"];
N05[label="Sample:N"];
N06[label="*:R+"];
N07[label="0:N"];
N08[label="if:N"];
N09[label="Binomial:N"];
N10[label="Sample:N"];
N11[label="0.0:Z"];
N00 -> N01[label="probability:P"];
N00 -> N04[label="probability:P"];
N00 -> N09[label="probability:P"];
N01 -> N02[label="operand:B"];
N02 -> N06[label="left:R+"];
N02 -> N08[label="condition:B"];
N03 -> N04[label="count:N"];
N04 -> N05[label="operand:N"];
N05 -> N06[label="right:R+"];
N05 -> N08[label="consequence:N"];
N07 -> N08[label="alternative:N"];
N08 -> N09[label="count:N"];
N09 -> N10[label="operand:N"];
}"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_problems_5(self) -> None:
"""test_fix_problems_5"""
# Division becomes power.
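# That is, hcs(1) / hcs(2) is rewritten as hcs(1) * (hcs(2) ** -1.0);
# see nodes N07 through N09 in the fixed graph below.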
self.maxDiff = None
bmg = BMGraphBuilder()
# @rv def hcs(n):
# return HalfCauchy(1.0)
# @rv def norm():
# return Normal(log(hcs(3) ** (hcs(1) / hcs(2))), 1.0)
one = bmg.add_constant(1.0)
hc = bmg.add_halfcauchy(one)
hcs1 = bmg.add_sample(hc)
hcs2 = bmg.add_sample(hc)
hcs3 = bmg.add_sample(hc)
q = bmg.add_division(hcs1, hcs2)
p = bmg.add_power(hcs3, q)
lg = bmg.add_log(p)
norm = bmg.add_normal(lg, one)
bmg.add_sample(norm)
bmg, error_report = fix_problems(bmg)
observed = str(error_report)
expected = ""
self.assertEqual(observed.strip(), expected.strip())
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N00[label="1.0:OH"];
N01[label="1.0:R+"];
N02[label="HalfCauchy:R+"];
N03[label="Sample:R+"];
N04[label="Sample:R+"];
N05[label="/:U"];
N06[label="Sample:R+"];
N07[label="-1.0:R"];
N08[label="**:R+"];
N09[label="*:R+"];
N10[label="**:R+"];
N11[label="Log:R"];
N12[label="Normal:R"];
N13[label="Sample:R"];
N14[label="-1.0:R-"];
N01 -> N02[label="scale:R+"];
N01 -> N12[label="sigma:R+"];
N02 -> N03[label="operand:R+"];
N02 -> N04[label="operand:R+"];
N02 -> N06[label="operand:R+"];
N03 -> N05[label="left:any"];
N03 -> N09[label="left:R+"];
N04 -> N05[label="right:any"];
N04 -> N08[label="left:R+"];
N06 -> N10[label="left:R+"];
N07 -> N08[label="right:R"];
N08 -> N09[label="right:R+"];
N09 -> N10[label="right:R+"];
N10 -> N11[label="operand:R+"];
N11 -> N12[label="mu:R"];
N12 -> N13[label="operand:R"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_problems_6(self) -> None:
"""test_fix_problems_6"""
# This test shows that we can rewrite a division by a constant
# into a multiplication.
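# Here hcs() / 2.5 becomes hcs() * 0.4 (nodes N06 and N07 in the fixed
# graph below), since multiplying by the reciprocal of a constant is
# equivalent and is supported by BMG.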
self.maxDiff = None
bmg = BMGraphBuilder()
# def hcs(): return HalfCauchy(1.0)
# def norm(): return Normal(hcs() / 2.5, 1.0)
one = bmg.add_constant(1.0)
two = bmg.add_constant(2.5)
hc = bmg.add_halfcauchy(one)
hcs = bmg.add_sample(hc)
q = bmg.add_division(hcs, two)
norm = bmg.add_normal(q, one)
bmg.add_sample(norm)
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N0[label="1.0:OH"];
N1[label="HalfCauchy:R+"];
N2[label="Sample:R+"];
N3[label="2.5:R+"];
N4[label="/:U"];
N5[label="Normal:U"];
N6[label="Sample:U"];
N0 -> N1[label="scale:R+"];
N0 -> N5[label="sigma:any"];
N1 -> N2[label="operand:R+"];
N2 -> N4[label="left:any"];
N3 -> N4[label="right:any"];
N4 -> N5[label="mu:any"];
N5 -> N6[label="operand:any"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
bmg, error_report = fix_problems(bmg)
self.assertEqual("", str(error_report).strip())
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N00[label="1.0:OH"];
N01[label="1.0:R+"];
N02[label="HalfCauchy:R+"];
N03[label="Sample:R+"];
N04[label="2.5:R+"];
N05[label="/:U"];
N06[label="0.4:R+"];
N07[label="*:R+"];
N08[label="ToReal:R"];
N09[label="Normal:R"];
N10[label="Sample:R"];
N11[label="0.4:P"];
N01 -> N02[label="scale:R+"];
N01 -> N09[label="sigma:R+"];
N02 -> N03[label="operand:R+"];
N03 -> N05[label="left:any"];
N03 -> N07[label="left:R+"];
N04 -> N05[label="right:any"];
N06 -> N07[label="right:R+"];
N07 -> N08[label="operand:<=R"];
N08 -> N09[label="mu:R"];
N09 -> N10[label="operand:R"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_problems_7(self) -> None:
"""test_fix_problems_7"""
# The problem here is that we have two uniform distributions that
# we cannot turn into a flat distribution, and one we can. We therefore
# expect that we will get two errors.
self.maxDiff = None
bmg = BMGraphBuilder()
# @rv def foo1():
# return Uniform(0.0, 1.0) # OK
# @rv def foo2():
# return Uniform(1.0, 2.0) # Bad
# @rv def foo3():
# return Uniform(1.0, foo2()) # Bad
zero = bmg.add_constant(0.0)
one = bmg.add_constant(1.0)
two = bmg.add_constant(2.0)
foo1 = bmg.add_uniform(zero, one)
bmg.add_sample(foo1)
foo2 = bmg.add_uniform(one, two)
foo2s = bmg.add_sample(foo2)
foo3 = bmg.add_uniform(one, foo2s)
bmg.add_sample(foo3)
bmg, error_report = fix_problems(bmg)
observed = str(error_report)
expected = """
The model uses a uniform operation unsupported by Bean Machine Graph.
The unsupported node is the operand of a sample.
The model uses a uniform operation unsupported by Bean Machine Graph.
The unsupported node is the operand of a sample.
"""
self.assertEqual(observed.strip(), expected.strip())
def test_fix_problems_8(self) -> None:
"""test_fix_problems_8"""
# This test shows that we can rewrite a chi2 into a gamma.
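# This relies on the identity Chi2(df) = Gamma(df / 2, rate = 0.5): the
# fixed graph below contains a 0.5 constant, a multiplication computing
# df * 0.5 for the concentration, and a Gamma node.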
self.maxDiff = None
bmg = BMGraphBuilder()
# @rv def hcs():
# return HalfCauchy(1.0)
# @rv def chi2():
# return Chi2(hcs())
one = bmg.add_constant(1.0)
hc = bmg.add_halfcauchy(one)
hcs = bmg.add_sample(hc)
chi2 = bmg.add_chi2(hcs)
bmg.add_sample(chi2)
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N0[label="1.0:OH"];
N1[label="HalfCauchy:R+"];
N2[label="Sample:R+"];
N3[label="Chi2:U"];
N4[label="Sample:U"];
N0 -> N1[label="scale:R+"];
N1 -> N2[label="operand:R+"];
N2 -> N3[label="df:any"];
N3 -> N4[label="operand:any"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
bmg, error_report = fix_problems(bmg)
self.assertEqual("", str(error_report).strip())
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N0[label="1.0:OH"];
N1[label="1.0:R+"];
N2[label="HalfCauchy:R+"];
N3[label="Sample:R+"];
N4[label="Chi2:U"];
N5[label="0.5:R+"];
N6[label="*:R+"];
N7[label="Gamma:R+"];
N8[label="Sample:R+"];
N1 -> N2[label="scale:R+"];
N2 -> N3[label="operand:R+"];
N3 -> N4[label="df:any"];
N3 -> N6[label="left:R+"];
N5 -> N6[label="right:R+"];
N5 -> N7[label="rate:R+"];
N6 -> N7[label="concentration:R+"];
N7 -> N8[label="operand:R+"];
}"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_problems_9(self) -> None:
"""test_fix_problems_9"""
# The problem we have here is that natural raised to bool
# is not supported in BMG without converting both to
# positive real, but natural raised to bool is plainly
# natural. We generate an if-then-else.
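# (Concretely, n ** True == n and n ** False == 1, so nats() ** berns()
# becomes "nats() if berns() else 1" -- see nodes N03, N07 and N08 in
# the fixed graph below.)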
# @rv def berns():
# return Bernoulli(0.5)
# @rv def nats():
# return Binomial(2, 0.5)
# @rv def bino():
# return Binomial(nats() ** berns(), 0.5)
self.maxDiff = None
bmg = BMGraphBuilder()
two = bmg.add_natural(2)
half = bmg.add_probability(0.5)
bern = bmg.add_bernoulli(half)
berns = bmg.add_sample(bern)
nat = bmg.add_binomial(two, half)
nats = bmg.add_sample(nat)
powr = bmg.add_power(nats, berns)
bino = bmg.add_binomial(powr, half)
bmg.add_sample(bino)
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N0[label="2:N"];
N1[label="0.5:P"];
N2[label="Binomial:N"];
N3[label="Sample:N"];
N4[label="Bernoulli:B"];
N5[label="Sample:B"];
N6[label="**:R+"];
N7[label="Binomial:N"];
N8[label="Sample:N"];
N0 -> N2[label="count:N"];
N1 -> N2[label="probability:P"];
N1 -> N4[label="probability:P"];
N1 -> N7[label="probability:P"];
N2 -> N3[label="operand:N"];
N3 -> N6[label="left:R+"];
N4 -> N5[label="operand:B"];
N5 -> N6[label="right:R+"];
N6 -> N7[label="count:N"];
N7 -> N8[label="operand:N"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
bmg, error_report = fix_problems(bmg)
self.assertEqual("", str(error_report).strip())
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N00[label="2:N"];
N01[label="0.5:P"];
N02[label="Binomial:N"];
N03[label="Sample:N"];
N04[label="Bernoulli:B"];
N05[label="Sample:B"];
N06[label="**:R+"];
N07[label="1:N"];
N08[label="if:N"];
N09[label="Binomial:N"];
N10[label="Sample:N"];
N11[label="1.0:OH"];
N00 -> N02[label="count:N"];
N01 -> N02[label="probability:P"];
N01 -> N04[label="probability:P"];
N01 -> N09[label="probability:P"];
N02 -> N03[label="operand:N"];
N03 -> N06[label="left:R+"];
N03 -> N08[label="consequence:N"];
N04 -> N05[label="operand:B"];
N05 -> N06[label="right:R+"];
N05 -> N08[label="condition:B"];
N07 -> N08[label="alternative:N"];
N08 -> N09[label="count:N"];
N09 -> N10[label="operand:N"];
}"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_problems_10(self) -> None:
"""test_fix_problems_10"""
# Demonstrate that we can rewrite 1-p for probability p into
# complement(p) -- which is of type P -- instead of
# add(1, negate(p)) which is of type R.
# TODO: Also demonstrate that this works for 1-b
# TODO: Get this working for the "not" operator, since 1-b
# and "not b" are the same thing for bool b.
self.maxDiff = None
bmg = BMGraphBuilder()
# @rv def beta():
# return Beta(2.0, 2.0)
# @rv def bern():
# return Bernoulli(1 - beta()) # good!
one = bmg.add_constant(1.0)
two = bmg.add_constant(2.0)
beta = bmg.add_beta(two, two)
betas = bmg.add_sample(beta)
negate = bmg.add_negate(betas)
complement = bmg.add_addition(one, negate)
bern = bmg.add_bernoulli(complement)
bmg.add_sample(bern)
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N0[label="1.0:OH"];
N1[label="2.0:N"];
N2[label="Beta:P"];
N3[label="Sample:P"];
N4[label="-:R-"];
N5[label="+:R"];
N6[label="Bernoulli:B"];
N7[label="Sample:B"];
N0 -> N5[label="left:R"];
N1 -> N2[label="alpha:R+"];
N1 -> N2[label="beta:R+"];
N2 -> N3[label="operand:P"];
N3 -> N4[label="operand:R+"];
N4 -> N5[label="right:R"];
N5 -> N6[label="probability:P"];
N6 -> N7[label="operand:B"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
bmg, error_report = fix_problems(bmg)
self.assertEqual("", str(error_report).strip())
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N0[label="2.0:N"];
N1[label="1.0:OH"];
N2[label="2.0:R+"];
N3[label="Beta:P"];
N4[label="Sample:P"];
N5[label="-:R-"];
N6[label="+:R"];
N7[label="complement:P"];
N8[label="Bernoulli:B"];
N9[label="Sample:B"];
N1 -> N6[label="left:R"];
N2 -> N3[label="alpha:R+"];
N2 -> N3[label="beta:R+"];
N3 -> N4[label="operand:P"];
N4 -> N5[label="operand:R+"];
N4 -> N7[label="operand:P"];
N5 -> N6[label="right:R"];
N7 -> N8[label="probability:P"];
N8 -> N9[label="operand:B"];
}"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_problems_11(self) -> None:
"""test_fix_problems_11"""
# Here we demonstrate that we treat the negative log of a
# probability as a positive real. (In a previous iteration
# we generated a special negative log node, but now we can
# do it directly without fixing up the graph.)
# @rv def beta1():
# return Beta(2.0, 2.0)
# @rv def beta2():
# return Beta(-beta1().log(), 2.0)
self.maxDiff = None
bmg = BMGraphBuilder()
two = bmg.add_constant(2.0)
beta1 = bmg.add_beta(two, two)
beta1s = bmg.add_sample(beta1)
logprob = bmg.add_log(beta1s)
neglogprob = bmg.add_negate(logprob)
beta2 = bmg.add_beta(neglogprob, two)
bmg.add_sample(beta2)
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N0[label="2.0:N"];
N1[label="Beta:P"];
N2[label="Sample:P"];
N3[label="Log:R-"];
N4[label="-:R+"];
N5[label="Beta:P"];
N6[label="Sample:P"];
N0 -> N1[label="alpha:R+"];
N0 -> N1[label="beta:R+"];
N0 -> N5[label="beta:R+"];
N1 -> N2[label="operand:P"];
N2 -> N3[label="operand:P"];
N3 -> N4[label="operand:R-"];
N4 -> N5[label="alpha:R+"];
N5 -> N6[label="operand:P"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
bmg, error_report = fix_problems(bmg)
self.assertEqual("", str(error_report).strip())
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N0[label="2.0:N"];
N1[label="2.0:R+"];
N2[label="Beta:P"];
N3[label="Sample:P"];
N4[label="Log:R-"];
N5[label="-:R+"];
N6[label="Beta:P"];
N7[label="Sample:P"];
N1 -> N2[label="alpha:R+"];
N1 -> N2[label="beta:R+"];
N1 -> N6[label="beta:R+"];
N2 -> N3[label="operand:P"];
N3 -> N4[label="operand:P"];
N4 -> N5[label="operand:R-"];
N5 -> N6[label="alpha:R+"];
N6 -> N7[label="operand:P"];
}"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_problems_12(self) -> None:
"""test_fix_problems_12"""
# We flag impossible observations as errors.
self.maxDiff = None
bmg = BMGraphBuilder()
# @rv def bern():
# return Bernoulli(0.5)
# @rv def bino():
# return Binomial(2, 0.5)
# @rv def norm():
# return Normal(0, 1)
zero = bmg.add_constant(0.0)
one = bmg.add_constant(1.0)
two = bmg.add_constant(2.0)
half = bmg.add_constant(0.5)
bern = bmg.add_bernoulli(half)
berns = bmg.add_sample(bern)
bino = bmg.add_binomial(two, half)
binos = bmg.add_sample(bino)
norm = bmg.add_normal(zero, one)
norms = bmg.add_sample(norm)
bmg.add_observation(berns, -1.5) # Bad
bmg.add_observation(binos, 5.25) # Bad
bmg.add_observation(norms, True) # OK; can be converted to 1.0
bmg, error_report = fix_problems(bmg)
observed = str(error_report)
expected = """
A Bernoulli distribution is observed to have value -1.5 but only produces samples of type bool.
A binomial distribution is observed to have value 5.25 but only produces samples of type natural.
"""
self.assertEqual(observed.strip(), expected.strip())
def test_fix_problems_13(self) -> None:
"""test_fix_problems_13"""
# Observations of the wrong type are fixed up.
self.maxDiff = None
bmg = BMGraphBuilder()
# @rv def bern():
# return Bernoulli(0.5)
# @rv def bino():
# return Binomial(2, 0.5)
# @rv def norm():
# return Normal(0, 1)
zero = bmg.add_constant(0.0)
one = bmg.add_constant(1.0)
two = bmg.add_constant(2.0)
half = bmg.add_constant(0.5)
bern = bmg.add_bernoulli(half)
berns = bmg.add_sample(bern)
bino = bmg.add_binomial(two, half)
binos = bmg.add_sample(bino)
norm = bmg.add_normal(zero, one)
norms = bmg.add_sample(norm)
bmg.add_observation(berns, 0.0) # Should be bool
bmg.add_observation(binos, 5.0) # Should be int
bmg.add_observation(norms, True) # Should be real
bmg, error_report = fix_problems(bmg)
self.assertEqual(str(error_report).strip(), "")
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
# The observations have been converted to the correct types:
expected = """
digraph "graph" {
N00[label="0.5:P"];
N01[label="0.5:P"];
N02[label="Bernoulli:B"];
N03[label="Sample:B"];
N04[label="Observation False:B"];
N05[label="2.0:N"];
N06[label="2:N"];
N07[label="Binomial:N"];
N08[label="Sample:N"];
N09[label="Observation 5:N"];
N10[label="0.0:Z"];
N11[label="1.0:OH"];
N12[label="0.0:R"];
N13[label="1.0:R+"];
N14[label="Normal:R"];
N15[label="Sample:R"];
N16[label="Observation 1.0:R"];
N01 -> N02[label="probability:P"];
N01 -> N07[label="probability:P"];
N02 -> N03[label="operand:B"];
N03 -> N04[label="operand:any"];
N06 -> N07[label="count:N"];
N07 -> N08[label="operand:N"];
N08 -> N09[label="operand:any"];
N12 -> N14[label="mu:R"];
N13 -> N14[label="sigma:R+"];
N14 -> N15[label="operand:R"];
N15 -> N16[label="operand:any"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_problems_14(self) -> None:
"""test_fix_problems_14"""
# Fixes for problems involving negative reals.
self.maxDiff = None
bmg = BMGraphBuilder()
# Right now the only node we have of type negative real is
# a constant; if we force a scenario where a negative real
# constant is used in a context where a real is needed,
# we generate a new real constant.
m = bmg.add_neg_real(-1.0)
s = bmg.add_pos_real(1.0)
norm = bmg.add_normal(m, s)
bmg.add_sample(norm)
bmg, error_report = fix_problems(bmg)
self.assertEqual(str(error_report).strip(), "")
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
)
expected = """
digraph "graph" {
N0[label="-1.0:R-"];
N1[label="-1.0:R"];
N2[label="1.0:R+"];
N3[label="Normal:R"];
N4[label="Sample:R"];
N1 -> N3[label="mu:R"];
N2 -> N3[label="sigma:R+"];
N3 -> N4[label="operand:R"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/fix_problems_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# BM -> BMG compiler index tests
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Binomial, HalfCauchy, Normal
# Simplexes are tested in dirichlet_test.py
# TODO: Test array of Booleans
@bm.random_variable
def flip():
return Bernoulli(0.5)
@bm.random_variable
def real():
return Normal(tensor([1.5, -1.5])[flip()], 1.0)
@bm.random_variable
def pos_real():
return Normal(0.0, tensor([1.5, 2.5])[flip()])
@bm.random_variable
def neg_real():
return Bernoulli(tensor([-1.5, -2.5])[flip()].exp())
@bm.random_variable
def prob():
return Bernoulli(tensor([0.5, 0.25])[flip()])
@bm.random_variable
def natural():
return Binomial(tensor([2, 3])[flip()], 0.75)
@bm.random_variable
def normal():
return Normal(0.0, 1.0)
@bm.random_variable
def hc():
return HalfCauchy(0.0)
@bm.random_variable
def optimize_away_index():
t = tensor([normal(), hc()])
return Normal(t[0], t[1])
@bm.functional
def column_index():
t = tensor([[normal(), hc()], [hc(), normal()]])
return t[flip()][flip()]
@bm.functional
def tuple_index_0():
# Normal tensor, normal tuple index
t = tensor([[2.0, 3.0], [4.0, 5.0]])
return flip() * t[(1, 1)]
@bm.functional
def tuple_index_1():
# Normal tensor, stochastic tuple index
t = tensor([[2.0, 3.0], [4.0, 5.0]])
return t[flip(), flip()]
@bm.functional
def tuple_index_2():
# Stochastic tensor, normal tuple index
t = tensor([[normal(), hc()], [hc(), normal()]])
return t[1, 1]
@bm.functional
def tuple_index_3():
# Stochastic tensor, stochastic tuple index
t = tensor([[normal(), hc()], [hc(), normal()]])
return t[flip(), flip()]
@bm.functional
def negative_constant_index():
# Python allows an index to be negative; it means to start counting from
# the other end. BMG does not. Verify that we give an error message.
# TODO: Consider allowing this if the index is a constant; we can do
# a transformation to t[1] here.
t = tensor([hc(), normal()])
return t[-1]
@bm.functional
def unsupported_slice_1():
t = tensor([hc(), normal()])
return t[1::]
@bm.functional
def unsupported_slice_2():
t = tensor([1.0, 2.0])
return t[flip() : :]
class IndexTest(unittest.TestCase):
def test_index_constant_vector_stochastic_index(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(
[pos_real(), real(), neg_real(), prob(), natural()],
{},
)
expected = """
digraph "graph" {
N00[label=0.5];
N01[label=Bernoulli];
N02[label=Sample];
N03[label=0.0];
N04[label="[1.5,2.5]"];
N05[label=1];
N06[label=0];
N07[label=if];
N08[label=index];
N09[label=Normal];
N10[label=Sample];
N11[label=Query];
N12[label="[1.5,-1.5]"];
N13[label=index];
N14[label=1.0];
N15[label=Normal];
N16[label=Sample];
N17[label=Query];
N18[label="[-1.5,-2.5]"];
N19[label=index];
N20[label=Exp];
N21[label=Bernoulli];
N22[label=Sample];
N23[label=Query];
N24[label="[0.5,0.25]"];
N25[label=index];
N26[label=Bernoulli];
N27[label=Sample];
N28[label=Query];
N29[label="[2,3]"];
N30[label=index];
N31[label=0.75];
N32[label=Binomial];
N33[label=Sample];
N34[label=Query];
N00 -> N01;
N01 -> N02;
N02 -> N07;
N03 -> N09;
N04 -> N08;
N05 -> N07;
N06 -> N07;
N07 -> N08;
N07 -> N13;
N07 -> N19;
N07 -> N25;
N07 -> N30;
N08 -> N09;
N09 -> N10;
N10 -> N11;
N12 -> N13;
N13 -> N15;
N14 -> N15;
N15 -> N16;
N16 -> N17;
N18 -> N19;
N19 -> N20;
N20 -> N21;
N21 -> N22;
N22 -> N23;
N24 -> N25;
N25 -> N26;
N26 -> N27;
N27 -> N28;
N29 -> N30;
N30 -> N32;
N31 -> N32;
N32 -> N33;
N33 -> N34;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_index_stochastic_tensor_constant_index(self) -> None:
self.maxDiff = None
# Here we demonstrate that we can make a tensor containing graph
# nodes and index into that with a constant; the indexing operation
# is optimized out.
observed = BMGInference().to_dot([optimize_away_index()], {})
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=0.0];
N5[label=HalfCauchy];
N6[label=Sample];
N7[label=Normal];
N8[label=Sample];
N9[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N7;
N4 -> N5;
N5 -> N6;
N6 -> N7;
N7 -> N8;
N8 -> N9;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_column_index(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([column_index()], {})
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=0.0];
N05[label=HalfCauchy];
N06[label=Sample];
N07[label=0.5];
N08[label=Bernoulli];
N09[label=Sample];
N10[label=2];
N11[label=ToReal];
N12[label=ToMatrix];
N13[label=1];
N14[label=0];
N15[label=if];
N16[label=ColumnIndex];
N17[label=index];
N18[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N03 -> N12;
N03 -> N12;
N04 -> N05;
N05 -> N06;
N06 -> N11;
N07 -> N08;
N08 -> N09;
N09 -> N15;
N10 -> N12;
N10 -> N12;
N11 -> N12;
N11 -> N12;
N12 -> N16;
N13 -> N15;
N14 -> N15;
N15 -> N16;
N15 -> N17;
N16 -> N17;
N17 -> N18;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_tuple_index(self) -> None:
self.maxDiff = None
# Normal tensor, normal tuple index, so there should be no stochastic
# index operation in the graph:
observed = BMGInference().to_dot([tuple_index_0()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label=5];
N4[label=0];
N5[label=if];
N6[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N5;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# Normal tensor, stochastic tuple index:
observed = BMGInference().to_dot([tuple_index_1()], {})
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Bernoulli];
N2[label=Sample];
N3[label="[[2.0,3.0],\\\\n[4.0,5.0]]"];
N4[label=1];
N5[label=0];
N6[label=if];
N7[label=ColumnIndex];
N8[label=index];
N9[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N6;
N3 -> N7;
N4 -> N6;
N5 -> N6;
N6 -> N7;
N6 -> N8;
N7 -> N8;
N8 -> N9;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# Stochastic tensor, normal tuple index. Note that in this case
# we optimize away the stochastic tensor entirely since the
# index is a constant.
observed = BMGInference().to_dot([tuple_index_2()], {})
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=0.0];
N5[label=HalfCauchy];
N6[label=Sample];
N7[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N7;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# Stochastic tensor, stochastic tuple index.
observed = BMGInference().to_dot([tuple_index_3()], {})
expected = """
digraph "graph" {
N00[label=0.0];
N01[label=1.0];
N02[label=Normal];
N03[label=Sample];
N04[label=0.0];
N05[label=HalfCauchy];
N06[label=Sample];
N07[label=0.5];
N08[label=Bernoulli];
N09[label=Sample];
N10[label=2];
N11[label=ToReal];
N12[label=ToMatrix];
N13[label=1];
N14[label=0];
N15[label=if];
N16[label=ColumnIndex];
N17[label=index];
N18[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N03 -> N12;
N03 -> N12;
N04 -> N05;
N05 -> N06;
N06 -> N11;
N07 -> N08;
N08 -> N09;
N09 -> N15;
N10 -> N12;
N10 -> N12;
N11 -> N12;
N11 -> N12;
N12 -> N16;
N13 -> N15;
N14 -> N15;
N15 -> N16;
N15 -> N17;
N16 -> N17;
N17 -> N18;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_negative_index(self) -> None:
self.maxDiff = None
with self.assertRaises(ValueError) as ex:
BMGInference().to_dot([negative_constant_index()], {})
self.assertEqual(
"The right of an index is required to be a natural but is a negative real.",
str(ex.exception),
)
def test_unsupported_slice(self) -> None:
self.maxDiff = None
with self.assertRaises(ValueError) as ex:
BMGInference().to_dot([unsupported_slice_1()], {})
self.assertEqual(
"Stochastic slices are not yet implemented.",
str(ex.exception),
)
with self.assertRaises(ValueError) as ex:
BMGInference().to_dot([unsupported_slice_2()], {})
self.assertEqual(
"Stochastic slices are not yet implemented.",
str(ex.exception),
)
| beanmachine-main | tests/ppl/compiler/index_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test for tutorial on GMM with Poisson number of components"""
# This file is a manual replica of the Bento tutorial with the same name
# TODO: The disabled test generates the following error:
# E TypeError: Distribution 'Poisson' is not supported by Bean Machine Graph.
# This will need to be fixed for the OSS readiness task.
import logging
import unittest
# Comments after imports suggest alternative comment style (for original tutorial)
import beanmachine.ppl as bm
import torch # from torch import manual_seed, tensor
import torch.distributions as dist # from torch.distributions import Bernoulli, Normal, Uniform
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
# This makes the results deterministic and reproducible.
logging.getLogger("beanmachine").setLevel(50)
torch.manual_seed(42)
# Model
class GaussianMixtureModel(object):
def __init__(self, K):
self.K = K
@bm.random_variable
def alpha(self, k):
return dist.Dirichlet(5 * torch.ones(k))
@bm.random_variable
def mu(self, c):
return dist.MultivariateNormal(
loc=torch.zeros(2), covariance_matrix=10.0 * torch.eye(2)
)
@bm.random_variable
def sigma(self, c):
return dist.Gamma(1, 10)
@bm.random_variable
def component(self, i):
alpha = self.alpha(self.K)
return dist.Categorical(alpha)
@bm.random_variable
def y(self, i):
c = self.component(i).item()
return dist.MultivariateNormal(
loc=self.mu(c), covariance_matrix=self.sigma(c) ** 2 * torch.eye(2) + 1e-3
)
# Creating sample data
n = 32 # num observations
k = 4 # true number of clusters
gmm = GaussianMixtureModel(K=4)
ground_truth = {
**{
gmm.alpha(k): torch.ones(k) * 1.0 / k,
},
**{gmm.mu(i): tensor(i % 2).float() for i in range(k)},
**{gmm.sigma(i): tensor(0.1) for i in range(k)},
**{gmm.component(i): tensor(i % k).float() for i in range(n)},
}
# [Visualization code in tutorial skipped]
# Inference parameters
num_samples = (
1 ###00 Sample size should not affect (the ability to find) compilation issues.
)
queries = (
[gmm.alpha(gmm.K)]
+ [gmm.component(j) for j in range(n)]
+ [gmm.mu(i) for i in range(k)]
+ [gmm.sigma(i) for i in range(k)]
)
observations = {
gmm.y(i): ground_truth[gmm.mu(ground_truth[gmm.component(i)].item())]
for i in range(n)
}
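# Note that each observation above is exactly the ground-truth mean of its
# ground-truth component; that is, the synthetic data is noise-free.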
class tutorialGMMwith2DimensionsAnd4Components(unittest.TestCase):
def test_tutorial_GMM_with_2_dimensions_and_4_components(self) -> None:
"""Check BM and BMG inference both terminate"""
self.maxDiff = None
# Inference with BM
torch.manual_seed(
42
) # Note: Second time we seed. Could be a good tutorial style
mh = bm.CompositionalInference({...: bm.SingleSiteNewtonianMonteCarlo()})
mh.infer(
queries,
observations,
num_samples=num_samples,
num_chains=1,
)
self.assertTrue(True, msg="We just want to check this point is reached")
@unittest.skip("TODO: enable when passing")
def test_tutorial_GMM_with_2_dimensions_and_4_components_to_dot_cpp_python(
self,
) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(queries, observations)
expected = """
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_cpp(queries, observations)
expected = """
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_python(queries, observations)
expected = """
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/tutorial_GMM_with_2_dimensions_and_4_components_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Compare original and conjugate prior transformed
Beta-Bernoulli model"""
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Beta
class HeadsRateModel(object):
"""Original, untransformed model"""
@bm.random_variable
def theta(self):
return Beta(2.0, 2.0)
@bm.random_variable
def y(self, i):
return Bernoulli(self.theta())
def run(self):
queries = [self.theta()]
observations = {
self.y(0): tensor(0.0),
self.y(1): tensor(0.0),
self.y(2): tensor(1.0),
self.y(3): tensor(0.0),
}
num_samples = 1000
bmg = BMGInference()
skip_optimizations = set()
posterior = bmg.infer(
queries, observations, num_samples, 1, skip_optimizations=skip_optimizations
)
bmg_graph = bmg.to_dot(
queries, observations, num_samples, skip_optimizations=skip_optimizations
)
theta_samples = posterior[self.theta()][0]
return theta_samples, bmg_graph
class HeadsRateModelTransformed(object):
"""Conjugate Prior Transformed model"""
@bm.random_variable
def theta(self):
return Beta(2.0, 2.0)
@bm.random_variable
def y(self, i):
return Bernoulli(self.theta())
@bm.random_variable
def theta_transformed(self):
# Analytical posterior Beta(alpha + sum y_i, beta + n - sum y_i)
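# Here alpha = beta = 2.0, n = 4 and sum y_i = 1 (one head among the
# four observations of the untransformed model above), giving Beta(3, 5).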
return Beta(2.0 + 1.0, 2.0 + (4.0 - 1.0))
def run(self):
# queries = [self.theta()]
queries_transformed = [self.theta_transformed()]
# observations = {
# self.y(0): tensor(0.0),
# self.y(1): tensor(0.0),
# self.y(2): tensor(1.0),
# self.y(3): tensor(0.0),
# }
observations_transformed = {}
num_samples = 1000
bmg = BMGInference()
# posterior = bmg.infer(queries, observations, num_samples)
posterior_transformed = bmg.infer(
queries_transformed, observations_transformed, num_samples
)
# theta_samples = posterior[self.theta()][0]
bmg_graph = bmg.to_dot(queries_transformed, observations_transformed)
theta_samples_transformed = posterior_transformed[self.theta_transformed()][0]
return theta_samples_transformed, bmg_graph
class HeadsRateModelTest(unittest.TestCase):
def test_beta_bernoulli_conjugate_graph(self) -> None:
_, heads_rate_model_graph = HeadsRateModel().run()
_, heads_rate_model_transformed_graph = HeadsRateModelTransformed().run()
self.assertEqual(heads_rate_model_graph, heads_rate_model_transformed_graph)
| beanmachine-main | tests/ppl/compiler/fix_beta_bernoulli_basic_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import platform
import re
import unittest
import beanmachine.graph as graph
import beanmachine.ppl as bm
import beanmachine.ppl.compiler.performance_report as pr
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Beta
@bm.random_variable
def coin():
return Beta(2.0, 2.0)
@bm.random_variable
def flip():
return Bernoulli(coin())
def tidy(s):
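# Normalize the volatile parts of a performance report -- the timestamp,
# elapsed-millisecond figures, and parenthesized call counts -- so that
# string comparisons against an expected report are stable across runs.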
s = re.sub(r"generated_at:.*\n", "generated_at: --\n", s)
s = re.sub(r"\d+ ms", "-- ms", s)
s = re.sub(r"\(\d+\)", "(--)", s)
return s
class PerfReportTest(unittest.TestCase):
def test_bmg_performance_report_1(self) -> None:
if platform.system() == "Windows":
self.skipTest("Disabling perf tests until flakiness is resolved")
# How to obtain the performance report from BMGInference
self.maxDiff = None
queries = [coin()]
observations = {flip(): tensor(1.0)}
num_samples = 1000
# We have an _infer method which returns both samples and a
# performance report.
_, report = BMGInference()._infer(queries, observations, num_samples)
# You can convert the report to a string:
observed = str(report)
expected = """
title: Bean Machine Graph performance report
generated_at: --
num_samples: 1000
algorithm: 3
seed: 5123401
node_count: 5
edge_count: 5
factor_count: 0
dist_count: 2
const_count: 1
op_count: 2
add_count: 0
det_supp_count: [0]
bmg_profiler_report: nmc_infer:(1) -- ms
initialize:(1) -- ms
collect_samples:(1) -- ms
"""
# Note that there are two profiler reports: one for time spent
# in the compiler and one for time spent in BMG inference.
#
# See next test for details of how to access the elements of the
# perf report and the profile reports
self.assertTrue(tidy(observed).strip().startswith(tidy(expected).strip()))
def test_bmg_performance_report_2(self) -> None:
if platform.system() == "Windows":
self.skipTest("Disabling perf tests until flakiness is resolved")
# How to use the performance reporter calling BMG directly
# rather than through BMGInference / BMGraphBuilder.
self.maxDiff = None
g = graph.Graph()
# Turn on data collection
g.collect_performance_data(True)
# Build a simple model:
#
# BETA(2, 2) --> SAMPLE --> BERNOULLI --> SAMPLE --> observe False
#
n0 = g.add_constant_pos_real(2.0)
n1 = g.add_distribution(
graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [n0, n0]
)
n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n3 = g.add_distribution(
graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [n2]
)
n4 = g.add_operator(graph.OperatorType.SAMPLE, [n3])
g.observe(n4, False)
g.query(n2)
# Run inference
num_samples = 1000
g.infer(num_samples, graph.InferenceType.NMC)
# Fetch raw perf data (JSON string)
js = g.performance_report()
# decode
perf_report = pr.json_to_perf_report(js)
# You can dump the entire report as a string. Notice that this
# version of the report does not include beanstalk compiler timings
# because of course we did not run the compiler in this test.
observed = str(perf_report)
expected = """
title: Bean Machine Graph performance report
generated_at: --
num_samples: 1000
algorithm: 3
seed: 5123401
node_count: 5
edge_count: 5
factor_count: 0
dist_count: 2
const_count: 1
op_count: 2
add_count: 0
det_supp_count: [0]
bmg_profiler_report: nmc_infer:(1) -- ms
initialize:(1) -- ms
collect_samples:(1) -- ms
step:(1000) -- ms
create_prop:(2000) -- ms
compute_grads:(--) -- ms
unattributed: -- ms
sample:(1000) -- ms
save_old:(1000) -- ms
eval:(1000) -- ms
clear_grads:(1000) -- ms
restore_old:(7) -- ms
unattributed: -- ms
collect_sample:(1000) -- ms
unattributed: -- ms
unattributed: -- ms
Total time: -- ms
"""
self.assertEqual(tidy(expected).strip(), tidy(observed).strip())
# Or you can look at each element programmatically:
self.assertEqual("Bean Machine Graph performance report", perf_report.title)
self.assertEqual(3, perf_report.algorithm)
self.assertEqual(num_samples, perf_report.num_samples)
self.assertEqual(5, perf_report.node_count)
self.assertEqual(2, perf_report.dist_count)
self.assertEqual(1, perf_report.const_count)
self.assertEqual(0, perf_report.factor_count)
self.assertEqual(2, perf_report.op_count)
self.assertEqual(0, perf_report.add_count)
self.assertEqual(5, perf_report.edge_count)
# You can also look at profiler elements programmatically.
#
# Ex: how much time do we spend initializing the inference algorithm
# data structures?
prof_report = perf_report.bmg_profiler_report
self.assertLess(0, prof_report.nmc_infer.total_time)
self.assertLess(0, prof_report.nmc_infer.initialize.total_time)
# How many times did we do a step?
self.assertEqual(1000, prof_report.nmc_infer.collect_samples.step.calls)
# Or you can dump just the profiler report as a string.
observed = str(prof_report)
expected = """
nmc_infer:(1) -- ms
initialize:(1) -- ms
collect_samples:(1) -- ms
step:(1000) -- ms
create_prop:(2000) -- ms
compute_grads:(--) -- ms
unattributed: -- ms
sample:(1000) -- ms
save_old:(1000) -- ms
eval:(1000) -- ms
clear_grads:(1000) -- ms
restore_old:(7) -- ms
unattributed: -- ms
collect_sample:(1000) -- ms
unattributed: -- ms
unattributed: -- ms
Total time: -- ms
"""
self.assertEqual(tidy(expected).strip(), tidy(observed).strip())
| beanmachine-main | tests/ppl/compiler/perf_report_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test of realistic coin flip model"""
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Beta
@bm.random_variable
def beta():
return Beta(2.0, 2.0)
@bm.random_variable
def flip(n):
return Bernoulli(beta())
class CoinFlipTest(unittest.TestCase):
def test_coin_flip_inference(self) -> None:
"""test_inference from coin_flip_test.py"""
# We've got a prior on the coin of Beta(2,2), so it is most
# likely to be actually fair, but still with some probability
# of being unfair in either direction.
#
# We flip the coin four times and get heads 25% of the time,
# so this is some evidence that the true fairness of the coin is
# closer to 25% than 50%.
#
# We sample 1000 times from the posterior and take the average;
# it should come out that the true fairness is now most likely
# to be around 37%.
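# (Analytically: a Beta(2, 2) prior updated with 1 head in 4 flips gives
# a Beta(2 + 1, 2 + 3) = Beta(3, 5) posterior, whose mean is 3/8 = 0.375.)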
self.maxDiff = None
queries = [beta()]
observations = {
flip(0): tensor(0.0),
flip(1): tensor(0.0),
flip(2): tensor(1.0),
flip(3): tensor(0.0),
}
num_samples = 1000
inference = BMGInference()
mcsamples = inference.infer(queries, observations, num_samples)
samples = mcsamples[beta()]
observed = samples.mean()
expected = 0.37
self.assertAlmostEqual(first=observed, second=expected, delta=0.05)
def test_coin_flip_to_dot_cpp_python(self) -> None:
self.maxDiff = None
queries = [beta()]
observations = {
flip(0): tensor(0.0),
flip(1): tensor(0.0),
flip(2): tensor(1.0),
flip(3): tensor(0.0),
}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=Beta];
N02[label=Sample];
N03[label=Bernoulli];
N04[label=Sample];
N05[label="Observation False"];
N06[label=Sample];
N07[label="Observation False"];
N08[label=Sample];
N09[label="Observation True"];
N10[label=Sample];
N11[label="Observation False"];
N12[label=Query];
N00 -> N01;
N00 -> N01;
N01 -> N02;
N02 -> N03;
N02 -> N12;
N03 -> N04;
N03 -> N06;
N03 -> N08;
N03 -> N10;
N04 -> N05;
N06 -> N07;
N08 -> N09;
N10 -> N11;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_cpp(queries, observations)
expected = """
graph::Graph g;
uint n0 = g.add_constant_pos_real(2.0);
uint n1 = g.add_distribution(
graph::DistributionType::BETA,
graph::AtomicType::PROBABILITY,
std::vector<uint>({n0, n0}));
uint n2 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n3 = g.add_distribution(
graph::DistributionType::BERNOULLI,
graph::AtomicType::BOOLEAN,
std::vector<uint>({n2}));
uint n4 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
g.observe(n4, false);
uint n5 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
g.observe(n5, false);
uint n6 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
g.observe(n6, true);
uint n7 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
g.observe(n7, false);
uint q0 = g.query(n2);"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_python(queries, observations)
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_pos_real(2.0)
n1 = g.add_distribution(
graph.DistributionType.BETA,
graph.AtomicType.PROBABILITY,
[n0, n0],
)
n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
n3 = g.add_distribution(
graph.DistributionType.BERNOULLI,
graph.AtomicType.BOOLEAN,
[n2],
)
n4 = g.add_operator(graph.OperatorType.SAMPLE, [n3])
g.observe(n4, False)
n5 = g.add_operator(graph.OperatorType.SAMPLE, [n3])
g.observe(n5, False)
n6 = g.add_operator(graph.OperatorType.SAMPLE, [n3])
g.observe(n6, True)
n7 = g.add_operator(graph.OperatorType.SAMPLE, [n3])
g.observe(n7, False)
q0 = g.query(n2)"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/coin_flip_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch.distributions import HalfCauchy, Normal
@bm.random_variable
def hc(n):
return HalfCauchy(1)
@bm.random_variable
def normal():
return Normal(0, 1)
@bm.functional
def logprob():
# Demonstrate that we can apply operators other than
# sample to stochastic distributions.
normal_sample = normal() # Sample
normal_dist_1 = Normal(0, hc(1))
normal_dist_2 = Normal(0, hc(2))
# "instance receiver" form
weight_1 = normal_dist_1.log_prob(normal_sample)
# "static receiver" form
weight_2 = Normal.log_prob(normal_dist_2, normal_sample)
# Non-stochastic distribution, stochastic value
weight_3 = Normal(2, 3).log_prob(normal_sample)
return weight_1 + weight_2 + weight_3
class LogProbTest(unittest.TestCase):
def test_logprob(self) -> None:
self.maxDiff = None
queries = [logprob()]
observed = BMGInference().to_dot(queries, {})
expected = """
digraph "graph" {
N00[label=1.0];
N01[label=HalfCauchy];
N02[label=Sample];
N03[label=0.0];
N04[label=Normal];
N05[label=Sample];
N06[label=Sample];
N07[label=Normal];
N08[label=LogProb];
N09[label=Normal];
N10[label=LogProb];
N11[label=2.0];
N12[label=3.0];
N13[label=Normal];
N14[label=LogProb];
N15[label="+"];
N16[label=Query];
N00 -> N01;
N00 -> N04;
N01 -> N02;
N01 -> N06;
N02 -> N07;
N03 -> N04;
N03 -> N07;
N03 -> N09;
N04 -> N05;
N05 -> N08;
N05 -> N10;
N05 -> N14;
N06 -> N09;
N07 -> N08;
N08 -> N15;
N09 -> N10;
N10 -> N15;
N11 -> N13;
N12 -> N13;
N13 -> N14;
N14 -> N15;
N15 -> N16;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/log_prob_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for poisson distribution"""
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Gamma, Poisson
@bm.random_variable
def poisson_1():
return Poisson(rate=0.5)
@bm.random_variable
def gamma_1():
return Gamma(1.0, 4.0)
@bm.random_variable
def poisson_2():
return Poisson(rate=gamma_1())
@bm.random_variable
def poisson_3():
return Poisson(rate=-1 * gamma_1())
@bm.random_variable
def poisson_4():
return Poisson(rate=tensor([1.0, 2.0]))
class distributionPoissonTest(unittest.TestCase):
def test_graphs_poisson_with_constant_rate(self) -> None:
self.maxDiff = None
queries = [poisson_1()]
observations = {}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N0[label=0.5];
N1[label=Poisson];
N2[label=Sample];
N3[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed_cpp = BMGInference().to_cpp(queries, observations)
expected_cpp = """
graph::Graph g;
uint n0 = g.add_constant_pos_real(0.5);
uint n1 = g.add_distribution(
graph::DistributionType::POISSON,
graph::AtomicType::NATURAL,
std::vector<uint>({n0}));
uint n2 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint q0 = g.query(n2);
"""
self.assertEqual(expected_cpp.strip(), observed_cpp.strip())
observed_python = BMGInference().to_python(queries, observations)
expected_python = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_pos_real(0.5)
n1 = g.add_distribution(
graph.DistributionType.POISSON,
graph.AtomicType.NATURAL,
[n0],
)
n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
q0 = g.query(n2)
"""
self.assertEqual(expected_python.strip(), observed_python.strip())
def test_poisson_rate_with_sample_from_distribution(self) -> None:
self.maxDiff = None
queries = [poisson_2()]
observations = {}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N0[label=1.0];
N1[label=4.0];
N2[label=Gamma];
N3[label=Sample];
N4[label=Poisson];
N5[label=Sample];
N6[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_poisson_tensor_input(self) -> None:
self.maxDiff = None
queries = [poisson_4()]
observations = {}
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N0[label=1.0];
N1[label=Poisson];
N2[label=Sample];
N3[label=2.0];
N4[label=Poisson];
N5[label=Sample];
N6[label=2];
N7[label=1];
N8[label=ToMatrix];
N9[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N8;
N3 -> N4;
N4 -> N5;
N5 -> N8;
N6 -> N8;
N7 -> N8;
N8 -> N9;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_poisson_rate_error_reporting(self) -> None:
self.maxDiff = None
queries = [poisson_3()]
observations = {}
with self.assertRaises(ValueError) as ex:
BMGInference().to_dot(queries, observations)
self.assertEqual(
str(ex.exception),
"The rate of a Poisson is required to be a positive real but is a negative real.\n"
"The Poisson was created in function call poisson_3().",
msg="Poisson distribution with non-positive real rates should throw an exception.",
)
| beanmachine-main | tests/ppl/compiler/distribution_poisson_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A basic unit test for the Python interface of the BMG C++ Graph.infer method"""
import unittest
import beanmachine.ppl as bm
from beanmachine.graph import InferenceType
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Dirichlet
@bm.functional
def c():
return tensor(2.5)
@bm.functional
def c2():
return tensor([1.5, -2.5])
@bm.random_variable
def flip():
return Bernoulli(0.5)
@bm.functional
def flip2():
return flip()
@bm.functional
def flip3():
return flip() + 0
@bm.functional
def flip4():
return 0 + flip()
@bm.functional
def always_false_1():
return 1 < flip()
@bm.functional
def always_false_2():
return flip() < 0
@bm.functional
def invalid_tensor_1():
return tensor([])
@bm.functional
def invalid_tensor_2():
return tensor([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
class BMGInferInterfaceTest(unittest.TestCase):
def test_infer_interface_constant_functional(self) -> None:
self.maxDiff = None
# First, let's check expected behavior from a regular BM inference method
samples = bm.SingleSiteNewtonianMonteCarlo().infer([c(), c2()], {}, 1, 1)
observed = samples[c()]
expected = "tensor([[2.5000]])"
self.assertEqual(expected.strip(), str(observed).strip())
observed = samples[c2()]
expected = "tensor([[[ 1.5000, -2.5000]]])" # Note, no ", dtype=torch.float64)"
self.assertEqual(expected.strip(), str(observed).strip())
# Now let's do this in BMG Inference
samples = BMGInference().infer([c(), c2()], {}, 1, 1)
observed = samples[c()]
expected = "tensor([[2.5000]])"
self.assertEqual(expected.strip(), str(observed).strip())
observed = samples[c2()]
expected = "tensor([[[ 1.5000, -2.5000]]], dtype=torch.float64)"
self.assertEqual(expected.strip(), str(observed).strip())
# Again, let's check expected behavior from a regular BM inference method
samples = bm.SingleSiteNewtonianMonteCarlo().infer([c(), c2()], {}, 1, 2)
observed = samples[c()]
expected = """
tensor([[2.5000],
[2.5000]])"""
self.assertEqual(expected.strip(), str(observed).strip())
observed = samples[c2()]
expected = """
tensor([[[ 1.5000, -2.5000]],
[[ 1.5000, -2.5000]]])""" # Note, no ", dtype=torch.float64)"
self.assertEqual(expected.strip(), str(observed).strip())
# And again, in BMG inference
samples = BMGInference().infer([c(), c2()], {}, 1, 2)
observed = samples[c()]
expected = """
tensor([[2.5000],
[2.5000]])"""
self.assertEqual(expected.strip(), str(observed).strip())
observed = samples[c2()]
expected = """
tensor([[[ 1.5000, -2.5000]],
[[ 1.5000, -2.5000]]], dtype=torch.float64)"""
self.assertEqual(expected.strip(), str(observed).strip())
def test_infer_interface_redundant_functionals_1(self) -> None:
self.maxDiff = None
samples = BMGInference().infer([flip(), flip2()], {}, 10)
f = samples[flip()]
f2 = samples[flip2()]
self.assertEqual(str(f), str(f2))
samples = BMGInference().infer([always_false_1(), always_false_2()], {}, 2, 1)
af1 = samples[always_false_1()]
af2 = samples[always_false_2()]
expected = "tensor([[False, False]])"
self.assertEqual(expected, str(af1))
self.assertEqual(expected, str(af2))
def test_infer_interface_redundant_functionals_2(self) -> None:
self.maxDiff = None
samples = BMGInference().infer([flip3(), flip4()], {}, 10)
f3 = samples[flip3()]
f4 = samples[flip4()]
self.assertEqual(str(f3), str(f4))
def test_infer_interface_burn_in(self) -> None:
# Check default case when num_adaptive_samples = 0
num_samples = 25
num_adaptive_samples = 0
samples = BMGInference().infer([c(), c2()], {}, num_samples, 1)
observed = len(samples[c()][0])
expected = num_samples
self.assertEqual(expected, observed)
# Check case when num_adaptive_samples = 10
num_samples = 25
num_adaptive_samples = 10
samples = BMGInference().infer(
[c(), c2()], {}, num_samples, 1, num_adaptive_samples=num_adaptive_samples
)
observed = len(samples[c()][0])
expected = num_samples
self.assertEqual(expected, observed)
def test_infer_interface_nuts(self) -> None:
# Check default case when num_adaptive_samples = 0
num_samples = 25
num_adaptive_samples = 0
samples = BMGInference().infer(
[c(), c2()], {}, num_samples, 1, inference_type=InferenceType.NUTS
)
observed = len(samples[c()][0])
expected = num_samples
self.assertEqual(expected, observed)
# Check case when num_adaptive_samples = 10
num_samples = 25
num_adaptive_samples = 10
samples = BMGInference().infer(
[c(), c2()],
{},
num_samples,
1,
num_adaptive_samples=num_adaptive_samples,
inference_type=InferenceType.NUTS,
)
observed = len(samples[c()][0])
expected = num_samples
self.assertEqual(expected, observed)
class SampleModel:
@bm.random_variable
def a(self):
return Dirichlet(tensor([0.5, 0.5]))
@bm.functional
def b(self):
return self.a()[2]  # The index 2 is intentionally out of bounds
def test_infer_interface_runtime_error(self) -> None:
model = self.SampleModel()
with self.assertRaisesRegex(RuntimeError, "Error during BMG inference.*"):
BMGInference().infer([model.a(), model.b()], {}, 10, 4)
| beanmachine-main | tests/ppl/compiler/bmg_infer_interface_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test for an example use of the normal distribution"""
import logging
import unittest
import beanmachine.ppl as bm
import torch # from torch import manual_seed, tensor
import torch.distributions as dist # from torch.distributions import Bernoulli, Normal, Uniform
from beanmachine.ppl.inference.bmg_inference import BMGInference
# TODO: Check imports for consistency
# Suppress beanmachine logging and fix the seed so that results are
# deterministic and reproducible.
logging.getLogger("beanmachine").setLevel(50)
torch.manual_seed(12)
# Model
@bm.random_variable
def x():
"""
A random variable drawn from a half normal distribution
"""
return dist.HalfNormal(1000)
num_samples = (
2  # Reduced from 2000; the sample size should not affect compilation issues
)
num_chains = 4
observations = {}  # No observations, so we just get the distribution as declared
queries = [x()]
class distributionHalfNormalTest(unittest.TestCase):
def test_distribution_half_normal_e2e(self) -> None:
"""Check BM and BMG inference both terminate"""
self.maxDiff = None
# Inference with BM
# Note: No explicit seed here (in original tutorial model). Should we add one?
amh = bm.SingleSiteAncestralMetropolisHastings() # Added local binding
bm_samples = amh.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=num_chains,
)
self.assertTrue(
bm_samples.get_num_samples() == num_samples,
msg="Got wrong number of samples back from BM inference",
)
# Inference with BMG
bmg_samples = BMGInference().infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=1, # TODO[Walid]: 1 should be replaced by num_chains
)
self.assertTrue(
bmg_samples.get_num_samples() == num_samples,
msg="Got wrong number of samples back from BMG inference",
)
def test_distribution_half_normal_to_dot_cpp_python(
self,
) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N0[label=1000.0];
N1[label=HalfNormal];
N2[label=Sample];
N3[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_cpp(queries, observations)
expected = """
graph::Graph g;
uint n0 = g.add_constant_pos_real(1000.0);
uint n1 = g.add_distribution(
graph::DistributionType::HALF_NORMAL,
graph::AtomicType::POS_REAL,
std::vector<uint>({n0}));
uint n2 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint q0 = g.query(n2);
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_python(queries, observations)
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_pos_real(1000.0)
n1 = g.add_distribution(
graph.DistributionType.HALF_NORMAL,
graph.AtomicType.POS_REAL,
[n0],
)
n2 = g.add_operator(graph.OperatorType.SAMPLE, [n1])
q0 = g.query(n2)
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/distribution_half_normal_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test for an example use of the normal distribution"""
import logging
import unittest
import beanmachine.ppl as bm
import torch # from torch import manual_seed, tensor
import torch.distributions as dist # from torch.distributions import Bernoulli, Normal, Uniform
from beanmachine.ppl.inference.bmg_inference import BMGInference
# TODO: Check imports for consistency
# Suppress beanmachine logging and fix the seed so that results are
# deterministic and reproducible.
logging.getLogger("beanmachine").setLevel(50)
torch.manual_seed(12)
# Model
@bm.random_variable
def x():
"""
A random variable drawn from a normal (Gaussian) distribution
"""
return dist.Normal(0, 1000)
num_samples = (
2  # Reduced from 2000; the sample size should not affect compilation issues
)
num_chains = 4
observations = {}  # No observations, so we just get the distribution as declared
queries = [x()]
class distributionNormalTest(unittest.TestCase):
def test_distribution_normal_e2e(self) -> None:
"""Check BM and BMG inference both terminate"""
self.maxDiff = None
# Inference with BM
# Note: No explicit seed here (in original tutorial model). Should we add one?
amh = bm.SingleSiteAncestralMetropolisHastings() # Added local binding
bm_samples = amh.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=num_chains,
)
self.assertTrue(
bm_samples.get_num_samples() == num_samples,
msg="Got wrong number of samples back from BM inference",
)
# Inference with BMG
bmg_samples = BMGInference().infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=1, # TODO[Walid]: 1 should be num_chains
)
self.assertTrue(
bmg_samples.get_num_samples() == num_samples,
msg="Got wrong number of samples back from BMG inference",
)
def test_distribution_normal_to_dot_cpp_python(
self,
) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1000.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_cpp(queries, observations)
expected = """
graph::Graph g;
uint n0 = g.add_constant_real(0.0);
uint n1 = g.add_constant_pos_real(1000.0);
uint n2 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n0, n1}));
uint n3 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n2}));
uint q0 = g.query(n3);
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_python(queries, observations)
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_real(0.0)
n1 = g.add_constant_pos_real(1000.0)
n2 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n0, n1],
)
n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2])
q0 = g.query(n3)
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/distribution_normal_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for ast_tools.py"""
import ast
import unittest
import beanmachine.ppl.compiler.ast_tools as ast_tools
class ASTToolsTest(unittest.TestCase):
def disabled_test_ast_tools_print_tree(self) -> None:
# PYTHON VERSIONING ISSUE
# TODO: This test is disabled because it has different output on
# different versions of Python. Enable the test once we have sorted
# out what our required version of Python is.
"""test_ast_tools_print_tree"""
node = ast.parse("2 + 3")
observed = ast_tools.print_tree(node, False)
expected = """
Module
+-list
+-Expr
+-BinOp
+-Num
| +-2
+-Add
+-Num
+-3
"""
self.maxDiff = None
self.assertEqual(observed.strip(), expected.strip())
def disabled_test_ast_tools_print_graph(self) -> None:
"""test_ast_tools_print_graph"""
# PYTHON VERSIONING ISSUE
# TODO: This test is disabled because it has different output on
# different versions of Python. Enable the test once we have sorted
# out what our required version of Python is.
node = ast.parse("2 + 3")
observed = ast_tools.print_graph(node)
expected = """
digraph "graph" {
N0[label=Module];
N1[label=list];
N2[label=Expr];
N3[label=BinOp];
N4[label=Num];
N5[label=Add];
N6[label=Num];
N7[label=3];
N8[label=2];
N0 -> N1[label=body];
N1 -> N2[label=0];
N2 -> N3[label=value];
N3 -> N4[label=left];
N3 -> N5[label=op];
N3 -> N6[label=right];
N4 -> N8[label=n];
N6 -> N7[label=n];
}"""
self.maxDiff = None
self.assertEqual(observed.strip(), expected.strip())
def disabled_test_ast_tools_print_python(self) -> None:
"""test_ast_tools_print_python"""
# PYTHON VERSIONING ISSUE
# TODO: This test is disabled because it has different output on
# different versions of Python. Enable the test once we have sorted
# out what our required version of Python is.
node = ast.parse("x = f(2 + 3)")
observed = ast_tools.print_python(node)
expected = """
Module(
body=[
Assign(
targets=[Name(id="x", ctx=Store())],
value=Call(
func=Name(id="f", ctx=Load()),
args=[BinOp(left=Num(n=2), op=Add(), right=Num(n=3))],
keywords=[],
),
)
]
)
"""
self.maxDiff = None
self.assertEqual(observed.strip(), expected.strip())
| beanmachine-main | tests/ppl/compiler/ast_tools_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""End-to-end test for 1D GMM with K > 2 number of components"""
import logging
import unittest
# Comments after the imports suggest an alternative comment style (for the original tutorial)
import beanmachine.ppl as bm
import torch # from torch import manual_seed, tensor
import torch.distributions as dist # from torch.distributions import Bernoulli, Normal, Uniform
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch import tensor
# Suppress beanmachine logging and fix the seed so that results are
# deterministic and reproducible.
logging.getLogger("beanmachine").setLevel(50)
torch.manual_seed(42)
# Model
class GaussianMixtureModel(object):
def __init__(self, k):
self.K = k
@bm.random_variable
def alpha(self, k):
return dist.Dirichlet(5 * torch.ones(k))
@bm.random_variable
def mu(self, c):
return dist.Normal(0, 10)
@bm.random_variable
def sigma(self, c):
return dist.Gamma(1, 10)
@bm.random_variable
def component(self, i):
alpha = self.alpha(self.K)
return dist.Categorical(alpha)
@bm.random_variable
def y(self, i):
c = self.component(i)
return dist.Normal(self.mu(c), self.sigma(c))
# Creating sample data
n = 6 # num observations
k = 4 # true number of clusters
gmm = GaussianMixtureModel(k=k)
ground_truth = {
**{
gmm.alpha(k): torch.ones(k) * 1.0 / k,
},
**{gmm.mu(i): tensor(i % 2).float() for i in range(k)},
**{gmm.sigma(i): tensor(0.1) for i in range(k)},
**{gmm.component(i): tensor(i % k).float() for i in range(n)},
}
# [Visualization code in tutorial skipped]
# Inference parameters
num_samples = (
1  # Reduced from 100; the sample size should not affect (the ability to find) compilation issues.
)
queries = (
[gmm.alpha(gmm.K)]
+ [gmm.component(j) for j in range(n)]
+ [gmm.mu(i) for i in range(k)]
+ [gmm.sigma(i) for i in range(k)]
)
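# Each observation y(i) is set to the ground-truth mean of its true component,
# ground_truth[mu(component(i))], so the synthetic data are noiseless.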
observations = {
gmm.y(i): ground_truth[gmm.mu(ground_truth[gmm.component(i)].item())]
for i in range(n)
}
class tutorialGMMwith1DimensionsAnd4Components(unittest.TestCase):
def test_tutorial_GMM_with_1_dimensions_and_4_components(self) -> None:
"""Check BM and BMG inference both terminate"""
self.maxDiff = None
# Inference with BM
torch.manual_seed(
42
)  # Note: we seed a second time here, which could be a good pattern for the tutorial
mh = bm.CompositionalInference({...: bm.SingleSiteNewtonianMonteCarlo()})
mh.infer(
queries,
observations,
num_samples=num_samples,
num_chains=1,
)
self.assertTrue(True, msg="We just want to check this point is reached")
def test_tutorial_GMM_with_1_dimensions_and_4_components_to_dot_cpp_python(
self,
) -> None:
self.maxDiff = None
observed = BMGInference().to_dot(queries, observations)
expected = """digraph "graph" {
N00[label="[5.0,5.0,5.0,5.0]"];
N01[label=Dirichlet];
N02[label=Sample];
N03[label=Categorical];
N04[label=Sample];
N05[label=0.0];
N06[label=10.0];
N07[label=Normal];
N08[label=Sample];
N09[label=Sample];
N10[label=Sample];
N11[label=Sample];
N12[label=1.0];
N13[label=Gamma];
N14[label=Sample];
N15[label=Sample];
N16[label=Sample];
N17[label=Sample];
N18[label=Choice];
N19[label=Choice];
N20[label=Normal];
N21[label=Sample];
N22[label="Observation 0.0"];
N23[label=Sample];
N24[label=Choice];
N25[label=Choice];
N26[label=Normal];
N27[label=Sample];
N28[label="Observation 1.0"];
N29[label=Sample];
N30[label=Choice];
N31[label=Choice];
N32[label=Normal];
N33[label=Sample];
N34[label="Observation 0.0"];
N35[label=Sample];
N36[label=Choice];
N37[label=Choice];
N38[label=Normal];
N39[label=Sample];
N40[label="Observation 1.0"];
N41[label=Sample];
N42[label=Choice];
N43[label=Choice];
N44[label=Normal];
N45[label=Sample];
N46[label="Observation 0.0"];
N47[label=Sample];
N48[label=Choice];
N49[label=Choice];
N50[label=Normal];
N51[label=Sample];
N52[label="Observation 1.0"];
N53[label=Query];
N54[label=Query];
N55[label=Query];
N56[label=Query];
N57[label=Query];
N58[label=Query];
N59[label=Query];
N60[label=Query];
N61[label=Query];
N62[label=Query];
N63[label=Query];
N64[label=Query];
N65[label=Query];
N66[label=Query];
N67[label=Query];
N00 -> N01;
N01 -> N02;
N02 -> N03;
N02 -> N53;
N03 -> N04;
N03 -> N23;
N03 -> N29;
N03 -> N35;
N03 -> N41;
N03 -> N47;
N04 -> N18;
N04 -> N19;
N04 -> N54;
N05 -> N07;
N06 -> N07;
N06 -> N13;
N07 -> N08;
N07 -> N09;
N07 -> N10;
N07 -> N11;
N08 -> N18;
N08 -> N24;
N08 -> N30;
N08 -> N36;
N08 -> N42;
N08 -> N48;
N08 -> N60;
N09 -> N18;
N09 -> N24;
N09 -> N30;
N09 -> N36;
N09 -> N42;
N09 -> N48;
N09 -> N61;
N10 -> N18;
N10 -> N24;
N10 -> N30;
N10 -> N36;
N10 -> N42;
N10 -> N48;
N10 -> N62;
N11 -> N18;
N11 -> N24;
N11 -> N30;
N11 -> N36;
N11 -> N42;
N11 -> N48;
N11 -> N63;
N12 -> N13;
N13 -> N14;
N13 -> N15;
N13 -> N16;
N13 -> N17;
N14 -> N19;
N14 -> N25;
N14 -> N31;
N14 -> N37;
N14 -> N43;
N14 -> N49;
N14 -> N64;
N15 -> N19;
N15 -> N25;
N15 -> N31;
N15 -> N37;
N15 -> N43;
N15 -> N49;
N15 -> N65;
N16 -> N19;
N16 -> N25;
N16 -> N31;
N16 -> N37;
N16 -> N43;
N16 -> N49;
N16 -> N66;
N17 -> N19;
N17 -> N25;
N17 -> N31;
N17 -> N37;
N17 -> N43;
N17 -> N49;
N17 -> N67;
N18 -> N20;
N19 -> N20;
N20 -> N21;
N21 -> N22;
N23 -> N24;
N23 -> N25;
N23 -> N55;
N24 -> N26;
N25 -> N26;
N26 -> N27;
N27 -> N28;
N29 -> N30;
N29 -> N31;
N29 -> N56;
N30 -> N32;
N31 -> N32;
N32 -> N33;
N33 -> N34;
N35 -> N36;
N35 -> N37;
N35 -> N57;
N36 -> N38;
N37 -> N38;
N38 -> N39;
N39 -> N40;
N41 -> N42;
N41 -> N43;
N41 -> N58;
N42 -> N44;
N43 -> N44;
N44 -> N45;
N45 -> N46;
N47 -> N48;
N47 -> N49;
N47 -> N59;
N48 -> N50;
N49 -> N50;
N50 -> N51;
N51 -> N52;
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = BMGInference().to_cpp(queries, observations)
expected = """graph::Graph g;
Eigen::MatrixXd m0(4, 1);
m0 << 5.0, 5.0, 5.0, 5.0;
uint n0 = g.add_constant_pos_matrix(m0);
uint n1 = g.add_distribution(
graph::DistributionType::DIRICHLET,
graph::ValueType(
graph::VariableType::COL_SIMPLEX_MATRIX,
graph::AtomicType::PROBABILITY,
4,
1
),
std::vector<uint>({n0}));
uint n2 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n1}));
uint n3 = g.add_distribution(
graph::DistributionType::CATEGORICAL,
graph::AtomicType::NATURAL,
std::vector<uint>({n2}));
uint n4 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n5 = g.add_constant_real(0.0);
uint n6 = g.add_constant_pos_real(10.0);
uint n7 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n5, n6}));
uint n8 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n7}));
uint n9 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n7}));
uint n10 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n7}));
uint n11 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n7}));
uint n12 = g.add_constant_pos_real(1.0);
uint n13 = g.add_distribution(
graph::DistributionType::GAMMA,
graph::AtomicType::POS_REAL,
std::vector<uint>({n12, n6}));
uint n14 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n13}));
uint n15 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n13}));
uint n16 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n13}));
uint n17 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n13}));
uint n18 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n4, n8, n9, n10, n11}));
uint n19 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n4, n14, n15, n16, n17}));
uint n20 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n18, n19}));
uint n21 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n20}));
g.observe(n21, 0.0);
uint n22 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n23 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n22, n8, n9, n10, n11}));
uint n24 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n22, n14, n15, n16, n17}));
uint n25 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n23, n24}));
uint n26 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n25}));
g.observe(n26, 1.0);
uint n27 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n28 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n27, n8, n9, n10, n11}));
uint n29 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n27, n14, n15, n16, n17}));
uint n30 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n28, n29}));
uint n31 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n30}));
g.observe(n31, 0.0);
uint n32 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n33 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n32, n8, n9, n10, n11}));
uint n34 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n32, n14, n15, n16, n17}));
uint n35 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n33, n34}));
uint n36 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n35}));
g.observe(n36, 1.0);
uint n37 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n38 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n37, n8, n9, n10, n11}));
uint n39 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n37, n14, n15, n16, n17}));
uint n40 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n38, n39}));
uint n41 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n40}));
g.observe(n41, 0.0);
uint n42 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n3}));
uint n43 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n42, n8, n9, n10, n11}));
uint n44 = g.add_operator(
graph::OperatorType::CHOICE,
std::vector<uint>({n42, n14, n15, n16, n17}));
uint n45 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n43, n44}));
uint n46 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n45}));
g.observe(n46, 1.0);
uint q0 = g.query(n2);
uint q1 = g.query(n4);
uint q2 = g.query(n22);
uint q3 = g.query(n27);
uint q4 = g.query(n32);
uint q5 = g.query(n37);
uint q6 = g.query(n42);
uint q7 = g.query(n8);
uint q8 = g.query(n9);
uint q9 = g.query(n10);
uint q10 = g.query(n11);
uint q11 = g.query(n14);
uint q12 = g.query(n15);
uint q13 = g.query(n16);
uint q14 = g.query(n17);
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/tutorial_GMM_with_1_dimensions_and_4_components_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.gen_bmg_cpp import to_bmg_cpp
from beanmachine.ppl.compiler.gen_bmg_graph import to_bmg_graph
from beanmachine.ppl.compiler.gen_bmg_python import to_bmg_python
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.model.rv_identifier import RVIdentifier
def _rv_id() -> RVIdentifier:
return RVIdentifier(lambda a, b: a, (1, 1))
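# The test below builds a 2x2 matrix of stochastic reals, selects one of its
# columns with a Bernoulli-driven natural index (0 or 1, via an if-then-else
# node), and queries the log-sum-exp of that column, checking the generated
# DOT, C++, Python, and BMG graph forms.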
class ColumnIndexTest(unittest.TestCase):
def test_column_index_1(self) -> None:
self.maxDiff = None
bmg = BMGraphBuilder()
t = bmg.add_natural(2)
o = bmg.add_natural(1)
z = bmg.add_natural(0)
n = bmg.add_normal(z, o)
ns = bmg.add_sample(n)
e = bmg.add_exp(ns)
h = bmg.add_probability(0.5)
b = bmg.add_bernoulli(h)
bs = bmg.add_sample(b)
m = bmg.add_to_matrix(t, t, e, ns, ns, ns)
ci = bmg.add_column_index(m, bs)
lsev = bmg.add_logsumexp_vector(ci)
bmg.add_query(lsev, _rv_id())
observed = to_dot(
bmg,
node_types=True,
edge_requirements=True,
after_transform=True,
label_edges=True,
)
expected = """
digraph "graph" {
N00[label="0.0:R"];
N01[label="1.0:R+"];
N02[label="Normal:R"];
N03[label="Sample:R"];
N04[label="0.5:P"];
N05[label="Bernoulli:B"];
N06[label="Sample:B"];
N07[label="2:N"];
N08[label="Exp:R+"];
N09[label="ToReal:R"];
N10[label="ToMatrix:MR[2,2]"];
N11[label="1:N"];
N12[label="0:N"];
N13[label="if:N"];
N14[label="ColumnIndex:MR[2,1]"];
N15[label="LogSumExp:R"];
N16[label="Query:R"];
N00 -> N02[label="mu:R"];
N01 -> N02[label="sigma:R+"];
N02 -> N03[label="operand:R"];
N03 -> N08[label="operand:R"];
N03 -> N10[label="1:R"];
N03 -> N10[label="2:R"];
N03 -> N10[label="3:R"];
N04 -> N05[label="probability:P"];
N05 -> N06[label="operand:B"];
N06 -> N13[label="condition:B"];
N07 -> N10[label="columns:N"];
N07 -> N10[label="rows:N"];
N08 -> N09[label="operand:<=R"];
N09 -> N10[label="0:R"];
N10 -> N14[label="left:MR[2,2]"];
N11 -> N13[label="consequence:N"];
N12 -> N13[label="alternative:N"];
N13 -> N14[label="right:N"];
N14 -> N15[label="operand:MR[2,1]"];
N15 -> N16[label="operator:any"];
}
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_cpp(bmg).code
expected = """
graph::Graph g;
uint n0 = g.add_constant_real(0.0);
uint n1 = g.add_constant_pos_real(1.0);
uint n2 = g.add_distribution(
graph::DistributionType::NORMAL,
graph::AtomicType::REAL,
std::vector<uint>({n0, n1}));
uint n3 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n2}));
uint n4 = g.add_constant_probability(0.5);
uint n5 = g.add_distribution(
graph::DistributionType::BERNOULLI,
graph::AtomicType::BOOLEAN,
std::vector<uint>({n4}));
uint n6 = g.add_operator(
graph::OperatorType::SAMPLE, std::vector<uint>({n5}));
uint n7 = g.add_constant_natural(2);
uint n8 = g.add_operator(
graph::OperatorType::EXP, std::vector<uint>({n3}));
uint n9 = g.add_operator(
graph::OperatorType::TO_REAL, std::vector<uint>({n8}));
uint n10 = g.add_operator(
graph::OperatorType::TO_MATRIX,
std::vector<uint>({n7, n7, n9, n3, n3, n3}));
uint n11 = g.add_constant_natural(1);
uint n12 = g.add_constant_natural(0);
uint n13 = g.add_operator(
graph::OperatorType::IF_THEN_ELSE,
std::vector<uint>({n6, n11, n12}));
uint n14 = g.add_operator(
graph::OperatorType::COLUMN_INDEX, std::vector<uint>({n10, n13}));
uint n15 = g.add_operator(
graph::OperatorType::LOGSUMEXP_VECTOR, std::vector<uint>({n14}));
uint q0 = g.query(n15);
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_python(bmg).code
expected = """
from beanmachine import graph
from torch import tensor
g = graph.Graph()
n0 = g.add_constant_real(0.0)
n1 = g.add_constant_pos_real(1.0)
n2 = g.add_distribution(
graph.DistributionType.NORMAL,
graph.AtomicType.REAL,
[n0, n1],
)
n3 = g.add_operator(graph.OperatorType.SAMPLE, [n2])
n4 = g.add_constant_probability(0.5)
n5 = g.add_distribution(
graph.DistributionType.BERNOULLI,
graph.AtomicType.BOOLEAN,
[n4],
)
n6 = g.add_operator(graph.OperatorType.SAMPLE, [n5])
n7 = g.add_constant_natural(2)
n8 = g.add_operator(graph.OperatorType.EXP, [n3])
n9 = g.add_operator(graph.OperatorType.TO_REAL, [n8])
n10 = g.add_operator(
graph.OperatorType.TO_MATRIX,
[n7, n7, n9, n3, n3, n3],
)
n11 = g.add_constant_natural(1)
n12 = g.add_constant_natural(0)
n13 = g.add_operator(
graph.OperatorType.IF_THEN_ELSE,
[n6, n11, n12],
)
n14 = g.add_operator(graph.OperatorType.COLUMN_INDEX, [n10, n13])
n15 = g.add_operator(graph.OperatorType.LOGSUMEXP_VECTOR, [n14])
q0 = g.query(n15)
"""
self.assertEqual(expected.strip(), observed.strip())
observed = to_bmg_graph(bmg).graph.to_dot()
expected = """
digraph "graph" {
N0[label="0"];
N1[label="1"];
N2[label="Normal"];
N3[label="~"];
N4[label="0.5"];
N5[label="Bernoulli"];
N6[label="~"];
N7[label="2"];
N8[label="exp"];
N9[label="ToReal"];
N10[label="ToMatrix"];
N11[label="1"];
N12[label="0"];
N13[label="IfThenElse"];
N14[label="ColumnIndex"];
N15[label="LogSumExp"];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N8;
N3 -> N10;
N3 -> N10;
N3 -> N10;
N4 -> N5;
N5 -> N6;
N6 -> N13;
N7 -> N10;
N7 -> N10;
N8 -> N9;
N9 -> N10;
N10 -> N14;
N11 -> N13;
N12 -> N13;
N13 -> N14;
N14 -> N15;
Q0[label="Query"];
N15 -> Q0;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/column_index_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl.compiler.bmg_types as bt
import torch
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.lattice_typer import LatticeTyper
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from torch import Size
def _rv_id() -> RVIdentifier:
return RVIdentifier(lambda a, b: a, (1, 1))
class LatticeTyperTest(unittest.TestCase):
def test_lattice_typer_matrix_ops(self) -> None:
self.maxDiff = None
bmg = BMGraphBuilder()
typer = LatticeTyper()
# Create a non-constant real matrix.
zeros = bmg.add_real_matrix(torch.zeros(2, 2))
ones = bmg.add_pos_real_matrix(torch.ones(2, 2))
tensor_elements = []
for row in range(0, 2):
row_node = bmg.add_natural(row)
row_mu = bmg.add_column_index(zeros, row_node)
row_sigma = bmg.add_column_index(ones, row_node)
for column in range(0, 2):
index_node = bmg.add_natural(column)
index_mu = bmg.add_vector_index(row_mu, index_node)
index_sigma = bmg.add_vector_index(row_sigma, index_node)
normal = bmg.add_normal(index_mu, index_sigma)
sample = bmg.add_sample(normal)
tensor_elements.append(sample)
real_matrix = bmg.add_tensor(Size([2, 2]), *tensor_elements)
# Create a non-constant bool matrix.
probs = bmg.add_real_matrix(torch.tensor([[0.75, 0.25], [0.125, 0.875]]))
tensor_elements = []
for row in range(0, 2):
row_node = bmg.add_natural(row)
row_prob = bmg.add_column_index(probs, row_node)
for column in range(0, 2):
col_index = bmg.add_natural(column)
prob = bmg.add_vector_index(row_prob, col_index)
bernoulli = bmg.add_bernoulli(prob)
sample = bmg.add_sample(bernoulli)
tensor_elements.append(sample)
bool_matrix = bmg.add_tensor(Size([2, 2]), *tensor_elements)
neg_real = bmg.add_neg_real_matrix(torch.tensor([[-1.2, -1.3], [-4.7, -1.2]]))
pos_real = bmg.add_matrix_exp(real_matrix)
add_pos_to_reg = bmg.add_matrix_addition(pos_real, neg_real)
mult_pos_to_neg = bmg.add_elementwise_multiplication(pos_real, neg_real)
sum_bool = bmg.add_matrix_sum(bool_matrix)
bmg.add_query(sum_bool, _rv_id())
tpe_neg_real = typer[neg_real]
tpe_real = typer[real_matrix]
tpe_pos_real = typer[pos_real]
tpe_add = typer[add_pos_to_reg]
tpe_mult = typer[mult_pos_to_neg]
tpe_sum = typer[sum_bool]
self.assertTrue(isinstance(tpe_real, bt.RealMatrix))
self.assertTrue(isinstance(tpe_neg_real, bt.NegativeRealMatrix))
self.assertTrue(isinstance(tpe_pos_real, bt.PositiveRealMatrix))
self.assertTrue(isinstance(tpe_add, bt.RealMatrix))
self.assertTrue(isinstance(tpe_mult, bt.RealMatrix))
self.assertTrue(isinstance(tpe_sum, bt.BooleanMatrix))
def test_lattice_typer_1(self) -> None:
self.maxDiff = None
bmg = BMGraphBuilder()
typer = LatticeTyper()
# Lattice type of an untyped constant is based on its value.
c0 = bmg.add_constant(0.0)
self.assertEqual(bt.Zero, typer[c0])
c1 = bmg.add_constant(1.0)
self.assertEqual(bt.One, typer[c1])
c2 = bmg.add_constant(2.0)
self.assertEqual(bt.Natural, typer[c2])
c3 = bmg.add_constant(1.5)
self.assertEqual(bt.PositiveReal, typer[c3])
c4 = bmg.add_constant(-1.5)
self.assertEqual(bt.NegativeReal, typer[c4])
c5 = bmg.add_constant(0.5)
self.assertEqual(bt.Probability, typer[c5])
# BMG type of tensor is given assuming that when we emit it into
# the BMG graph, it will be transposed into column-major form.
# In BMG, it will be [[1.5], [-1.5]] and therefore this tensor is
# typed as having two rows, one column, not one row, two columns
# as it does in torch.
c6 = bmg.add_constant(torch.tensor([1.5, -1.5]))
self.assertEqual(bt.Real.with_dimensions(2, 1), typer[c6])
# Lattice type of a typed constant is based on its type,
# not its value. This real node is a real, even though its
# value fits into a natural.
c7 = bmg.add_real(2.0)
self.assertEqual(bt.Real, typer[c7])
# Lattice type of distributions is fixed:
d0 = bmg.add_beta(c2, c2)
prob = bmg.add_sample(d0)
self.assertEqual(bt.Probability, typer[prob])
d1 = bmg.add_bernoulli(prob)
bo = bmg.add_sample(d1)
self.assertEqual(bt.Boolean, typer[bo])
d2 = bmg.add_binomial(c2, prob)
nat = bmg.add_sample(d2)
self.assertEqual(bt.Natural, typer[nat])
d3 = bmg.add_halfcauchy(c3)
posr = bmg.add_sample(d3)
self.assertEqual(bt.PositiveReal, typer[posr])
negr = bmg.add_negate(posr)
self.assertEqual(bt.NegativeReal, typer[negr])
d4 = bmg.add_normal(c0, c1)
re = bmg.add_sample(d4)
self.assertEqual(bt.Real, typer[re])
d5 = bmg.add_poisson(c1)
re = bmg.add_sample(d5)
self.assertEqual(bt.Natural, typer[nat])
# Lattice type of unsupported distributions and all of their
# descendants is "untypable".
d5 = bmg.add_chi2(c2)
unt1 = bmg.add_sample(d5)
unt2 = bmg.add_addition(unt1, unt1)
self.assertEqual(bt.Untypable, typer[unt1])
self.assertEqual(bt.Untypable, typer[unt2])
# Spot check some operators.
add1 = bmg.add_addition(prob, nat)
self.assertEqual(bt.PositiveReal, typer[add1])
pow1 = bmg.add_power(prob, posr)
self.assertEqual(bt.Probability, typer[pow1])
# TODO: Add more operators
| beanmachine-main | tests/ppl/compiler/lattice_typer_test.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Suppose we have a mixture of k normal distributions, each with the same
# standard deviation (2 in the model below) but different means. Our prior
# on the means is that mean(0), ..., mean(k) are standard-normally
# distributed.
#
# To draw samples mixed(0), ... from this mixture we first choose a component
# with category(0), ..., look up the corresponding sampled mean
# mean(category(0)), and then sample from a normal distribution with that
# mean. (A plain-torch sketch of this process appears after the imports.)
#
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Categorical, Normal
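# A minimal forward-sampling sketch of the generative process described above.
# Illustrative only: the test below exercises the compiled BMG graph instead,
# and the helper name is hypothetical. The component standard deviation of 2
# matches mixed() below.
def _sample_mixture_once():
    means = Normal(0.0, 1.0).sample((3,))  # one mean per component, from the prior
    component = Categorical(tensor([1.0, 3.0, 4.0])).sample()  # ratio 1:3:4
    return Normal(means[component], 2.0).sample()  # draw the observation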
@bm.random_variable
def mean(k):
# Means of the components are normally distributed
return Normal(0, 1)
@bm.random_variable
def category(item):
# Choose a category, 0, 1 or 2 with ratio 1:3:4.
return Categorical(tensor([1.0, 3.0, 4.0]))
@bm.random_variable
def mixed(item):
return Normal(mean(category(item)), 2)
class GaussianMixtureModelTest(unittest.TestCase):
def test_gmm_to_dot(self) -> None:
self.maxDiff = None
queries = [mixed(0)]
observations = {}
# Here we use a categorical distribution to choose from three possible
# samples.
#
# TODO: The inference step on categorical distributions in BMG is not
# yet implemented because the gradients are not yet computed correctly
# and because BMG NMC does not yet implement a discrete sampler. Once
# that work is complete, update this test to actually do inference.
observed = BMGInference().to_dot(queries, observations)
expected = """
digraph "graph" {
N00[label="[0.125,0.375,0.5]"];
N01[label=Categorical];
N02[label=Sample];
N03[label=0.0];
N04[label=1.0];
N05[label=Normal];
N06[label=Sample];
N07[label=Sample];
N08[label=Sample];
N09[label=Choice];
N10[label=2.0];
N11[label=Normal];
N12[label=Sample];
N13[label=Query];
N00 -> N01;
N01 -> N02;
N02 -> N09;
N03 -> N05;
N04 -> N05;
N05 -> N06;
N05 -> N07;
N05 -> N08;
N06 -> N09;
N07 -> N09;
N08 -> N09;
N09 -> N11;
N10 -> N11;
N11 -> N12;
N12 -> N13;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/gaussian_mixture_model_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference.bmg_inference import BMGInference
from torch.distributions import Bernoulli, Beta
# If we use a positive-real-valued *operator* in a context where
# a probability is required, we allow it. But we don't allow out-of-range
# constants.
#
# For example, if we have a probability divided by two, that's still a
# probability. But adding it to another probability results in a positive
# real even though we know it is still between 0.0 and 1.0.
#
# What we do in this situation is automatically insert a "to probability"
# operator that coerces the positive real to a probability.
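# A quick numeric check of the claim above (a sketch, not used by the tests):
# for any p in [0.0, 1.0], p * 0.5 + 0.5 lies in [0.5, 1.0], so the inserted
# "to probability" coercion can never observe an out-of-range value.
def _scaled_shifted_prob_in_range(p: float) -> bool:
    assert 0.0 <= p <= 1.0
    return 0.5 <= p * 0.5 + 0.5 <= 1.0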
@bm.random_variable
def beta(n):
return Beta(2.0, 2.0)
@bm.random_variable
def flip():
return Bernoulli(beta(0) * 0.5 + 0.5)
# However, we should still reject constants that are out of bounds.
@bm.random_variable
def bad_flip():
return Bernoulli(2.5)
# Similarly for log-probabilities which are negative reals.
def log1mexp(x):
return (1 - x.exp()).log()
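# The identity behind the negative-real requirement (illustrative sketch with
# a hypothetical helper name): for probabilities p1, p2 in (0, 1) we have
# log(p1) + log(p2) = log(p1 * p2) < 0, so log1mexp of the sum is well
# defined and equals log(1 - p1 * p2).
def _log_one_minus_product(p1, p2):
    # p1 and p2 are tensors holding probabilities strictly inside (0, 1).
    return log1mexp(p1.log() + p2.log())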
@bm.functional
def to_neg_real():
pr1 = beta(1) * 0.5 + 0.5 # positive real
pr2 = beta(2) * 0.5 + 0.5 # positive real
lg1 = pr1.log() # real
lg2 = pr2.log() # real
# Because we think pr1 and pr2 are positive reals instead of probabilities,
# we also think that lg1 and lg2 are reals instead of negative reals.
inv = log1mexp(lg1 + lg2) # needs a negative real
# We should insert a TO_NEG_REAL node on the sum above.
return inv
class ToProbabilityTest(unittest.TestCase):
def test_to_probability_1(self) -> None:
self.maxDiff = None
bmg = BMGInference()
observed = bmg.to_dot([flip()], {})
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=Beta];
N02[label=Sample];
N03[label=0.5];
N04[label="*"];
N05[label=ToPosReal];
N06[label=0.5];
N07[label="+"];
N08[label=ToProb];
N09[label=Bernoulli];
N10[label=Sample];
N11[label=Query];
N00 -> N01;
N00 -> N01;
N01 -> N02;
N02 -> N04;
N03 -> N04;
N04 -> N05;
N05 -> N07;
N06 -> N07;
N07 -> N08;
N08 -> N09;
N09 -> N10;
N10 -> N11;
}
"""
self.assertEqual(observed.strip(), expected.strip())
def test_to_probability_2(self) -> None:
self.maxDiff = None
bmg = BMGInference()
# TODO: Raise a better error than a generic ValueError
with self.assertRaises(ValueError) as ex:
bmg.infer([bad_flip()], {}, 10)
expected = """
The probability of a Bernoulli is required to be a probability but is a positive real.
The Bernoulli was created in function call bad_flip()."""
self.assertEqual(expected.strip(), str(ex.exception).strip())
def test_to_neg_real_1(self) -> None:
self.maxDiff = None
observed = BMGInference().to_dot([to_neg_real()], {})
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=Beta];
N02[label=Sample];
N03[label=Sample];
N04[label=0.5];
N05[label="*"];
N06[label=ToPosReal];
N07[label=0.5];
N08[label="+"];
N09[label=Log];
N10[label="*"];
N11[label=ToPosReal];
N12[label="+"];
N13[label=Log];
N14[label="+"];
N15[label=ToNegReal];
N16[label=Log1mexp];
N17[label=Query];
N00 -> N01;
N00 -> N01;
N01 -> N02;
N01 -> N03;
N02 -> N05;
N03 -> N10;
N04 -> N05;
N04 -> N10;
N05 -> N06;
N06 -> N08;
N07 -> N08;
N07 -> N12;
N08 -> N09;
N09 -> N14;
N10 -> N11;
N11 -> N12;
N12 -> N13;
N13 -> N14;
N14 -> N15;
N15 -> N16;
N16 -> N17;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| beanmachine-main | tests/ppl/compiler/to_probability_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.compiler.copy_and_replace import copy_and_replace
from beanmachine.ppl.compiler.devectorizer_transformer import Devectorizer
from beanmachine.ppl.compiler.gen_dot import to_dot
from beanmachine.ppl.compiler.runtime import BMGRuntime
from torch import mm, tensor
from torch.distributions import Bernoulli, Gamma, HalfCauchy, Normal, StudentT
@bm.random_variable
def norm_tensor(n):
return Normal(tensor([0.0, 0.5]), tensor([0.6, 1.0]))
class DevectorizeTransformerTest(unittest.TestCase):
def test_needs_transform_because_parent_cannot_be_merged(self) -> None:
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph([norm_tensor(0)], {})
transformed_graph, error_report = copy_and_replace(
bmg, lambda c, s: Devectorizer(c, s)
)
observed = to_dot(transformed_graph)
expected = """
digraph "graph" {
N00[label="[0.0,0.5]"];
N01[label=0];
N02[label=index];
N03[label="[0.6000000238418579,1.0]"];
N04[label=index];
N05[label=Normal];
N06[label=Sample];
N07[label=1];
N08[label=index];
N09[label=index];
N10[label=Normal];
N11[label=Sample];
N12[label=Tensor];
N13[label=Query];
N00 -> N02[label=left];
N00 -> N08[label=left];
N01 -> N02[label=right];
N01 -> N04[label=right];
N02 -> N05[label=mu];
N03 -> N04[label=left];
N03 -> N09[label=left];
N04 -> N05[label=sigma];
N05 -> N06[label=operand];
N06 -> N12[label=left];
N07 -> N08[label=right];
N07 -> N09[label=right];
N08 -> N10[label=mu];
N09 -> N10[label=sigma];
N10 -> N11[label=operand];
N11 -> N12[label=right];
N12 -> N13[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_transform_multiple_operands(self) -> None:
_y_obs = tensor([33.3, 68.3])
@bm.random_variable
def sigma_out():
return Gamma(1, 1)
@bm.functional
def multiple_operands():
mu = norm_tensor(0)
ns = Normal(mu, sigma_out())
return ns.log_prob(_y_obs)
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph([multiple_operands()], {})
transformed_graph, error_report = copy_and_replace(
bmg, lambda c, s: Devectorizer(c, s)
)
observed = to_dot(transformed_graph)
expected = """
digraph "graph" {
N00[label="[0.0,0.5]"];
N01[label=0];
N02[label=index];
N03[label="[0.6000000238418579,1.0]"];
N04[label=index];
N05[label=Normal];
N06[label=Sample];
N07[label=1.0];
N08[label=Gamma];
N09[label=Sample];
N10[label=Normal];
N11[label="[33.29999923706055,68.30000305175781]"];
N12[label=index];
N13[label=LogProb];
N14[label=1];
N15[label=index];
N16[label=index];
N17[label=Normal];
N18[label=Sample];
N19[label=Normal];
N20[label=index];
N21[label=LogProb];
N22[label=Tensor];
N23[label=Query];
N00 -> N02[label=left];
N00 -> N15[label=left];
N01 -> N02[label=right];
N01 -> N04[label=right];
N01 -> N12[label=right];
N02 -> N05[label=mu];
N03 -> N04[label=left];
N03 -> N16[label=left];
N04 -> N05[label=sigma];
N05 -> N06[label=operand];
N06 -> N10[label=mu];
N07 -> N08[label=concentration];
N07 -> N08[label=rate];
N08 -> N09[label=operand];
N09 -> N10[label=sigma];
N09 -> N19[label=sigma];
N10 -> N13[label=distribution];
N11 -> N12[label=left];
N11 -> N20[label=left];
N12 -> N13[label=value];
N13 -> N22[label=left];
N14 -> N15[label=right];
N14 -> N16[label=right];
N14 -> N20[label=right];
N15 -> N17[label=mu];
N16 -> N17[label=sigma];
N17 -> N18[label=operand];
N18 -> N19[label=mu];
N19 -> N21[label=distribution];
N20 -> N21[label=value];
N21 -> N22[label=right];
N22 -> N23[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_needs_merge(self) -> None:
@bm.functional
def foo():
return mm(tensor([2.0, 7.5]), norm_tensor(0))
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph([foo()], {})
transformed_graph, error_report = copy_and_replace(
bmg, lambda c, s: Devectorizer(c, s)
)
observed = to_dot(transformed_graph)
expected = """
digraph "graph" {
N00[label="[2.0,7.5]"];
N01[label="[0.0,0.5]"];
N02[label=0];
N03[label=index];
N04[label="[0.6000000238418579,1.0]"];
N05[label=index];
N06[label=Normal];
N07[label=Sample];
N08[label=1];
N09[label=index];
N10[label=index];
N11[label=Normal];
N12[label=Sample];
N13[label=Tensor];
N14[label="@"];
N15[label=Query];
N00 -> N14[label=left];
N01 -> N03[label=left];
N01 -> N09[label=left];
N02 -> N03[label=right];
N02 -> N05[label=right];
N03 -> N06[label=mu];
N04 -> N05[label=left];
N04 -> N10[label=left];
N05 -> N06[label=sigma];
N06 -> N07[label=operand];
N07 -> N13[label=left];
N08 -> N09[label=right];
N08 -> N10[label=right];
N09 -> N11[label=mu];
N10 -> N11[label=sigma];
N11 -> N12[label=operand];
N12 -> N13[label=right];
N13 -> N14[label=right];
N14 -> N15[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_broadcast(self) -> None:
@bm.random_variable
def flip_const_2_3():
return Bernoulli(tensor([[0.25, 0.75, 0.5], [0.125, 0.875, 0.625]]))
@bm.random_variable
def normal_2_3():
mus = flip_const_2_3() # 2 x 3 tensor of 0 or 1
sigmas = tensor([2.0, 3.0, 4.0])
return Normal(mus, sigmas)
@bm.random_variable
def hc_3():
return HalfCauchy(tensor([1.0, 2.0, 3.0]))
@bm.random_variable
def studentt_2_3():
return StudentT(hc_3(), normal_2_3(), hc_3())
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph([studentt_2_3()], {})
transformed_graph, error_report = copy_and_replace(
bmg, lambda c, s: Devectorizer(c, s)
)
observed = to_dot(transformed_graph)
expected = """
digraph "graph" {
N00[label="[1.0,2.0,3.0]"];
N01[label=0];
N02[label=index];
N03[label=HalfCauchy];
N04[label=Sample];
N05[label="[[0.25,0.75,0.5],\\\\n[0.125,0.875,0.625]]"];
N06[label=index];
N07[label=index];
N08[label=Bernoulli];
N09[label=Sample];
N10[label="[2.0,3.0,4.0]"];
N11[label=index];
N12[label=Normal];
N13[label=Sample];
N14[label=StudentT];
N15[label=Sample];
N16[label=1];
N17[label=index];
N18[label=HalfCauchy];
N19[label=Sample];
N20[label=index];
N21[label=Bernoulli];
N22[label=Sample];
N23[label=index];
N24[label=Normal];
N25[label=Sample];
N26[label=StudentT];
N27[label=Sample];
N28[label=2];
N29[label=index];
N30[label=HalfCauchy];
N31[label=Sample];
N32[label=index];
N33[label=Bernoulli];
N34[label=Sample];
N35[label=index];
N36[label=Normal];
N37[label=Sample];
N38[label=StudentT];
N39[label=Sample];
N40[label=index];
N41[label=index];
N42[label=Bernoulli];
N43[label=Sample];
N44[label=Normal];
N45[label=Sample];
N46[label=StudentT];
N47[label=Sample];
N48[label=index];
N49[label=Bernoulli];
N50[label=Sample];
N51[label=Normal];
N52[label=Sample];
N53[label=StudentT];
N54[label=Sample];
N55[label=index];
N56[label=Bernoulli];
N57[label=Sample];
N58[label=Normal];
N59[label=Sample];
N60[label=StudentT];
N61[label=Sample];
N62[label=Tensor];
N63[label=Query];
N00 -> N02[label=left];
N00 -> N17[label=left];
N00 -> N29[label=left];
N01 -> N02[label=right];
N01 -> N06[label=right];
N01 -> N07[label=right];
N01 -> N11[label=right];
N01 -> N41[label=right];
N02 -> N03[label=scale];
N03 -> N04[label=operand];
N04 -> N14[label=df];
N04 -> N14[label=scale];
N04 -> N46[label=df];
N04 -> N46[label=scale];
N05 -> N06[label=left];
N05 -> N40[label=left];
N06 -> N07[label=left];
N06 -> N20[label=left];
N06 -> N32[label=left];
N07 -> N08[label=probability];
N08 -> N09[label=operand];
N09 -> N12[label=mu];
N10 -> N11[label=left];
N10 -> N23[label=left];
N10 -> N35[label=left];
N11 -> N12[label=sigma];
N11 -> N44[label=sigma];
N12 -> N13[label=operand];
N13 -> N14[label=loc];
N14 -> N15[label=operand];
N15 -> N62[label=0];
N16 -> N17[label=right];
N16 -> N20[label=right];
N16 -> N23[label=right];
N16 -> N40[label=right];
N16 -> N48[label=right];
N17 -> N18[label=scale];
N18 -> N19[label=operand];
N19 -> N26[label=df];
N19 -> N26[label=scale];
N19 -> N53[label=df];
N19 -> N53[label=scale];
N20 -> N21[label=probability];
N21 -> N22[label=operand];
N22 -> N24[label=mu];
N23 -> N24[label=sigma];
N23 -> N51[label=sigma];
N24 -> N25[label=operand];
N25 -> N26[label=loc];
N26 -> N27[label=operand];
N27 -> N62[label=1];
N28 -> N29[label=right];
N28 -> N32[label=right];
N28 -> N35[label=right];
N28 -> N55[label=right];
N29 -> N30[label=scale];
N30 -> N31[label=operand];
N31 -> N38[label=df];
N31 -> N38[label=scale];
N31 -> N60[label=df];
N31 -> N60[label=scale];
N32 -> N33[label=probability];
N33 -> N34[label=operand];
N34 -> N36[label=mu];
N35 -> N36[label=sigma];
N35 -> N58[label=sigma];
N36 -> N37[label=operand];
N37 -> N38[label=loc];
N38 -> N39[label=operand];
N39 -> N62[label=2];
N40 -> N41[label=left];
N40 -> N48[label=left];
N40 -> N55[label=left];
N41 -> N42[label=probability];
N42 -> N43[label=operand];
N43 -> N44[label=mu];
N44 -> N45[label=operand];
N45 -> N46[label=loc];
N46 -> N47[label=operand];
N47 -> N62[label=3];
N48 -> N49[label=probability];
N49 -> N50[label=operand];
N50 -> N51[label=mu];
N51 -> N52[label=operand];
N52 -> N53[label=loc];
N53 -> N54[label=operand];
N54 -> N62[label=4];
N55 -> N56[label=probability];
N56 -> N57[label=operand];
N57 -> N58[label=mu];
N58 -> N59[label=operand];
N59 -> N60[label=loc];
N60 -> N61[label=operand];
N61 -> N62[label=5];
N62 -> N63[label=operator];
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_failure(self) -> None:
# In order to devectorize correctly, all sizes must be known. Note that
# "ns.log_prob" produces an unsizable node, since we are asking for the
# log prob of a tensor of size 3 with respect to a distribution whose
# samples are of size 2. (A plain-torch sketch of this mismatch appears
# after the class.)
_y_obs = tensor([33.3, 68.3, 6.7])
@bm.random_variable
def sigma_out():
return Gamma(1, 1)
@bm.functional
def unsizable():
mu = norm_tensor(0)
ns = Normal(mu, sigma_out())
return ns.log_prob(_y_obs)
self.maxDiff = None
bmg = BMGRuntime().accumulate_graph([unsizable()], {})
transformed_graph, error_report = copy_and_replace(
bmg, lambda c, s: Devectorizer(c, s)
)
if len(error_report.errors) == 1:
error = error_report.errors[0].__str__()
expected = """
The node log_prob cannot be sized.The operand sizes may be incompatible or the size may not be computable at compile time. The operand sizes are: [torch.Size([2]), torch.Size([3])]
The unsizable node was created in function call unsizable().
"""
self.assertEqual(expected.strip(), error.strip())
else:
self.fail(
"A single error message should have been generated since the sizer cannot size every node"
)
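# A plain-torch sketch of the size mismatch above (not part of the test
# suite; the helper name is hypothetical): a Normal over size-2 samples
# cannot score a size-3 value, and torch raises the analogous shape error
# that makes the BMG node unsizable.
def _demonstrate_log_prob_size_mismatch():
    dist_of_2 = Normal(tensor([0.0, 0.5]), tensor([0.6, 1.0]))
    try:
        return dist_of_2.log_prob(tensor([33.3, 68.3, 6.7]))
    except (RuntimeError, ValueError) as err:  # shapes [2] and [3] don't broadcast
        return err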
| beanmachine-main | tests/ppl/compiler/devectorizer_transformer_test.py |